Merge branch 'master' into light-local-tx
commit 4a07010539
CHANGELOG.md (27 lines changed)
@@ -1,3 +1,30 @@
+## Parity [v1.8.1](https://github.com/paritytech/parity/releases/tag/v1.8.1) (2017-10-20)
+
+Parity 1.8.1 fixes several bugs with token balances, tweaks snapshot-sync, improves the performance of nodes with huge amounts of accounts, and changes the Trezor account derivation path.
+
+**Important Note**: The **Trezor** account derivation path was changed in this release ([#6815](https://github.com/paritytech/parity/pull/6815)) to always use the first account (`m/44'/60'/0'/0/0` instead of `m/44'/60'/0'/0`). This enables compatibility with other Ethereum wallets that support Trezor hardware wallets. However, if you have funds on your Parity Trezor wallet, **action is required** before upgrading. If you have already upgraded to 1.8.1, please downgrade to 1.8.0 first and recover the funds with the following steps:
+
+1. Make sure you have 1.8.0-beta and your Trezor plugged in.
+2. Create a new standard Parity account. Make sure you have backups of the recovery phrase and don't forget the password.
+3. Move your funds from the Trezor hardware-wallet account to the freshly generated Parity account.
+4. Upgrade to 1.8.1-beta and plug in your Trezor.
+5. Move your funds from your Parity account to the new Trezor account.
+6. Keep using Parity as normal.
+
+If you don't want to downgrade or move your funds off your Trezor device, you can also use the official Trezor application or another wallet that allows selecting the derivation path to access the funds.
+
+Full list of included changes:
+
+- Add ECIP1017 to Morden config ([#6845](https://github.com/paritytech/parity/pull/6845))
+- Ethstore optimizations ([#6844](https://github.com/paritytech/parity/pull/6844))
+- Bumb to v1.8.1 ([#6843](https://github.com/paritytech/parity/pull/6843))
+- Backport ([#6837](https://github.com/paritytech/parity/pull/6837))
+- Tweaked snapshot sync threshold ([#6829](https://github.com/paritytech/parity/pull/6829))
+- Change keypath derivation logic ([#6815](https://github.com/paritytech/parity/pull/6815))
+- Refresh cached tokens based on registry info & random balances ([#6824](https://github.com/paritytech/parity/pull/6824))
+- Refresh cached tokens based on registry info & random balances ([#6818](https://github.com/paritytech/parity/pull/6818))
+- Don't display errored token images
+
 ## Parity [v1.8.0](https://github.com/paritytech/parity/releases/tag/v1.8.0) (2017-10-15)
 
 We are happy to announce our newest Parity 1.8 release. Among others, it enables the following features:
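For illustration, the derivation-path change described in the Trezor note above comes down to one extra child index. A minimal, self-contained Rust sketch (not Parity's actual hardware-wallet code) of why the two versions see different accounts:

```rust
// Illustrative only: pre-1.8.1 Parity stopped at the account node, while
// 1.8.1+ (and most other wallets) append an address index as well.
fn components(path: &str) -> Vec<&str> {
    path.trim_start_matches("m/").split('/').collect()
}

fn main() {
    let old = components("m/44'/60'/0'/0");   // Parity <= 1.8.0
    let new = components("m/44'/60'/0'/0/0"); // Parity >= 1.8.1
    assert_eq!(new.len(), old.len() + 1);     // one extra index => different key
    println!("old: {:?}\nnew: {:?}", old, new);
}
```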
Cargo.lock (generated, 53 lines changed)
@@ -7,7 +7,7 @@ dependencies = [
  "ethcore-logger 1.9.0",
  "ethcore-util 1.9.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-wasm 0.14.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parity-wasm 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "vm 0.1.0",
  "wasm-utils 0.1.0 (git+https://github.com/paritytech/wasm-utils)",
 ]
@@ -77,20 +77,6 @@ dependencies = [
  "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "backtrace"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "backtrace-sys 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "backtrace"
 version = "0.3.3"
@@ -214,11 +200,6 @@ dependencies = [
  "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "byteorder"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
 [[package]]
 name = "byteorder"
 version = "1.1.0"
@@ -453,14 +434,6 @@ dependencies = [
  "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "error-chain"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "backtrace 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "error-chain"
 version = "0.11.0"
@@ -940,6 +913,7 @@ dependencies = [
  "ethjson 0.1.0",
  "evm 0.1.0",
  "panic_hook 0.1.0",
+ "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1750,7 +1724,7 @@ dependencies = [
  "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
  "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "ntp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ntp 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-reactor 0.1.0",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1773,13 +1747,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "ntp"
-version = "0.2.0"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "error-chain 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2266,7 +2240,7 @@ dependencies = [
 [[package]]
 name = "parity-ui-precompiled"
 version = "1.9.0"
-source = "git+https://github.com/paritytech/js-precompiled.git#f068e601cc43df21f264445339e3682977a49e23"
+source = "git+https://github.com/paritytech/js-precompiled.git#1626d64235241e75c531eece004a4923d9d4fcc6"
 dependencies = [
  "parity-dapps-glue 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2293,7 +2267,7 @@ dependencies = [
 
 [[package]]
 name = "parity-wasm"
-version = "0.14.5"
+version = "0.15.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3451,7 +3425,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "wasm-utils"
 version = "0.1.0"
-source = "git+https://github.com/paritytech/wasm-utils#6a39db802eb6b67a0c4e5cf50741f965e217335a"
+source = "git+https://github.com/paritytech/wasm-utils#3d59f7ca0661317bc66894a26b2a5a319fa5d229"
 dependencies = [
  "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3459,7 +3433,7 @@ dependencies = [
  "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-wasm 0.14.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parity-wasm 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -3538,7 +3512,6 @@ dependencies = [
 "checksum arrayvec 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "699e63a93b79d717e8c3b5eb1b28b7780d0d6d9e59a72eb769291c83b0c8dc67"
 "checksum aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfdf7355d9db158df68f976ed030ab0f6578af811f5a7bb6dcf221ec24e0e0"
 "checksum atty 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d912da0db7fa85514874458ca3651fe2cddace8d0b0505571dbdcd41ab490159"
-"checksum backtrace 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "346d7644f0b5f9bc73082d3b2236b69a05fd35cce0cfa3724e184e6a5c9e2a2f"
 "checksum backtrace 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "99f2ce94e22b8e664d95c57fff45b98a966c2252b60691d0b7aeeccd88d70983"
 "checksum backtrace-sys 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "c63ea141ef8fdb10409d0f5daf30ac51f84ef43bff66f16627773d2a292cd189"
 "checksum base-x 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f59103b47307f76e03bef1633aec7fa9e29bfb5aa6daf5a334f94233c71f6c1"
@@ -3554,7 +3527,6 @@ dependencies = [
 "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
 "checksum bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f421095d2a76fc24cd3fb3f912b90df06be7689912b1bdb423caefae59c258d"
 "checksum bn 0.4.4 (git+https://github.com/paritytech/bn)" = "<none>"
-"checksum byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855"
 "checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d"
 "checksum bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d828f97b58cc5de3e40c421d0cf2132d6b2da4ee0e11b8632fa838f0f9333ad6"
 "checksum cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7db2f146208d7e0fbee761b09cd65a7f51ccc38705d4e7262dad4d73b12a76b1"
@@ -3582,7 +3554,6 @@ dependencies = [
 "checksum elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "258ff6a9a94f648d0379dbd79110e057edbb53eb85cc237e33eadf8e5a30df85"
 "checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b"
 "checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3"
-"checksum error-chain 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bd5c82c815138e278b8dcdeffc49f27ea6ffb528403e9dea4194f2e3dd40b143"
 "checksum eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)" = "<none>"
 "checksum ethabi 4.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c819a3adef0413a2519cbd9a19a35dd1c20c7a0110705beaba8aa4aa87eda95f"
 "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa"
@@ -3655,7 +3626,7 @@ dependencies = [
 "checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09"
 "checksum nodrop 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "52cd74cd09beba596430cc6e3091b74007169a56246e1262f0ba451ea95117b2"
 "checksum nom 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b8c256fd9471521bcb84c3cdba98921497f1a331cbc15b8030fc63b82050ce"
-"checksum ntp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d23f30ae7da76e2c6c2f5de53f298aa9a3911d3955ab2c349eb944caedceb088"
+"checksum ntp 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "143149743832c6543b60a8ef2a26cd9122dfecec2b767158e852a7beecf6d7a0"
 "checksum num 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "a311b77ebdc5dd4cf6449d81e4135d9f0e3b153839ac90e648a8ef538f923525"
 "checksum num-bigint 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "8fd0f8dbb4c0960998958a796281d88c16fbe68d87b1baa6f31e2979e81fd0bd"
 "checksum num-complex 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "503e668405c5492d67cf662a81e05be40efe2e6bcf10f7794a07bd9865e704e6"
@@ -3676,7 +3647,7 @@ dependencies = [
 "checksum parity-tokio-ipc 0.1.5 (git+https://github.com/nikvolf/parity-tokio-ipc)" = "<none>"
 "checksum parity-ui-old-precompiled 1.8.0 (git+https://github.com/paritytech/js-precompiled.git?branch=v1)" = "<none>"
 "checksum parity-ui-precompiled 1.9.0 (git+https://github.com/paritytech/js-precompiled.git)" = "<none>"
-"checksum parity-wasm 0.14.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4502e18417d96bd8e72fca9ea4cc18f4d80288ff565582d10aefe86f18b4fc3"
+"checksum parity-wasm 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "95f6243c2d6fadf903b5edfd0011817efc20522ce5f360abf4648c24ea87581a"
 "checksum parity-wordlist 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "81451bfab101d186f8fc4a0aa13cb5539b31b02c4ed96425a0842e2a413daba6"
 "checksum parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "149d8f5b97f3c1133e3cfcd8886449959e856b557ff281e292b733d7c69e005e"
 "checksum parking_lot_core 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4f610cb9664da38e417ea3225f23051f589851999535290e077939838ab7a595"
README.md (33 lines changed)
@@ -4,7 +4,7 @@
 [![Snap Status](https://build.snapcraft.io/badge/paritytech/parity.svg)](https://build.snapcraft.io/user/paritytech/parity)
 [![GPLv3](https://img.shields.io/badge/license-GPL%20v3-green.svg)](https://www.gnu.org/licenses/gpl-3.0.en.html)
 
-- [Download the latest release here.](https://github.com/paritytech/parity/releases)
+- [Download the latest release here.](https://github.com/paritytech/parity/releases/latest)
 
 ### Join the chat!
 
@@ -29,19 +29,19 @@ Parity comes with a built-in wallet. To access [Parity Wallet](http://web3.site/
 - create and register your own tokens;
 - and much more.
 
-By default, Parity will also run a JSONRPC server on `127.0.0.1:8545`. This is fully configurable and supports a number of RPC APIs.
+By default, Parity will also run a JSONRPC server on `127.0.0.1:8545` and a websockets server on `127.0.0.1:8546`. This is fully configurable and supports a number of APIs.
 
-If you run into an issue while using parity, feel free to file one in this repository or hop on our [gitter chat room](https://gitter.im/paritytech/parity) to ask a question. We are glad to help!
+If you run into an issue while using parity, feel free to file one in this repository or hop on our [Gitter](https://gitter.im/paritytech/parity) or [Riot](https://riot.im/app/#/room/#parity-watercooler:matrix.org) chat room to ask a question. We are glad to help!
 
-**For security-critical issues**, please refer to the security policy outlined in `SECURITY.MD`.
+**For security-critical issues**, please refer to the security policy outlined in [SECURITY.MD](SECURITY.md).
 
-Parity's current release is 1.7. You can download it at https://github.com/paritytech/parity/releases or follow the instructions below to build from source.
+Parity's current release is 1.8. You can download it at https://github.com/paritytech/parity/releases or follow the instructions below to build from source.
 
 ----
 
 ## Build dependencies
 
-**Parity requires Rust version 1.19.0 to build**
+**Parity requires Rust version 1.21.0 to build**
 
 We recommend installing Rust through [rustup](https://www.rustup.rs/). If you don't already have rustup, you can install it like this:
 
@@ -51,17 +51,18 @@ We recommend installing Rust through [rustup](https://www.rustup.rs/). If you do
 ```
 
 Parity also requires `gcc`, `g++`, `libssl-dev`/`openssl`, `libudev-dev` and `pkg-config` packages to be installed.
 
 - OSX:
 ```bash
 $ curl https://sh.rustup.rs -sSf | sh
 ```
 
 `clang` is required. It comes with Xcode command line tools or can be installed with homebrew.
-- Windows
+
+- Windows
 Make sure you have Visual Studio 2015 with C++ support installed. Next, download and run the rustup installer from
 https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe, start "VS2015 x64 Native Tools Command Prompt", and use the following command to install and set up the msvc toolchain:
-```
+```bash
 $ rustup default stable-x86_64-pc-windows-msvc
 ```
 
@@ -98,6 +99,9 @@ Note: if cargo fails to parse manifest try:
 ```bash
 $ ~/.cargo/bin/cargo build --release
 ```
 
+This will always compile the latest nightly builds. If you want to build stable or beta, do a `git checkout stable` or `git checkout beta` first.
+
 ----
 
 ## Simple one-line installer for Mac and Ubuntu
@@ -106,9 +110,14 @@ $ ~/.cargo/bin/cargo build --release
 bash <(curl https://get.parity.io -Lk)
 ```
 
+The one-line installer always defaults to the latest beta release.
+
 ## Start Parity
 
+### Manually
+
 To start Parity manually, just run
 
 ```bash
 $ ./target/release/parity
 ```
@@ -116,11 +125,9 @@ $ ./target/release/parity
 and Parity will begin syncing the Ethereum blockchain.
 
 ### Using systemd service file
 
 To start Parity as a regular user using systemd init:
 
-1. Copy `parity/scripts/parity.service` to your
+1. Copy `./scripts/parity.service` to your
    systemd user directory (usually `~/.config/systemd/user`).
-2. To pass any argument to Parity, write a `~/.parity/parity.conf` file this way:
-   `ARGS="ARG1 ARG2 ARG3"`.
-
-   Example: `ARGS="ui --identity MyMachine"`.
+2. To configure Parity, write a `/etc/parity/config.toml` config file, see [Configuring Parity](https://github.com/paritytech/parity/wiki/Configuring-Parity) for details.
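The README change above mentions the JSONRPC server on `127.0.0.1:8545`. As a hedged, std-only Rust sketch of what a raw request against that endpoint looks like (it assumes a locally running node with default settings; `eth_blockNumber` is a standard Ethereum RPC method):

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

// Minimal JSONRPC-over-HTTP call; no external crates, so the HTTP request
// is written by hand and `Connection: close` lets us read to EOF.
fn main() {
    let body = r#"{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}"#;
    let request = format!(
        "POST / HTTP/1.1\r\nHost: 127.0.0.1:8545\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
        body.len(),
        body
    );
    let mut stream = TcpStream::connect("127.0.0.1:8545").expect("no node listening");
    stream.write_all(request.as_bytes()).unwrap();
    let mut response = String::new();
    stream.read_to_string(&mut response).unwrap(); // read until the server closes
    println!("{}", response);
}
```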

@@ -9,7 +9,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 futures = "0.1"
 futures-cpupool = "0.1"
 log = "0.3"
-ntp = "0.2.0"
+ntp = "0.3.0"
 parking_lot = "0.4"
 serde = "1.0"
 serde_derive = "1.0"

@@ -16,7 +16,7 @@
 
 use std::thread;
 use std::time::Duration;
-use std::io::{Read, Write};
+use std::io::{self, Read, Write};
 use std::str::{self, Lines};
 use std::net::{TcpStream, SocketAddr};
 
@@ -83,9 +83,18 @@ pub fn request(address: &SocketAddr, request: &str) -> Response {
     req.set_read_timeout(Some(Duration::from_secs(2))).unwrap();
     req.write_all(request.as_bytes()).unwrap();
 
-    let mut response = String::new();
-    let _ = req.read_to_string(&mut response);
+    let mut response = Vec::new();
+    loop {
+        let mut chunk = [0; 32 * 1024];
+        match req.read(&mut chunk) {
+            Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break,
+            Err(err) => panic!("Unable to read response: {:?}", err),
+            Ok(0) => break,
+            Ok(read) => response.extend_from_slice(&chunk[..read]),
+        }
+    }
+
+    let response = String::from_utf8_lossy(&response).into_owned();
     let mut lines = response.lines();
     let status = lines.next().expect("Expected a response").to_owned();
     let headers_raw = read_block(&mut lines, false);

@@ -322,20 +322,24 @@ impl<Cost: CostType> Interpreter<Cost> {
     let init_off = stack.pop_back();
     let init_size = stack.pop_back();
 
-    let address_scheme = if instruction == instructions::CREATE { CreateContractAddress::FromSenderAndNonce } else { CreateContractAddress::FromSenderAndCodeHash };
     let create_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is `CREATE`; qed");
 
-    let contract_code = self.mem.read_slice(init_off, init_size);
-    let can_create = ext.balance(&params.address)? >= endowment && ext.depth() < ext.schedule().max_depth;
     if ext.is_static() {
         return Err(vm::Error::MutableCallInStaticContext);
     }
 
     // clear return data buffer before creating new call frame.
     self.return_data = ReturnData::empty();
 
+    let can_create = ext.balance(&params.address)? >= endowment && ext.depth() < ext.schedule().max_depth;
     if !can_create {
         stack.push(U256::zero());
         return Ok(InstructionResult::UnusedGas(create_gas));
     }
 
+    let contract_code = self.mem.read_slice(init_off, init_size);
+    let address_scheme = if instruction == instructions::CREATE { CreateContractAddress::FromSenderAndNonce } else { CreateContractAddress::FromSenderAndCodeHash };
+
     let create_result = ext.create(&create_gas.as_u256(), &endowment, contract_code, address_scheme);
     return match create_result {
         ContractCreateResult::Created(address, gas_left) => {
@@ -351,9 +355,6 @@ impl<Cost: CostType> Interpreter<Cost> {
             stack.push(U256::zero());
             Ok(InstructionResult::Ok)
         },
-        ContractCreateResult::FailedInStaticCall => {
-            Err(vm::Error::MutableCallInStaticContext)
-        },
     };
 },
 instructions::CALL | instructions::CALLCODE | instructions::DELEGATECALL | instructions::STATICCALL => {
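The reordering above is easiest to see in isolation: the static-context guard now fires before the balance/depth check and before any state is touched. A std-only Rust sketch of the new control flow (the names mirror the diff, but this is not the actual interpreter):

```rust
#[derive(Debug, PartialEq)]
enum VmError { MutableCallInStaticContext }

// Mirrors the new ordering: the static check runs first, then the
// can-create fallback that pushes zero and refunds the remaining gas.
fn create(is_static: bool, can_create: bool) -> Result<&'static str, VmError> {
    if is_static {
        return Err(VmError::MutableCallInStaticContext);
    }
    if !can_create {
        return Ok("push 0, return unused gas");
    }
    Ok("spawn child frame")
}

fn main() {
    // In a static context CREATE now fails even if creation would be possible.
    assert_eq!(create(true, true), Err(VmError::MutableCallInStaticContext));
    assert_eq!(create(false, false), Ok("push 0, return unused gas"));
    assert_eq!(create(false, true), Ok("spawn child frame"));
}
```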

@@ -724,7 +724,6 @@ fn test_jumps(factory: super::Factory) {
     assert_eq!(gas_left, U256::from(54_117));
 }
 
-
 evm_test!{test_calls: test_calls_jit, test_calls_int}
 fn test_calls(factory: super::Factory) {
     let code = "600054602d57600160005560006000600060006050610998610100f160006000600060006050610998610100f25b".from_hex().unwrap();
@@ -769,6 +768,27 @@ fn test_calls(factory: super::Factory) {
     assert_eq!(ext.calls.len(), 2);
 }
 
+evm_test!{test_create_in_staticcall: test_create_in_staticcall_jit, test_create_in_staticcall_int}
+fn test_create_in_staticcall(factory: super::Factory) {
+    let code = "600060006064f000".from_hex().unwrap();
+
+    let address = Address::from(0x155);
+    let mut params = ActionParams::default();
+    params.gas = U256::from(100_000);
+    params.code = Some(Arc::new(code));
+    params.address = address.clone();
+    let mut ext = FakeExt::new_byzantium();
+    ext.is_static = true;
+
+    let err = {
+        let mut vm = factory.create(params.gas);
+        test_finalize(vm.exec(params, &mut ext)).unwrap_err()
+    };
+
+    assert_eq!(err, vm::Error::MutableCallInStaticContext);
+    assert_eq!(ext.calls.len(), 0);
+}
+
 fn assert_set_contains<T : Debug + Eq + PartialEq + Hash>(set: &HashSet<T>, val: &T) {
     let contains = set.contains(val);
     if !contains {

@@ -516,8 +516,8 @@ impl AccountProvider {
     }
 
     /// Returns each hardware account along with name and meta.
-    pub fn is_hardware_address(&self, address: Address) -> bool {
-        self.hardware_store.as_ref().and_then(|s| s.wallet_info(&address)).is_some()
+    pub fn is_hardware_address(&self, address: &Address) -> bool {
+        self.hardware_store.as_ref().and_then(|s| s.wallet_info(address)).is_some()
     }
 
     /// Returns each account along with name and meta.
@@ -589,7 +589,7 @@ impl AccountProvider {
         }
     }
 
-    if self.unlock_keep_secret && unlock != Unlock::OneTime {
+    if self.unlock_keep_secret && unlock == Unlock::Perm {
         // verify password and get the secret
         let secret = self.sstore.raw_secret(&account, &password)?;
         self.unlocked_secrets.write().insert(account.clone(), secret);
@@ -639,14 +639,22 @@ impl AccountProvider {
     }
 
     /// Checks if given account is unlocked
-    pub fn is_unlocked(&self, address: Address) -> bool {
+    pub fn is_unlocked(&self, address: &Address) -> bool {
         let unlocked = self.unlocked.read();
         let unlocked_secrets = self.unlocked_secrets.read();
-        self.sstore.account_ref(&address)
+        self.sstore.account_ref(address)
             .map(|r| unlocked.get(&r).is_some() || unlocked_secrets.get(&r).is_some())
             .unwrap_or(false)
     }
 
+    /// Checks if given account is unlocked permanently
+    pub fn is_unlocked_permanently(&self, address: &Address) -> bool {
+        let unlocked = self.unlocked.read();
+        self.sstore.account_ref(address)
+            .map(|r| unlocked.get(&r).map_or(false, |account| account.unlock == Unlock::Perm))
+            .unwrap_or(false)
+    }
+
     /// Signs the message. If password is not provided the account must be unlocked.
     pub fn sign(&self, address: Address, password: Option<String>, message: Message) -> Result<Signature, SignError> {
         let account = self.sstore.account_ref(&address)?;
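The `&Address` signature changes above are a plain by-reference refactor; a std-only sketch of the pattern (with a stand-in `Address` type, not ethstore's):

```rust
use std::collections::HashSet;

type Address = [u8; 20]; // stand-in for the real H160 address type

struct Provider { unlocked: HashSet<Address> }

impl Provider {
    // Borrowing is enough for a lookup; callers keep ownership.
    fn is_unlocked(&self, address: &Address) -> bool {
        self.unlocked.contains(address)
    }
}

fn main() {
    let addr = [0u8; 20];
    let provider = Provider { unlocked: vec![addr].into_iter().collect() };
    assert!(provider.is_unlocked(&addr));
    assert!(provider.is_unlocked(&addr)); // addr is still usable afterwards
}
```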

@@ -1142,7 +1142,7 @@ impl Client {
     state_diff: bool,
     transaction: &SignedTransaction,
     options: TransactOptions<T, V>,
-) -> Result<Executed, CallError> where
+) -> Result<Executed<T::Output, V::Output>, CallError> where
     T: trace::Tracer,
     V: trace::VMTracer,
 {
@@ -1242,7 +1242,7 @@ impl BlockChainClient for Client {
     // that's just a copy of the state.
     let original_state = self.state_at(block).ok_or(CallError::StatePruned)?;
     let sender = t.sender();
-    let options = || TransactOptions::with_tracing();
+    let options = || TransactOptions::with_tracing().dont_check_nonce();
 
     let cond = |gas| {
         let mut tx = t.as_unsigned().clone();

@@ -197,7 +197,7 @@ impl<'a> EvmTestClient<'a> {
     env_info: &client::EnvInfo,
     transaction: transaction::SignedTransaction,
     vm_tracer: T,
-) -> TransactResult {
+) -> TransactResult<T::Output> {
     let initial_gas = transaction.gas;
     // Verify transaction
     let is_ok = transaction.verify_basic(true, None, env_info.number >= self.spec.engine.params().eip86_transition);
@@ -218,7 +218,8 @@ impl<'a> EvmTestClient<'a> {
     TransactResult::Ok {
         state_root: *self.state.root(),
         gas_left: initial_gas - result.receipt.gas_used,
-        output: result.output
+        output: result.output,
+        vm_trace: result.vm_trace,
     }
 },
 Err(error) => TransactResult::Err {
@@ -230,7 +231,7 @@ impl<'a> EvmTestClient<'a> {
 }
 
 /// A result of applying transaction to the state.
-pub enum TransactResult {
+pub enum TransactResult<T> {
     /// Successful execution
     Ok {
         /// State root
@@ -239,6 +240,8 @@ pub enum TransactResult {
         gas_left: U256,
         /// Output
         output: Vec<u8>,
+        /// VM Traces
+        vm_trace: Option<T>,
     },
     /// Transaction failed to run
     Err {

@@ -29,7 +29,7 @@ use std::fmt;
 
 /// Transaction execution receipt.
 #[derive(Debug, PartialEq, Clone)]
-pub struct Executed {
+pub struct Executed<T = FlatTrace, V = VMTrace> {
     /// True if the outer call/create resulted in an exceptional exit.
     pub exception: Option<vm::Error>,
 
@@ -63,9 +63,9 @@ pub struct Executed {
     /// Transaction output.
     pub output: Bytes,
     /// The trace of this transaction.
-    pub trace: Vec<FlatTrace>,
+    pub trace: Vec<T>,
     /// The VM trace of this transaction.
-    pub vm_trace: Option<VMTrace>,
+    pub vm_trace: Option<V>,
     /// The state diff, if we traced it.
     pub state_diff: Option<StateDiff>,
 }
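`Executed<T = FlatTrace, V = VMTrace>` keeps old call sites compiling while letting tracers pick their own output types. A std-only sketch of the defaulted-generics pattern:

```rust
#[derive(Debug)]
struct FlatTrace;
#[derive(Debug)]
struct VMTrace;

// Same shape as the diff: generic trace payloads with defaults that
// preserve the original, non-generic meaning of `Executed`.
#[derive(Debug)]
struct Executed<T = FlatTrace, V = VMTrace> {
    trace: Vec<T>,
    vm_trace: Option<V>,
}

fn main() {
    // Old code: `Executed` still means `Executed<FlatTrace, VMTrace>`.
    let default: Executed = Executed { trace: vec![FlatTrace], vm_trace: Some(VMTrace) };
    // New code: any tracer output works, e.g. the JSON strings evmbin buffers.
    let custom: Executed<String, String> = Executed { trace: vec!["call".into()], vm_trace: None };
    println!("{:?} / {:?}", default, custom);
}
```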

@@ -30,7 +30,7 @@ use evm::{CallType, Factory, Finalize, FinalizationResult};
 use vm::{self, Ext, CreateContractAddress, ReturnData, CleanDustMode, ActionParams, ActionValue};
 use wasm;
 use externalities::*;
-use trace::{self, FlatTrace, VMTrace, Tracer, VMTracer};
+use trace::{self, Tracer, VMTracer};
 use transaction::{Action, SignedTransaction};
 use crossbeam;
 pub use executed::{Executed, ExecutionResult};
@@ -214,7 +214,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
 
     /// This function should be used to execute transaction.
     pub fn transact<T, V>(&'a mut self, t: &SignedTransaction, options: TransactOptions<T, V>)
-        -> Result<Executed, ExecutionError> where T: Tracer, V: VMTracer,
+        -> Result<Executed<T::Output, V::Output>, ExecutionError> where T: Tracer, V: VMTracer,
     {
         self.transact_with_tracer(t, options.check_nonce, options.output_from_init_contract, options.tracer, options.vm_tracer)
     }
@@ -223,7 +223,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
     /// This will ensure the caller has enough balance to execute the desired transaction.
     /// Used for extra-block executions for things like consensus contracts and RPCs
     pub fn transact_virtual<T, V>(&'a mut self, t: &SignedTransaction, options: TransactOptions<T, V>)
-        -> Result<Executed, ExecutionError> where T: Tracer, V: VMTracer,
+        -> Result<Executed<T::Output, V::Output>, ExecutionError> where T: Tracer, V: VMTracer,
     {
         let sender = t.sender();
         let balance = self.state.balance(&sender)?;
@@ -244,7 +244,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
     output_from_create: bool,
     mut tracer: T,
     mut vm_tracer: V
-) -> Result<Executed, ExecutionError> where T: Tracer, V: VMTracer {
+) -> Result<Executed<T::Output, V::Output>, ExecutionError> where T: Tracer, V: VMTracer {
     let sender = t.sender();
     let nonce = self.state.nonce(&sender)?;
 
@@ -309,6 +309,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
     code: Some(Arc::new(t.data.clone())),
     data: None,
     call_type: CallType::None,
+    params_type: vm::ParamsType::Embedded,
 };
 let mut out = if output_from_create { Some(vec![]) } else { None };
 (self.create(params, &mut substate, &mut out, &mut tracer, &mut vm_tracer), out.unwrap_or_else(Vec::new))
@@ -326,6 +327,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
     code_hash: Some(self.state.code_hash(address)?),
     data: Some(t.data.clone()),
     call_type: CallType::Call,
+    params_type: vm::ParamsType::Separate,
 };
 let mut out = vec![];
 (self.call(params, &mut substate, BytesRef::Flexible(&mut out), &mut tracer, &mut vm_tracer), out)
@@ -587,15 +589,15 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
     }
 
     /// Finalizes the transaction (does refunds and suicides).
-    fn finalize(
+    fn finalize<T, V>(
         &mut self,
         t: &SignedTransaction,
         mut substate: Substate,
         result: vm::Result<FinalizationResult>,
         output: Bytes,
-        trace: Vec<FlatTrace>,
-        vm_trace: Option<VMTrace>
-    ) -> ExecutionResult {
+        trace: Vec<T>,
+        vm_trace: Option<V>
+    ) -> Result<Executed<T, V>, ExecutionError> {
         let schedule = self.machine.schedule(self.info.number);
 
         // refunds from SSTORE nonzero -> zero

@@ -171,6 +171,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B>
     code_hash: Some(code_hash),
     data: Some(H256::from(number).to_vec()),
     call_type: CallType::Call,
+    params_type: vm::ParamsType::Separate,
 };
 
 let mut output = H256::new();
@@ -219,6 +220,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B>
     code_hash: code_hash,
     data: None,
     call_type: CallType::None,
+    params_type: vm::ParamsType::Embedded,
 };
 
 if !self.static_flag {
@@ -240,7 +242,6 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B>
     Ok(FinalizationResult{ gas_left, apply_state: false, return_data }) => {
         ContractCreateResult::Reverted(gas_left, return_data)
     },
-    Err(vm::Error::MutableCallInStaticContext) => ContractCreateResult::FailedInStaticCall,
     _ => ContractCreateResult::Failed,
 }
 }
@@ -277,6 +278,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B>
     code_hash: Some(code_hash),
     data: Some(data.to_vec()),
     call_type: call_type,
+    params_type: vm::ParamsType::Separate,
 };
 
 if let Some(value) = value {

@@ -162,6 +162,12 @@ impl Header {
     pub fn difficulty(&self) -> &U256 { &self.difficulty }
     /// Get the seal field of the header.
     pub fn seal(&self) -> &[Bytes] { &self.seal }
+    /// Get the seal field with RLP-decoded values as bytes.
+    pub fn decode_seal<'a, T: ::std::iter::FromIterator<&'a [u8]>>(&'a self) -> Result<T, DecoderError> {
+        self.seal.iter().map(|rlp| {
+            UntrustedRlp::new(rlp).data()
+        }).collect()
+    }
 
     // TODO: seal_at, set_seal_at &c.
 
@@ -340,13 +346,20 @@ mod tests {
     // that's rlp of block header created with ethash engine.
     let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap();
     let mix_hash = "a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd".from_hex().unwrap();
+    let mix_hash_decoded = "a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd".from_hex().unwrap();
     let nonce = "88ab4e252a7e8c2a23".from_hex().unwrap();
+    let nonce_decoded = "ab4e252a7e8c2a23".from_hex().unwrap();
 
     let header: Header = rlp::decode(&header_rlp);
-    let seal_fields = header.seal;
+    let seal_fields = header.seal.clone();
     assert_eq!(seal_fields.len(), 2);
     assert_eq!(seal_fields[0], mix_hash);
     assert_eq!(seal_fields[1], nonce);
+
+    let decoded_seal = header.decode_seal::<Vec<_>>().unwrap();
+    assert_eq!(decoded_seal.len(), 2);
+    assert_eq!(decoded_seal[0], &*mix_hash_decoded);
+    assert_eq!(decoded_seal[1], &*nonce_decoded);
 }
 
 #[test]
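`decode_seal` above returns `Result<T, DecoderError>` for any `T: FromIterator<&[u8]>`, so the caller picks the collection (`Vec<_>` in the test). A std-only sketch of that collect-into-`Result` idiom:

```rust
// Collecting an iterator of `Result`s into `Result<T, E>` short-circuits
// on the first error — the same shape `decode_seal` relies on.
fn decode_all<'a, T: std::iter::FromIterator<&'a str>>(raw: &'a [String]) -> Result<T, ()> {
    raw.iter().map(|s| Ok(s.as_str())).collect()
}

fn main() {
    let raw = vec!["mix".to_string(), "nonce".to_string()];
    let fields: Vec<&str> = decode_all(&raw).unwrap(); // caller chooses Vec
    assert_eq!(fields, ["mix", "nonce"]);
}
```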

@@ -35,7 +35,7 @@ use tx_filter::TransactionFilter;
 use bigint::prelude::U256;
 use bytes::BytesRef;
 use util::Address;
-use vm::{CallType, ActionParams, ActionValue};
+use vm::{CallType, ActionParams, ActionValue, ParamsType};
 use vm::{EnvInfo, Schedule, CreateContractAddress};
 
 /// Parity tries to round block.gas_limit to multiple of this constant
@@ -149,6 +149,7 @@ impl EthereumMachine {
     code_hash: Some(state.code_hash(&contract_address)?),
     data: data,
     call_type: CallType::Call,
+    params_type: ParamsType::Separate,
 };
 let mut ex = Executive::new(&mut state, &env_info, self);
 let mut substate = Substate::new();

@@ -123,6 +123,10 @@ pub struct MinerOptions {
     pub tx_queue_banning: Banning,
     /// Do we refuse to accept service transactions even if sender is certified.
     pub refuse_service_transactions: bool,
+    /// Create a pending block with maximal possible gas limit.
+    /// NOTE: Such block will contain all pending transactions but
+    /// will be invalid if mined.
+    pub infinite_pending_block: bool,
 }
 
 impl Default for MinerOptions {
@@ -145,6 +149,7 @@ impl Default for MinerOptions {
     enable_resubmission: true,
     tx_queue_banning: Banning::Disabled,
     refuse_service_transactions: false,
+    infinite_pending_block: false,
 }
 }
 }
@@ -374,15 +379,14 @@ impl Miner {
     let mut sealing_work = self.sealing_work.lock();
     let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash());
     let best_hash = chain_info.best_block_hash;
-/*
+
     // check to see if last ClosedBlock in would_seals is actually same parent block.
     // if so
     //   duplicate, re-open and push any new transactions.
     //   if at least one was pushed successfully, close and enqueue new ClosedBlock;
     //   otherwise, leave everything alone.
     // otherwise, author a fresh block.
-*/
-    let open_block = match sealing_work.queue.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
+    let mut open_block = match sealing_work.queue.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
         Some(old_block) => {
             trace!(target: "miner", "prepare_block: Already have previous work; updating and returning");
             // add transactions to old_block
@@ -398,6 +402,11 @@ impl Miner {
     )
 }
 };
 
+if self.options.infinite_pending_block {
+    open_block.set_gas_limit(!U256::zero());
+}
+
 (transactions, open_block, last_work_hash)
 };
 
@@ -1301,6 +1310,7 @@ mod tests {
     enable_resubmission: true,
     tx_queue_banning: Banning::Disabled,
     refuse_service_transactions: false,
+    infinite_pending_block: false,
 },
 GasPricer::new_fixed(0u64.into()),
 &Spec::new_test(),
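`set_gas_limit(!U256::zero())` above sets the pending block's gas limit to the all-ones value, i.e. the maximum `U256`. The same trick, checked with a primitive type:

```rust
fn main() {
    // Bitwise NOT of zero is all ones — the largest representable value.
    let max = !0u64;
    assert_eq!(max, u64::max_value());
    println!("infinite pending block gas limit (u64 analogue): {:#x}", max);
}
```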

@@ -515,10 +515,6 @@ pub struct AccountDetails {
 /// `new_gas_price > old_gas_price + old_gas_price >> SHIFT`
 const GAS_PRICE_BUMP_SHIFT: usize = 3; // 2 = 25%, 3 = 12.5%, 4 = 6.25%
 
-/// Future queue limits are lower from current queue limits:
-/// `future_limit = current_limit >> SHIFT`
-const FUTURE_QUEUE_LIMITS_SHIFT: usize = 3; // 2 = 25%, 3 = 12.5%, 4 = 6.25%
-
 /// Describes the strategy used to prioritize transactions in the queue.
 #[cfg_attr(feature="dev", allow(enum_variant_names))]
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
@@ -626,9 +622,9 @@ impl TransactionQueue {
     by_priority: BTreeSet::new(),
     by_address: Table::new(),
     by_gas_price: Default::default(),
-    total_gas_limit: total_gas_limit >> FUTURE_QUEUE_LIMITS_SHIFT,
-    limit: limit >> FUTURE_QUEUE_LIMITS_SHIFT,
-    memory_limit: memory_limit >> FUTURE_QUEUE_LIMITS_SHIFT,
+    total_gas_limit,
+    limit,
+    memory_limit,
 };
 
 TransactionQueue {
@@ -649,7 +645,7 @@ impl TransactionQueue {
 /// Set the new limit for `current` and `future` queue.
 pub fn set_limit(&mut self, limit: usize) {
     self.current.set_limit(limit);
-    self.future.set_limit(limit >> FUTURE_QUEUE_LIMITS_SHIFT);
+    self.future.set_limit(limit);
     // And ensure the limits
     self.current.enforce_limit(&mut self.by_hash, &mut self.local_transactions);
     self.future.enforce_limit(&mut self.by_hash, &mut self.local_transactions);
@@ -686,7 +682,7 @@ impl TransactionQueue {
 /// Sets new total gas limit.
 pub fn set_total_gas_limit(&mut self, total_gas_limit: U256) {
     self.current.total_gas_limit = total_gas_limit;
-    self.future.total_gas_limit = total_gas_limit >> FUTURE_QUEUE_LIMITS_SHIFT;
+    self.future.total_gas_limit = total_gas_limit;
     self.future.enforce_limit(&mut self.by_hash, &mut self.local_transactions);
 }
 
@@ -2412,7 +2408,7 @@ pub mod test {
 fn should_limit_future_transactions() {
     let mut txq = TransactionQueue::with_limits(
         PrioritizationStrategy::GasPriceOnly,
-        1 << FUTURE_QUEUE_LIMITS_SHIFT,
+        1,
         usize::max_value(),
         !U256::zero(),
         !U256::zero(),
@@ -2736,7 +2732,7 @@ pub mod test {
     // given
     let mut txq = TransactionQueue::with_limits(
         PrioritizationStrategy::GasPriceOnly,
-        1 << FUTURE_QUEUE_LIMITS_SHIFT,
+        1,
         usize::max_value(),
         !U256::zero(),
         !U256::zero()
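The removed `FUTURE_QUEUE_LIMITS_SHIFT` encoded the future queue's limits as a right shift of the current queue's, per the `// 2 = 25%, 3 = 12.5%, 4 = 6.25%` comment; after this change both queues share the same limits. The arithmetic, checked:

```rust
fn main() {
    let current_limit: usize = 8192;
    assert_eq!(current_limit >> 2, 2048); // shift 2 => 25%
    assert_eq!(current_limit >> 3, 1024); // shift 3 => 12.5% (the old default)
    assert_eq!(current_limit >> 4, 512);  // shift 4 => 6.25%
}
```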

@@ -30,7 +30,7 @@ use parking_lot::RwLock;
 use rlp::{Rlp, RlpStream};
 use rustc_hex::FromHex;
 use util::*;
-use vm::{EnvInfo, CallType, ActionValue, ActionParams};
+use vm::{EnvInfo, CallType, ActionValue, ActionParams, ParamsType};
 
 use super::genesis::Genesis;
 use super::seal::Generic as GenericSeal;
@@ -504,6 +504,7 @@ impl Spec {
     code: Some(Arc::new(constructor.clone())),
     data: None,
     call_type: CallType::None,
+    params_type: ParamsType::Embedded,
 };
 
 let mut substate = Substate::new();

@@ -62,19 +62,19 @@ pub use self::backend::Backend;
 pub use self::substate::Substate;
 
 /// Used to return information about an `State::apply` operation.
-pub struct ApplyOutcome {
+pub struct ApplyOutcome<T, V> {
     /// The receipt for the applied transaction.
     pub receipt: Receipt,
     /// The output of the applied transaction.
     pub output: Bytes,
     /// The trace for the applied transaction, empty if tracing was not produced.
-    pub trace: Vec<FlatTrace>,
+    pub trace: Vec<T>,
     /// The VM trace for the applied transaction, None if tracing was not produced.
-    pub vm_trace: Option<VMTrace>
+    pub vm_trace: Option<V>
 }
 
 /// Result type for the execution ("application") of a transaction.
-pub type ApplyResult = Result<ApplyOutcome, Error>;
+pub type ApplyResult<T, V> = Result<ApplyOutcome<T, V>, Error>;
 
 /// Return type of proof validity check.
 #[derive(Debug, Clone)]
@@ -668,7 +668,7 @@ impl<B: Backend> State<B> {
 
     /// Execute a given transaction, producing a receipt and an optional trace.
     /// This will change the state accordingly.
-    pub fn apply(&mut self, env_info: &EnvInfo, machine: &Machine, t: &SignedTransaction, tracing: bool) -> ApplyResult {
+    pub fn apply(&mut self, env_info: &EnvInfo, machine: &Machine, t: &SignedTransaction, tracing: bool) -> ApplyResult<FlatTrace, VMTrace> {
         if tracing {
             let options = TransactOptions::with_tracing();
             self.apply_with_tracing(env_info, machine, t, options.tracer, options.vm_tracer)
@@ -687,7 +687,7 @@ impl<B: Backend> State<B> {
     t: &SignedTransaction,
     tracer: T,
     vm_tracer: V,
-) -> ApplyResult where
+) -> ApplyResult<T::Output, V::Output> where
     T: trace::Tracer,
     V: trace::VMTracer,
 {
@@ -728,7 +728,7 @@ impl<B: Backend> State<B> {
     // `virt` signals that we are executing outside of a block set and restrictions like
     // gas limits and gas costs should be lifted.
     fn execute<T, V>(&mut self, env_info: &EnvInfo, machine: &Machine, t: &SignedTransaction, options: TransactOptions<T, V>, virt: bool)
-        -> Result<Executed, ExecutionError> where T: trace::Tracer, V: trace::VMTracer,
+        -> Result<Executed<T::Output, V::Output>, ExecutionError> where T: trace::Tracer, V: trace::VMTracer,
     {
         let mut e = Executive::new(self, env_info, machine);
 

@@ -2,7 +2,7 @@
 
 use std::sync::Arc;
 use hash::keccak;
-use vm::{EnvInfo, ActionParams, ActionValue, CallType};
+use vm::{EnvInfo, ActionParams, ActionValue, CallType, ParamsType};
 use evm::{Factory, VMType};
 use executive::Executive;
 use state::Substate;
@@ -45,6 +45,7 @@ fn test_blockhash_eip210(factory: Factory) {
     code_hash: Some(blockhash_contract_code_hash),
     data: Some(H256::from(i - 1).to_vec()),
     call_type: CallType::Call,
+    params_type: ParamsType::Separate,
 };
 let mut ex = Executive::new(&mut state, &env_info, &machine);
 let mut substate = Substate::new();
@@ -67,6 +68,7 @@ fn test_blockhash_eip210(factory: Factory) {
     code_hash: Some(get_prev_hash_code_hash),
     data: None,
     call_type: CallType::Call,
+    params_type: ParamsType::Separate,
 };
 let mut ex = Executive::new(&mut state, &env_info, &machine);
 let mut substate = Substate::new();

@@ -83,6 +83,8 @@ fn should_prefix_address_properly() {
 }
 
 impl Tracer for ExecutiveTracer {
+    type Output = FlatTrace;
+
     fn prepare_trace_call(&self, params: &ActionParams) -> Option<Call> {
         Some(Call::from(params.clone()))
     }
@@ -201,6 +203,8 @@ impl ExecutiveVMTracer {
 }
 
 impl VMTracer for ExecutiveVMTracer {
+    type Output = VMTrace;
+
     fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool { true }
 
     fn trace_prepare_execute(&mut self, pc: usize, instruction: u8, gas_cost: U256) {

@@ -48,6 +48,9 @@ use header::BlockNumber;
 
 /// This trait is used by executive to build traces.
 pub trait Tracer: Send {
+    /// Data returned when draining the Tracer.
+    type Output;
+
     /// Prepares call trace for given params. Noop tracer should return None.
     fn prepare_trace_call(&self, params: &ActionParams) -> Option<Call>;
 
@@ -63,7 +66,7 @@ pub trait Tracer: Send {
     call: Option<Call>,
     gas_used: U256,
     output: Option<Bytes>,
-    subs: Vec<FlatTrace>,
+    subs: Vec<Self::Output>,
 );
 
 /// Stores trace create info.
@@ -73,14 +76,14 @@ pub trait Tracer: Send {
     gas_used: U256,
     code: Option<Bytes>,
     address: Address,
-    subs: Vec<FlatTrace>
+    subs: Vec<Self::Output>
 );
 
 /// Stores failed call trace.
-fn trace_failed_call(&mut self, call: Option<Call>, subs: Vec<FlatTrace>, error: TraceError);
+fn trace_failed_call(&mut self, call: Option<Call>, subs: Vec<Self::Output>, error: TraceError);
 
 /// Stores failed create trace.
-fn trace_failed_create(&mut self, create: Option<Create>, subs: Vec<FlatTrace>, error: TraceError);
+fn trace_failed_create(&mut self, create: Option<Create>, subs: Vec<Self::Output>, error: TraceError);
 
 /// Stores suicide info.
 fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address);
@@ -92,12 +95,15 @@ pub trait Tracer: Send {
     fn subtracer(&self) -> Self where Self: Sized;
 
     /// Consumes self and returns all traces.
-    fn drain(self) -> Vec<FlatTrace>;
+    fn drain(self) -> Vec<Self::Output>;
 }
 
 /// Used by executive to build VM traces.
 pub trait VMTracer: Send {
+    /// Data returned when draining the VMTracer.
+    type Output;
+
     /// Trace the progression of interpreter to next instruction.
     /// If tracer returns `false` it won't be called again.
     /// @returns true if `trace_prepare_execute` and `trace_executed` should be called.
@@ -116,7 +122,7 @@ pub trait VMTracer: Send {
     fn done_subtrace(&mut self, sub: Self) where Self: Sized;
 
     /// Consumes self and returns the VM trace.
-    fn drain(self) -> Option<VMTrace>;
+    fn drain(self) -> Option<Self::Output>;
 }
 
 /// `DbExtras` provides an interface to query extra data which is not stored in tracesdb,
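The new `type Output` on `Tracer`/`VMTracer` lets each implementation name what `drain` yields instead of hard-coding `FlatTrace`/`VMTrace`. A std-only sketch of the associated-type refactor:

```rust
trait Tracer {
    type Output;                       // what drain() yields
    fn drain(self) -> Vec<Self::Output>;
}

struct NoopTracer;
impl Tracer for NoopTracer {
    type Output = ();                  // produces nothing useful
    fn drain(self) -> Vec<()> { vec![] }
}

struct StringTracer(Vec<String>);
impl Tracer for StringTracer {
    type Output = String;              // e.g. evmbin's JSON trace lines
    fn drain(self) -> Vec<String> { self.0 }
}

fn main() {
    assert!(NoopTracer.drain().is_empty());
    assert_eq!(StringTracer(vec!["trace".into()]).drain(), vec!["trace".to_string()]);
}
```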

@@ -27,6 +27,8 @@ use trace::trace::{Call, Create, VMTrace, RewardType};
 pub struct NoopTracer;
 
 impl Tracer for NoopTracer {
+    type Output = FlatTrace;
+
     fn prepare_trace_call(&self, _: &ActionParams) -> Option<Call> {
         None
     }
@@ -76,6 +78,8 @@ impl Tracer for NoopTracer {
 pub struct NoopVMTracer;
 
 impl VMTracer for NoopVMTracer {
+    type Output = VMTrace;
+
     fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool { false }
 
     fn trace_prepare_execute(&mut self, _pc: usize, _instruction: u8, _gas_cost: U256) {}

@@ -21,7 +21,7 @@ use bigint::prelude::U256;
 use bigint::hash::{H256, H2048};
 use util::Address;
 use bytes::Bytes;
-use rlp::Rlp;
+use rlp::{self, Rlp};
 use header::BlockNumber;
 
 /// View onto block header rlp.
@@ -99,6 +99,14 @@ impl<'a> HeaderView<'a> {
     }
     seal
 }
 
+/// Returns a vector of seal fields (RLP-decoded).
+pub fn decode_seal(&self) -> Result<Vec<Bytes>, rlp::DecoderError> {
+    let seal = self.seal();
+    seal.into_iter()
+        .map(|s| rlp::UntrustedRlp::new(&s).data().map(|x| x.to_vec()))
+        .collect()
+}
 }
 
 #[cfg(test)]

@@ -35,6 +35,15 @@ pub enum ActionValue {
     Apparent(U256)
 }
 
+/// Type of the way parameters encoded
+#[derive(Clone, Debug)]
+pub enum ParamsType {
+    /// Parameters are included in code
+    Embedded,
+    /// Parameters are passed in data section
+    Separate,
+}
+
 impl ActionValue {
     /// Returns action value as U256.
     pub fn value(&self) -> U256 {
@@ -81,7 +90,8 @@ pub struct ActionParams {
     pub data: Option<Bytes>,
     /// Type of call
     pub call_type: CallType,
-
+    /// Param types encoding
+    pub params_type: ParamsType,
 }
 
 impl Default for ActionParams {
@@ -99,6 +109,7 @@ impl Default for ActionParams {
     code: None,
     data: None,
     call_type: CallType::None,
+    params_type: ParamsType::Separate,
 }
 }
 }
@@ -118,6 +129,7 @@ impl From<ethjson::vm::Transaction> for ActionParams {
     gas_price: t.gas_price.into(),
     value: ActionValue::Transfer(t.value.into()),
     call_type: match address.is_zero() { true => CallType::None, false => CallType::Call }, // TODO @debris is this correct?
+    params_type: ParamsType::Separate,
 }
 }
 }
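`ParamsType` above distinguishes where call data lives: `Embedded` appends it to the code blob (as the WASM `embedded_keccak` test later in this commit does), while `Separate` keeps it in its own field. A std-only sketch of the split:

```rust
enum ParamsType { Embedded, Separate }

// `module_size` marks where the code module ends when data is embedded;
// this mirrors how the WASM interpreter slices `code[..module_size]`.
fn payload<'a>(kind: &ParamsType, code: &'a [u8], module_size: usize, data: &'a [u8]) -> &'a [u8] {
    match *kind {
        ParamsType::Embedded => &code[module_size..],
        ParamsType::Separate => data,
    }
}

fn main() {
    let code = b"MODULEargs"; // 6 bytes of "module" + 4 bytes of appended data
    assert_eq!(payload(&ParamsType::Embedded, code, 6, b""), b"args");
    assert_eq!(payload(&ParamsType::Separate, code, 6, b"data"), b"data");
}
```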

@@ -35,9 +35,6 @@ pub enum ContractCreateResult {
     /// Returned when contract creation failed.
     /// VM doesn't have to know the reason.
     Failed,
-    /// Returned when contract creation failed.
-    /// VM doesn't have to know the reason.
-    FailedInStaticCall,
     /// Reverted with REVERT.
     Reverted(U256, ReturnData),
 }

@@ -35,7 +35,7 @@ mod error;
 
 pub mod tests;
 
-pub use action_params::{ActionParams, ActionValue};
+pub use action_params::{ActionParams, ActionValue, ParamsType};
 pub use call_type::CallType;
 pub use env_info::{EnvInfo, LastHashes};
 pub use schedule::{Schedule, CleanDustMode};

@@ -65,6 +65,7 @@ pub struct FakeExt {
     pub schedule: Schedule,
     pub balances: HashMap<Address, U256>,
     pub tracing: bool,
+    pub is_static: bool,
 }
 
 // similar to the normal `finalize` function, but ignoring NeedsReturn.
@@ -192,7 +193,7 @@ impl Ext for FakeExt {
 }
 
 fn is_static(&self) -> bool {
-    false
+    self.is_static
 }
 
 fn inc_sstore_clears(&mut self) {

@@ -8,7 +8,7 @@ byteorder = "1.0"
 ethcore-util = { path = "../../util" }
 ethcore-bigint = { path = "../../util/bigint" }
 log = "0.3"
-parity-wasm = "0.14"
+parity-wasm = "0.15"
 wasm-utils = { git = "https://github.com/paritytech/wasm-utils" }
 vm = { path = "../vm" }
 ethcore-logger = { path = "../../logger" }

@@ -115,7 +115,18 @@ impl vm::Vm for WasmInterpreter {
     &self.program,
 );
 
-let mut cursor = ::std::io::Cursor::new(&*code);
+let (mut cursor, data_position) = match params.params_type {
+    vm::ParamsType::Embedded => {
+        let module_size = parity_wasm::peek_size(&*code);
+        (
+            ::std::io::Cursor::new(&code[..module_size]),
+            module_size
+        )
+    },
+    vm::ParamsType::Separate => {
+        (::std::io::Cursor::new(&code[..]), 0)
+    },
+};
 
 let contract_module = wasm_utils::inject_gas_counter(
     elements::Module::deserialize(
@@ -134,8 +145,19 @@ impl vm::Vm for WasmInterpreter {
 let static_segment_cost = data_section_length * runtime.ext().schedule().wasm.static_region as u64;
 runtime.charge(|_| static_segment_cost).map_err(Error)?;
 
-let d_ptr = runtime.write_descriptor(&params.data.unwrap_or_default())
-    .map_err(Error)?;
+let d_ptr = {
+    match params.params_type {
+        vm::ParamsType::Embedded => {
+            runtime.write_descriptor(
+                if data_position < code.len() { &code[data_position..] } else { &[] }
+            ).map_err(Error)?
+        },
+        vm::ParamsType::Separate => {
+            runtime.write_descriptor(&params.data.unwrap_or_default())
+                .map_err(Error)?
+        }
+    }
+};
 
 {
     let execution_params = runtime.execution_params()

@@ -277,10 +277,6 @@ impl<'a, 'b> Runtime<'a, 'b> {
     self.gas_counter = self.gas_limit - gas_left.low_u64();
     Ok(Some((-1i32).into()))
 },
-vm::ContractCreateResult::FailedInStaticCall => {
-    trace!(target: "wasm", "runtime: create contract called in static context");
-    Err(interpreter::Error::Trap("CREATE in static context".to_owned()))
-},
 }
 }
 

@@ -677,3 +677,30 @@ fn externs() {
 
     assert_eq!(gas_left, U256::from(91_857));
 }
+
+#[test]
+fn embedded_keccak() {
+    ::ethcore_logger::init_log();
+    let mut code = load_sample!("keccak.wasm");
+    code.extend_from_slice(b"something");
+
+    let mut params = ActionParams::default();
+    params.gas = U256::from(100_000);
+    params.code = Some(Arc::new(code));
+    params.params_type = vm::ParamsType::Embedded;
+
+    let mut ext = FakeExt::new();
+
+    let (gas_left, result) = {
+        let mut interpreter = wasm_interpreter();
+        let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors");
+        match result {
+            GasLeft::Known(_) => { panic!("keccak should return payload"); },
+            GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()),
+        }
+    };
+
+    assert_eq!(H256::from_slice(&result), H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87"));
+    assert_eq!(gas_left, U256::from(80_452));
+}

@@ -22,5 +22,8 @@ evm = { path = "../ethcore/evm" }
 vm = { path = "../ethcore/vm" }
 panic_hook = { path = "../panic_hook" }
 
+[dev-dependencies]
+pretty_assertions = "0.1"
+
 [features]
 evm-debug = ["ethcore/evm-debug-tests"]
@ -16,11 +16,13 @@
|
||||
|
||||
//! JSON VM output.
|
||||
|
||||
use ethcore::trace;
|
||||
use std::collections::HashMap;
|
||||
use bigint::prelude::U256;
|
||||
use std::mem;
|
||||
|
||||
use bigint::hash::H256;
|
||||
use bigint::prelude::U256;
|
||||
use bytes::ToPretty;
|
||||
use ethcore::trace;
|
||||
|
||||
use display;
|
||||
use info as vm;
|
||||
@ -37,6 +39,9 @@ pub struct Informant {
|
||||
stack: Vec<U256>,
|
||||
memory: Vec<u8>,
|
||||
storage: HashMap<H256, H256>,
|
||||
traces: Vec<String>,
|
||||
subtraces: Vec<String>,
|
||||
unmatched: bool,
|
||||
}
|
||||
|
||||
impl Informant {
|
||||
@ -70,28 +75,43 @@ impl vm::Informant for Informant {
|
||||
self.gas_used = gas;
|
||||
}
|
||||
|
||||
fn finish(result: Result<vm::Success, vm::Failure>) {
|
||||
fn finish(result: vm::RunResult<Self::Output>) {
|
||||
match result {
|
||||
Ok(success) => println!(
|
||||
Ok(success) => {
|
||||
for trace in success.traces.unwrap_or_else(Vec::new) {
|
||||
println!("{}", trace);
|
||||
}
|
||||
|
||||
println!(
|
||||
"{{\"output\":\"0x{output}\",\"gasUsed\":\"{gas:x}\",\"time\":{time}}}",
|
||||
output = success.output.to_hex(),
|
||||
gas = success.gas_used,
|
||||
time = display::as_micros(&success.time),
|
||||
),
|
||||
Err(failure) => println!(
|
||||
)
|
||||
},
|
||||
Err(failure) => {
|
||||
for trace in failure.traces.unwrap_or_else(Vec::new) {
|
||||
println!("{}", trace);
|
||||
}
|
||||
|
||||
println!(
|
||||
"{{\"error\":\"{error}\",\"gasUsed\":\"{gas:x}\",\"time\":{time}}}",
|
||||
error = failure.error,
|
||||
gas = failure.gas_used,
|
||||
time = display::as_micros(&failure.time),
|
||||
),
|
||||
)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl trace::VMTracer for Informant {
|
||||
type Output = Vec<String>;
|
||||
|
||||
fn trace_next_instruction(&mut self, pc: usize, instruction: u8) -> bool {
|
||||
self.pc = pc;
|
||||
self.instruction = instruction;
|
||||
self.unmatched = true;
|
||||
true
|
||||
}
|
||||
|
||||
@ -104,19 +124,21 @@ impl trace::VMTracer for Informant {
|
||||
fn trace_executed(&mut self, gas_used: U256, stack_push: &[U256], mem_diff: Option<(usize, &[u8])>, store_diff: Option<(U256, U256)>) {
|
||||
let info = ::evm::INSTRUCTIONS[self.instruction as usize];
|
||||
|
||||
println!(
|
||||
let trace = format!(
|
||||
"{{\"pc\":{pc},\"op\":{op},\"opName\":\"{name}\",\"gas\":{gas},\"gasCost\":{gas_cost},\"memory\":{memory},\"stack\":{stack},\"storage\":{storage},\"depth\":{depth}}}",
|
||||
pc = self.pc,
|
||||
op = self.instruction,
|
||||
name = info.name,
|
||||
gas = display::u256_as_str(&(gas_used + self.gas_cost)),
|
||||
gas = display::u256_as_str(&(gas_used.saturating_add(self.gas_cost))),
|
||||
gas_cost = display::u256_as_str(&self.gas_cost),
|
||||
memory = self.memory(),
|
||||
stack = self.stack(),
|
||||
storage = self.storage(),
|
||||
depth = self.depth,
|
||||
);
|
||||
self.traces.push(trace);
|
||||
|
||||
self.unmatched = false;
|
||||
self.gas_used = gas_used;
|
||||
|
||||
let len = self.stack.len();
|
||||
@ -133,6 +155,11 @@ impl trace::VMTracer for Informant {
|
||||
if let Some((pos, val)) = store_diff {
|
||||
self.storage.insert(pos.into(), val.into());
|
||||
}
|
||||
|
||||
|
||||
if !self.subtraces.is_empty() {
|
||||
self.traces.extend(mem::replace(&mut self.subtraces, vec![]));
|
||||
}
|
||||
}
|
||||
|
||||
fn prepare_subtrace(&self, code: &[u8]) -> Self where Self: Sized {
|
||||
@ -143,14 +170,21 @@ impl trace::VMTracer for Informant {
|
||||
vm
|
||||
}
|
||||
|
||||
fn done_subtrace(&mut self, mut sub: Self) {
|
||||
if sub.depth == 1 {
|
||||
// print last line with final state:
|
||||
sub.gas_cost = 0.into();
|
||||
let gas_used = sub.gas_used;
|
||||
trace::VMTracer::trace_executed(&mut sub, gas_used, &[], None, None);
|
||||
fn done_subtrace(&mut self, sub: Self) {
|
||||
if let Some(subtraces) = sub.drain() {
|
||||
self.subtraces.extend(subtraces);
|
||||
}
|
||||
}
|
||||
|
||||
fn drain(self) -> Option<trace::VMTrace> { None }
|
||||
fn drain(mut self) -> Option<Self::Output> {
|
||||
if self.unmatched {
|
||||
// print last line with final state:
|
||||
self.gas_cost = 0.into();
|
||||
let gas_used = self.gas_used;
|
||||
self.trace_executed(gas_used, &[], None, None);
|
||||
} else if !self.subtraces.is_empty() {
|
||||
self.traces.extend(mem::replace(&mut self.subtraces, vec![]));
|
||||
}
|
||||
Some(self.traces)
|
||||
}
|
||||
}
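// Editor's note (illustrative usage, assumption): `drain` consumes the tracer,
// flushing the final unmatched instruction and any buffered subtraces:
//
//     let mut tracer = Informant::default();
//     // ... run the VM with `&mut tracer` as the tracer ...
//     let lines: Vec<String> = tracer.drain().unwrap_or_default();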

@@ -31,7 +31,7 @@ impl vm::Informant for Informant {
 		println!("Test: {} ({})", name, action);
 	}

-	fn finish(result: Result<vm::Success, vm::Failure>) {
+	fn finish(result: vm::RunResult<Self::Output>) {
 		match result {
 			Ok(success) => {
 				println!("Output: 0x{}", success.output.to_hex());

@@ -47,7 +47,9 @@ impl vm::Informant for Informant {
 }

 impl trace::VMTracer for Informant {
+	type Output = ();
+
 	fn prepare_subtrace(&self, _code: &[u8]) -> Self where Self: Sized { Default::default() }
 	fn done_subtrace(&mut self, _sub: Self) {}
-	fn drain(self) -> Option<trace::VMTrace> { None }
+	fn drain(self) -> Option<()> { None }
 }

@@ -22,6 +22,7 @@ use bigint::hash::H256;
 use ethcore::{trace, spec, transaction, pod_state};
 use ethcore::client::{self, EvmTestClient, EvmTestError, TransactResult};
 use ethjson;
+use vm::ActionParams;

 /// VM execution informant
 pub trait Informant: trace::VMTracer {

@@ -30,27 +31,51 @@ pub trait Informant: trace::VMTracer {
 	/// Set initial gas.
 	fn set_gas(&mut self, _gas: U256) {}
 	/// Display final result.
-	fn finish(result: Result<Success, Failure>);
+	fn finish(result: RunResult<Self::Output>);
 }

 /// Execution finished correctly
-pub struct Success {
+#[derive(Debug)]
+pub struct Success<T> {
 	/// Used gas
 	pub gas_used: U256,
 	/// Output as bytes
 	pub output: Vec<u8>,
 	/// Time Taken
 	pub time: Duration,
+	/// Traces
+	pub traces: Option<T>,
 }

 /// Execution failed
-pub struct Failure {
+#[derive(Debug)]
+pub struct Failure<T> {
 	/// Used gas
 	pub gas_used: U256,
 	/// Internal error
 	pub error: EvmTestError,
 	/// Duration
 	pub time: Duration,
+	/// Traces
+	pub traces: Option<T>,
 }

+/// EVM Execution result
+pub type RunResult<T> = Result<Success<T>, Failure<T>>;
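// Editor's note (illustrative sketch, assumed helper): both arms of `RunResult`
// carry the optional traces, so callers can extract them uniformly:
//
//     fn traces_of<T>(result: RunResult<T>) -> Option<T> {
//         match result {
//             Ok(Success { traces, .. }) | Err(Failure { traces, .. }) => traces,
//         }
//     }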
+
+/// Execute given `ActionParams` and return the result.
+pub fn run_action<T: Informant>(
+	spec: &spec::Spec,
+	params: ActionParams,
+	mut informant: T,
+) -> RunResult<T::Output> {
+	informant.set_gas(params.gas);
+	run(spec, params.gas, None, |mut client| {
+		let result = client
+			.call(params, &mut informant)
+			.map(|r| (r.gas_left, r.return_data.to_vec()));
+		(result, informant.drain())
+	})
+}
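// Editor's note (illustrative usage, mirroring `run_call` and the tests below):
//
//     let spec = ::ethcore::ethereum::new_foundation(&::std::env::temp_dir());
//     let informant = ::display::json::Informant::default();
//     let result = run_action(&spec, params, informant);
//     T::finish(result); // prints the traces plus the summary line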

 /// Execute given Transaction and verify resulting state root.

@@ -82,19 +107,19 @@ pub fn run_transaction<T: Informant>(
 	let result = client.transact(env_info, transaction, informant);
 	match result {
 		TransactResult::Ok { state_root, .. } if state_root != post_root => {
-			Err(EvmTestError::PostCondition(format!(
+			(Err(EvmTestError::PostCondition(format!(
 				"State root mismatch (got: {}, expected: {})",
 				state_root,
 				post_root,
-			)))
+			))), None)
 		},
-		TransactResult::Ok { gas_left, output, .. } => {
-			Ok((gas_left, output))
+		TransactResult::Ok { gas_left, output, vm_trace, .. } => {
+			(Ok((gas_left, output)), vm_trace)
 		},
 		TransactResult::Err { error, .. } => {
-			Err(EvmTestError::PostCondition(format!(
+			(Err(EvmTestError::PostCondition(format!(
 				"Unexpected execution error: {:?}", error
-			)))
+			))), None)
 		},
 	}
 });

@@ -103,8 +128,13 @@ pub fn run_transaction<T: Informant>(
 }

 /// Execute VM with given `ActionParams`
-pub fn run<'a, F, T>(spec: &'a spec::Spec, initial_gas: U256, pre_state: T, run: F) -> Result<Success, Failure> where
-	F: FnOnce(EvmTestClient) -> Result<(U256, Vec<u8>), EvmTestError>,
+pub fn run<'a, F, T, X>(
+	spec: &'a spec::Spec,
+	initial_gas: U256,
+	pre_state: T,
+	run: F,
+) -> RunResult<X> where
+	F: FnOnce(EvmTestClient) -> (Result<(U256, Vec<u8>), EvmTestError>, Option<X>),
 	T: Into<Option<&'a pod_state::PodState>>,
 {
 	let test_client = match pre_state.into() {

@@ -113,23 +143,135 @@ pub fn run<'a, F, T>(spec: &'a spec::Spec, initial_gas: U256, pre_state: T, run:
 	}.map_err(|error| Failure {
 		gas_used: 0.into(),
 		error,
-		time: Duration::from_secs(0)
+		time: Duration::from_secs(0),
+		traces: None,
 	})?;

 	let start = Instant::now();
 	let result = run(test_client);
-	let duration = start.elapsed();
+	let time = start.elapsed();

 	match result {
-		Ok((gas_left, output)) => Ok(Success {
+		(Ok((gas_left, output)), traces) => Ok(Success {
 			gas_used: initial_gas - gas_left,
-			output: output,
-			time: duration,
+			output,
+			time,
+			traces,
 		}),
-		Err(e) => Err(Failure {
+		(Err(error), traces) => Err(Failure {
 			gas_used: initial_gas,
-			error: e,
-			time: duration,
+			error,
+			time,
+			traces,
 		}),
 	}
 }
+
+#[cfg(test)]
+mod tests {
+	use std::sync::Arc;
+	use rustc_hex::FromHex;
+	use super::*;
+
+	#[test]
+	fn should_trace_failure() {
+		run_test(
+			"60F8d6",
+			0xffff,
+			r#"
+			{"pc":0,"op":96,"opName":"PUSH1","gas":"0xffff","gasCost":"0x3","memory":"0x","stack":[],"storage":{},"depth":1}
+			{"pc":2,"op":214,"opName":"","gas":"0xfffc","gasCost":"0x0","memory":"0x","stack":["0xf8"],"storage":{},"depth":1}
+			"#,
+		);
+
+		run_test(
+			"F8d6",
+			0xffff,
+			r#"
+			{"pc":0,"op":248,"opName":"","gas":"0xffff","gasCost":"0x0","memory":"0x","stack":[],"storage":{},"depth":1}
+			"#,
+		);
+	}
+
+	#[test]
+	fn should_trace_create_correctly() {
+		run_test(
+			"32343434345830f138343438323439f0",
+			0xffff,
+			r#"
+			{"pc":0,"op":50,"opName":"ORIGIN","gas":"0xffff","gasCost":"0x2","memory":"0x","stack":[],"storage":{},"depth":1}
+			{"pc":1,"op":52,"opName":"CALLVALUE","gas":"0xfffd","gasCost":"0x2","memory":"0x","stack":["0x0"],"storage":{},"depth":1}
+			{"pc":2,"op":52,"opName":"CALLVALUE","gas":"0xfffb","gasCost":"0x2","memory":"0x","stack":["0x0","0x0"],"storage":{},"depth":1}
+			{"pc":3,"op":52,"opName":"CALLVALUE","gas":"0xfff9","gasCost":"0x2","memory":"0x","stack":["0x0","0x0","0x0"],"storage":{},"depth":1}
+			{"pc":4,"op":52,"opName":"CALLVALUE","gas":"0xfff7","gasCost":"0x2","memory":"0x","stack":["0x0","0x0","0x0","0x0"],"storage":{},"depth":1}
+			{"pc":5,"op":88,"opName":"PC","gas":"0xfff5","gasCost":"0x2","memory":"0x","stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{},"depth":1}
+			{"pc":6,"op":48,"opName":"ADDRESS","gas":"0xfff3","gasCost":"0x2","memory":"0x","stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{},"depth":1}
+			{"pc":7,"op":241,"opName":"CALL","gas":"0xfff1","gasCost":"0x61d0","memory":"0x","stack":["0x0","0x0","0x0","0x0","0x0","0x5","0x0"],"storage":{},"depth":1}
+			{"pc":8,"op":56,"opName":"CODESIZE","gas":"0x9e21","gasCost":"0x2","memory":"0x","stack":["0x1"],"storage":{},"depth":1}
+			{"pc":9,"op":52,"opName":"CALLVALUE","gas":"0x9e1f","gasCost":"0x2","memory":"0x","stack":["0x1","0x10"],"storage":{},"depth":1}
+			{"pc":10,"op":52,"opName":"CALLVALUE","gas":"0x9e1d","gasCost":"0x2","memory":"0x","stack":["0x1","0x10","0x0"],"storage":{},"depth":1}
+			{"pc":11,"op":56,"opName":"CODESIZE","gas":"0x9e1b","gasCost":"0x2","memory":"0x","stack":["0x1","0x10","0x0","0x0"],"storage":{},"depth":1}
+			{"pc":12,"op":50,"opName":"ORIGIN","gas":"0x9e19","gasCost":"0x2","memory":"0x","stack":["0x1","0x10","0x0","0x0","0x10"],"storage":{},"depth":1}
+			{"pc":13,"op":52,"opName":"CALLVALUE","gas":"0x9e17","gasCost":"0x2","memory":"0x","stack":["0x1","0x10","0x0","0x0","0x10","0x0"],"storage":{},"depth":1}
+			{"pc":14,"op":57,"opName":"CODECOPY","gas":"0x9e15","gasCost":"0x9","memory":"0x","stack":["0x1","0x10","0x0","0x0","0x10","0x0","0x0"],"storage":{},"depth":1}
+			{"pc":15,"op":240,"opName":"CREATE","gas":"0x9e0c","gasCost":"0x9e0c","memory":"0x32343434345830f138343438323439f0","stack":["0x1","0x10","0x0","0x0"],"storage":{},"depth":1}
+			{"pc":0,"op":50,"opName":"ORIGIN","gas":"0x210c","gasCost":"0x2","memory":"0x","stack":[],"storage":{},"depth":2}
+			{"pc":1,"op":52,"opName":"CALLVALUE","gas":"0x210a","gasCost":"0x2","memory":"0x","stack":["0x0"],"storage":{},"depth":2}
+			{"pc":2,"op":52,"opName":"CALLVALUE","gas":"0x2108","gasCost":"0x2","memory":"0x","stack":["0x0","0x0"],"storage":{},"depth":2}
+			{"pc":3,"op":52,"opName":"CALLVALUE","gas":"0x2106","gasCost":"0x2","memory":"0x","stack":["0x0","0x0","0x0"],"storage":{},"depth":2}
+			{"pc":4,"op":52,"opName":"CALLVALUE","gas":"0x2104","gasCost":"0x2","memory":"0x","stack":["0x0","0x0","0x0","0x0"],"storage":{},"depth":2}
+			{"pc":5,"op":88,"opName":"PC","gas":"0x2102","gasCost":"0x2","memory":"0x","stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{},"depth":2}
+			{"pc":6,"op":48,"opName":"ADDRESS","gas":"0x2100","gasCost":"0x2","memory":"0x","stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{},"depth":2}
+			{"pc":7,"op":241,"opName":"CALL","gas":"0x20fe","gasCost":"0x0","memory":"0x","stack":["0x0","0x0","0x0","0x0","0x0","0x5","0xbd770416a3345f91e4b34576cb804a576fa48eb1"],"storage":{},"depth":2}
+			"#,
+		)
+	}
+
+	fn run_test<T: Into<U256>>(
+		code: &str,
+		gas: T,
+		expected: &str,
+	) {
+		let mut params = ActionParams::default();
+		params.code = Some(Arc::new(code.from_hex().unwrap()));
+		params.gas = gas.into();
+
+		let spec = ::ethcore::ethereum::new_foundation(&::std::env::temp_dir());
+		let informant = ::display::json::Informant::default();
+		let result = run_action(&spec, params, informant);
+		let expected = expected.split("\n")
+			.map(|x| x.trim())
+			.map(|x| x.to_owned())
+			.filter(|x| !x.is_empty())
+			.collect::<Vec<_>>();
+		match result {
+			Ok(Success { traces, .. }) => {
+				assert_traces_eq(&traces.unwrap(), &expected);
+			},
+			Err(Failure { traces, .. }) => {
+				assert_traces_eq(&traces.unwrap(), &expected);
+			},
+		}
+	}
+
+	fn assert_traces_eq(
+		a: &[String],
+		b: &[String],
+	) {
+		let mut ita = a.iter();
+		let mut itb = b.iter();
+
+		loop {
+			match (ita.next(), itb.next()) {
+				(Some(a), Some(b)) => {
+					assert_eq!(a, b);
+					println!("{}", a);
+				},
+				(None, None) => return,
+				e => {
+					panic!("Traces mismatch: {:?}", e);
+				}
+			}
+		}
+	}
+}

@@ -32,6 +32,10 @@ extern crate vm;
 extern crate evm;
 extern crate panic_hook;

+#[cfg(test)]
+#[macro_use]
+extern crate pretty_assertions;
+
 use std::sync::Arc;
 use std::{fmt, fs};
 use std::path::PathBuf;

@@ -136,7 +140,7 @@ fn run_state_test(args: Args) {
 	}
 }

-fn run_call<T: Informant>(args: Args, mut informant: T) {
+fn run_call<T: Informant>(args: Args, informant: T) {
 	let from = arg(args.from(), "--from");
 	let to = arg(args.to(), "--to");
 	let code = arg(args.code(), "--code");

@@ -160,10 +164,7 @@ fn run_call<T: Informant>(args: Args, mut informant: T) {
 	params.code = code.map(Arc::new);
 	params.data = data;

-	informant.set_gas(gas);
-	let result = info::run(&spec, gas, None, |mut client| {
-		client.call(params, &mut informant).map(|r| (r.gas_left, r.return_data.to_vec()))
-	});
+	let result = info::run_action(&spec, params, informant);
 	T::finish(result);
 }

@@ -187,7 +188,7 @@ impl Args {
 	pub fn gas(&self) -> Result<U256, String> {
 		match self.flag_gas {
 			Some(ref gas) => gas.parse().map_err(to_string),
-			None => Ok(!U256::zero()),
+			None => Ok(U256::from(u64::max_value())),
 		}
 	}
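// Editor's note (assumption): the new default caps the implicit gas limit at
// u64::max_value() instead of the full 256-bit maximum, which keeps the value
// representable by 64-bit gas counters such as `gas_left.low_u64()` in the
// wasm runtime above.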
BIN js/assets/parity-logo-black.png (new file, 2.6 KiB; binary not shown)
BIN js/assets/parity-logo.png (new file, 3.8 KiB; binary not shown)
js/package-lock.json (generated, 2158 lines changed; diff suppressed because it is too large)
@@ -1,6 +1,6 @@
 {
   "name": "Parity",
-  "version": "1.9.1",
+  "version": "1.9.5",
   "main": "src/index.parity.js",
   "jsnext:main": "src/index.parity.js",
   "author": "Parity Team <admin@parity.io>",

@@ -180,6 +180,7 @@
   "lodash.omitby": "4.6.0",
   "lodash.throttle": "4.1.1",
   "lodash.uniq": "4.5.0",
+  "oo7": "paritytech/oo7#34fdb5991f4e59b2cf84260cab48cec9a57d88c0",
   "prop-types": "15.5.10",
   "react": "15.6.1",
   "react-dom": "15.6.1",

@@ -18,6 +18,23 @@
 .application {
   box-sizing: border-box;
   margin-top: 2.75em;
+
+  .logo {
+    top: 0;
+    right: 0;
+    left: 0;
+    bottom: 0;
+    opacity: 0.2;
+    position: fixed;
+    padding: 7em;
+    text-align: center;
+    z-index: 0;
+
+    img {
+      display: inline-block;
+      margin: 0 auto;
+    }
+  }
 }

 .container {

@@ -35,6 +35,7 @@ import Snackbar from '../Snackbar';
 import Status from '../Status';
 import UpgradeParity from '../UpgradeParity';

+import parityLogo from '../../assets/parity-logo-black.png';
 import Store from './store';
 import styles from './application.css';

@@ -132,6 +133,9 @@ class Application extends Component {

     return (
       <div className={ styles.container }>
+        <div className={ styles.logo }>
+          <img src={ parityLogo } />
+        </div>
         <Errors />
         { children }
       </div>

@@ -18,9 +18,11 @@
 .frame {
   background: white;
   border: 0;
+  opacity: 0;
+  position: absolute;
   height: 100%;
   width: 100%;
   z-index: 1;
 }

 .full {

@@ -32,13 +34,4 @@
   font-family: 'Roboto', sans-serif;
   font-size: 16px;
   font-weight: 300;
-
-  .text {
-    text-align: center;
-    padding: 5em;
-    font-size: 2em;
-    color: #999;
-    overflow: hidden;
-    text-overflow: ellipsis;
-  }
 }

@@ -24,6 +24,7 @@ import builtinDapps from '@parity/shared/config/dappsBuiltin.json';
 import viewsDapps from '@parity/shared/config/dappsViews.json';
 import DappsStore from '@parity/shared/mobx/dappsStore';
 import HistoryStore from '@parity/shared/mobx/historyStore';
+// import { Bond } from 'oo7';

 import styles from './dapp.css';

@@ -90,16 +91,7 @@ export default class Dapp extends Component {
     const { app, loading } = this.state;

     if (loading) {
-      return (
-        <div className={ styles.full }>
-          <div className={ styles.text }>
-            <FormattedMessage
-              id='dapp.loading'
-              defaultMessage='Loading'
-            />
-          </div>
-        </div>
-      );
+      return null;
     }

     if (!app) {

@@ -157,11 +149,20 @@ export default class Dapp extends Component {
       <iframe
         className={ styles.frame }
         frameBorder={ 0 }
+        id='dappFrame'
         name={ name }
+        onLoad={ this.onDappLoad }
         sandbox='allow-forms allow-popups allow-same-origin allow-scripts allow-top-navigation'
         scrolling='auto'
         src={ `${src}${hash}` }
       />
     );
   }

+  onDappLoad = () => {
+    const frame = document.getElementById('dappFrame');
+
+    frame.style.opacity = 1;
+    // frame.contentWindow.injectedBondCache = Bond.cache;
+  }
 }

@@ -94,7 +94,6 @@ class Dapps extends Component {
   }

   renderList (items, overlay) {
-    console.log(items);
    return (
      <SectionList
        items={ items }

@@ -29,7 +29,7 @@ import Button from '@parity/ui/Button';
 import ContainerTitle from '@parity/ui/Container/Title';
 import IdentityIcon from '@parity/ui/IdentityIcon';
 import GradientBg from '@parity/ui/GradientBg';
-import SelectionList from '@parity/ui/SectionList';
+import SelectionList from '@parity/ui/SelectionList';
 import SignerPending from '@parity/ui/SignerPending';
 import { CancelIcon } from '@parity/ui/Icons';

@@ -45,6 +45,7 @@ function initProvider () {
   });

   window.ethereum = ethereum;
+  window.isParity = true;

   return ethereum;
 }

@@ -209,8 +209,11 @@ module.exports = {
     : Api.util.sha3(dapp.url);

   return [
-    'index.html', 'dist.css', 'dist.css.map', 'dist.js', 'dist.js.map'
+    'index.html', 'dist.css', 'dist.js',
+    isProd ? null : 'dist.css.map',
+    isProd ? null : 'dist.js.map'
   ]
+    .filter((file) => file)
    .map((file) => path.join(dir, file))
    .filter((from) => fs.existsSync(from))
    .map((from) => ({

@@ -16,7 +16,7 @@

 module.exports = {
   test: /\.js$/,
-  include: /node_modules\/@parity\//,
+  include: /node_modules\/(@parity|oo7)\//,
   use: [ {
     loader: 'happypack/loader',
     options: {

@@ -11,8 +11,8 @@
 !define VERSIONMAJOR 1
 !define VERSIONMINOR 9
 !define VERSIONBUILD 0
-!define ARGS "--warp"
-!define FIRST_START_ARGS "ui --warp --mode=passive"
+!define ARGS ""
+!define FIRST_START_ARGS "--mode=passive ui"

 !addplugindir .\
BIN nsis/logo.ico (binary not shown; 66 KiB before, 114 KiB after)
@@ -470,7 +470,7 @@ usage! {
 	"--jsonrpc-hosts=[HOSTS]",
 	"List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\".",

-	ARG arg_jsonrpc_threads: (usize) = 0usize, or |c: &Config| otry!(c.rpc).processing_threads,
+	ARG arg_jsonrpc_threads: (usize) = 4usize, or |c: &Config| otry!(c.rpc).processing_threads,
 	"--jsonrpc-threads=[THREADS]",
 	"Turn on additional processing threads in all RPC servers. Setting this to non-zero value allows parallel cpu-heavy queries execution.",

@@ -610,7 +610,11 @@ usage! {

 	FLAG flag_refuse_service_transactions: (bool) = false, or |c: &Config| otry!(c.mining).refuse_service_transactions.clone(),
 	"--refuse-service-transactions",
-	"Always refuse service transactions..",
+	"Always refuse service transactions.",

+	FLAG flag_infinite_pending_block: (bool) = false, or |c: &Config| otry!(c.mining).infinite_pending_block.clone(),
+	"--infinite-pending-block",
+	"Pending block will be created with maximal possible gas limit and will execute all transactions in the queue. Note that such block is invalid and should never be attempted to be mined.",
+
 	FLAG flag_no_persistent_txqueue: (bool) = false, or |c: &Config| otry!(c.parity).no_persistent_txqueue,
 	"--no-persistent-txqueue",

@@ -694,7 +698,7 @@ usage! {

 	ARG arg_min_gas_price: (Option<u64>) = None, or |c: &Config| otry!(c.mining).min_gas_price.clone(),
 	"--min-gas-price=[STRING]",
-	"Minimum amount of Wei per GAS to be paid for a transaction to be accepted for mining. Overrides --basic-tx-usd.",
+	"Minimum amount of Wei per GAS to be paid for a transaction to be accepted for mining. Overrides --usd-per-tx.",

 	ARG arg_author: (Option<String>) = None, or |c: &Config| otry!(c.mining).author.clone(),
 	"--author=[ADDRESS]",

@@ -1140,6 +1144,7 @@ struct Mining {
 	remove_solved: Option<bool>,
 	notify_work: Option<Vec<String>>,
 	refuse_service_transactions: Option<bool>,
+	infinite_pending_block: Option<bool>,
 }

 #[derive(Default, Debug, PartialEq, Deserialize)]

@@ -1449,7 +1454,7 @@ mod tests {
 	arg_jsonrpc_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(),
 	arg_jsonrpc_hosts: "none".into(),
 	arg_jsonrpc_server_threads: None,
-	arg_jsonrpc_threads: 0,
+	arg_jsonrpc_threads: 4,

 	// WS
 	flag_no_ws: false,

@@ -1515,6 +1520,7 @@ mod tests {
 	flag_remove_solved: false,
 	arg_notify_work: Some("http://localhost:3001".into()),
 	flag_refuse_service_transactions: false,
+	flag_infinite_pending_block: false,

 	flag_stratum: false,
 	arg_stratum_interface: "local".to_owned(),

@@ -1755,6 +1761,7 @@ mod tests {
 	remove_solved: None,
 	notify_work: None,
 	refuse_service_transactions: None,
+	infinite_pending_block: None,
 	}),
 	footprint: Some(Footprint {
 	tracing: Some("on".into()),
@@ -40,7 +40,7 @@ use parity_rpc::NetworkSettings;
 use cache::CacheConfig;
 use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, replace_home_and_local,
 	geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address, to_gas_limit, to_queue_strategy};
-use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras};
+use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras, SpecType};
 use ethcore_logger::Config as LogConfig;
 use dir::{self, Directories, default_hypervisor_path, default_local_path, default_data_path};
 use dapps::Configuration as DappsConfiguration;

@@ -109,7 +109,7 @@ impl Configuration {
 		let pruning = self.args.arg_pruning.parse()?;
 		let pruning_history = self.args.arg_pruning_history;
 		let vm_type = self.vm_type()?;
-		let spec = self.chain().parse()?;
+		let spec = self.chain()?;
 		let mode = match self.args.arg_mode.as_ref() {
 			"last" => None,
 			mode => Some(to_mode(&mode, self.args.arg_mode_timeout, self.args.arg_mode_alarm)?),

@@ -336,7 +336,7 @@ impl Configuration {
 			pruning_memory: self.args.arg_pruning_memory,
 			daemon: daemon,
 			logger_config: logger_config.clone(),
-			miner_options: self.miner_options(self.args.arg_reseal_min_period)?,
+			miner_options: self.miner_options()?,
 			ntp_servers: self.ntp_servers(),
 			ws_conf: ws_conf,
 			http_conf: http_conf,

@@ -441,15 +441,16 @@ impl Configuration {
 		}
 	}

-	fn chain(&self) -> String {
-		if let Some(ref s) = self.spec_name_override {
+	fn chain(&self) -> Result<SpecType, String> {
+		let name = if let Some(ref s) = self.spec_name_override {
 			s.clone()
-		}
-		else if self.args.flag_testnet {
+		} else if self.args.flag_testnet {
 			"testnet".to_owned()
 		} else {
 			self.args.arg_chain.clone()
-		}
+		};
+
+		Ok(name.parse()?)
 	}
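// Editor's note (illustrative effect): `chain()` now yields a typed `SpecType`,
// so callers compare variants instead of strings, e.g. (from `miner_options`):
//
//     let is_dev_chain = self.chain()? == SpecType::Dev;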

 	fn max_peers(&self) -> u32 {

@@ -504,8 +505,9 @@ impl Configuration {
 		} else { Ok(None) }
 	}

-	fn miner_options(&self, reseal_min_period: u64) -> Result<MinerOptions, String> {
-		if self.args.flag_force_sealing && reseal_min_period == 0 {
+	fn miner_options(&self) -> Result<MinerOptions, String> {
+		let is_dev_chain = self.chain()? == SpecType::Dev;
+		if is_dev_chain && self.args.flag_force_sealing && self.args.arg_reseal_min_period == 0 {
 			return Err("Force sealing can't be used with reseal_min_period = 0".into());
 		}

@@ -528,7 +530,7 @@ impl Configuration {
 			tx_queue_gas_limit: to_gas_limit(&self.args.arg_tx_queue_gas)?,
 			tx_queue_strategy: to_queue_strategy(&self.args.arg_tx_queue_strategy)?,
 			pending_set: to_pending_set(&self.args.arg_relay_set)?,
-			reseal_min_period: Duration::from_millis(reseal_min_period),
+			reseal_min_period: Duration::from_millis(self.args.arg_reseal_min_period),
 			reseal_max_period: Duration::from_millis(self.args.arg_reseal_max_period),
 			work_queue_size: self.args.arg_work_queue_size,
 			enable_resubmission: !self.args.flag_remove_solved,

@@ -541,6 +543,7 @@ impl Configuration {
 				None => Banning::Disabled,
 			},
 			refuse_service_transactions: self.args.flag_refuse_service_transactions,
+			infinite_pending_block: self.args.flag_infinite_pending_block,
 		};

 		Ok(options)

@@ -649,6 +652,8 @@ impl Configuration {
 			return Ok(GasPricerConfig::Fixed(to_u256(dec)?));
 		} else if let Some(dec) = self.args.arg_min_gas_price {
 			return Ok(GasPricerConfig::Fixed(U256::from(dec)));
+		} else if self.chain()? != SpecType::Foundation {
+			return Ok(GasPricerConfig::Fixed(U256::zero()));
 		}

 		let usd_per_tx = to_price(&self.args.arg_usd_per_tx)?;

@@ -885,7 +890,7 @@ impl Configuration {
 		let net_addresses = self.net_addresses()?;
 		Ok(NetworkSettings {
 			name: self.args.arg_identity.clone(),
-			chain: self.chain(),
+			chain: format!("{}", self.chain()?),
 			network_port: net_addresses.0.port(),
 			rpc_enabled: http_conf.enabled,
 			rpc_interface: http_conf.interface,

@@ -916,8 +921,6 @@ impl Configuration {
 	}

 	fn directories(&self) -> Directories {
-		use path;
-
 		let local_path = default_local_path();
 		let base_path = self.args.arg_base_path.as_ref().or_else(|| self.args.arg_datadir.as_ref()).map_or_else(|| default_data_path(), |s| s.clone());
 		let data_path = replace_home("", &base_path);

@@ -937,21 +940,6 @@ impl Configuration {
 		let secretstore_path = replace_home(&data_path, &self.args.arg_secretstore_path);
 		let ui_path = replace_home(&data_path, &self.args.arg_ui_path);

-		if self.args.flag_geth && !cfg!(windows) {
-			let geth_root = if self.chain() == "testnet".to_owned() { path::ethereum::test() } else { path::ethereum::default() };
-			::std::fs::create_dir_all(geth_root.as_path()).unwrap_or_else(
-				|e| warn!("Failed to create '{}' for geth mode: {}", &geth_root.to_str().unwrap(), e));
-		}
-
-		if cfg!(feature = "ipc") && !cfg!(feature = "windows") {
-			let mut path_buf = PathBuf::from(data_path.clone());
-			path_buf.push("ipc");
-			let ipc_path = path_buf.to_str().unwrap();
-			::std::fs::create_dir_all(ipc_path).unwrap_or_else(
-				|e| warn!("Failed to directory '{}' for ipc sockets: {}", ipc_path, e)
-			);
-		}
-
 		Directories {
 			keys: keys_path,
 			base: data_path,

@@ -1404,21 +1392,20 @@ mod tests {
 		let conf3 = parse(&["parity", "--tx-queue-strategy", "gas"]);

 		// then
-		let min_period = conf0.args.arg_reseal_min_period;
-		assert_eq!(conf0.miner_options(min_period).unwrap(), mining_options);
+		assert_eq!(conf0.miner_options().unwrap(), mining_options);
 		mining_options.tx_queue_strategy = PrioritizationStrategy::GasFactorAndGasPrice;
-		assert_eq!(conf1.miner_options(min_period).unwrap(), mining_options);
+		assert_eq!(conf1.miner_options().unwrap(), mining_options);
 		mining_options.tx_queue_strategy = PrioritizationStrategy::GasPriceOnly;
-		assert_eq!(conf2.miner_options(min_period).unwrap(), mining_options);
+		assert_eq!(conf2.miner_options().unwrap(), mining_options);
 		mining_options.tx_queue_strategy = PrioritizationStrategy::GasAndGasPrice;
-		assert_eq!(conf3.miner_options(min_period).unwrap(), mining_options);
+		assert_eq!(conf3.miner_options().unwrap(), mining_options);
 	}

 	#[test]
 	fn should_fail_on_force_reseal_and_reseal_min_period() {
-		let conf = parse(&["parity", "--chain", "dev", "--force-sealing"]);
+		let conf = parse(&["parity", "--chain", "dev", "--force-sealing", "--reseal-min-period", "0"]);

-		assert!(conf.miner_options(0).is_err());
+		assert!(conf.miner_options().is_err());
 	}

 	#[test]

@@ -1446,7 +1433,7 @@ mod tests {
 		// then
 		assert_eq!(conf.network_settings(), Ok(NetworkSettings {
 			name: "testname".to_owned(),
-			chain: "testnet".to_owned(),
+			chain: "kovan".to_owned(),
 			network_port: 30303,
 			rpc_enabled: true,
 			rpc_interface: "127.0.0.1".to_owned(),
@@ -62,7 +62,7 @@ impl Default for HttpConfiguration {
 			cors: None,
 			hosts: Some(Vec::new()),
 			server_threads: 1,
-			processing_threads: 0,
+			processing_threads: 4,
 		}
 	}
 }

@@ -303,6 +303,16 @@ pub fn new_ipc<D: rpc_apis::Dependencies>(

 	let handler = setup_apis(conf.apis, dependencies);
 	let remote = dependencies.remote.clone();
+	let path = PathBuf::from(&conf.socket_addr);
+	// Make sure socket file can be created on unix-like OS.
+	// Windows pipe paths are not on the FS.
+	if !cfg!(windows) {
+		if let Some(dir) = path.parent() {
+			::std::fs::create_dir_all(&dir)
+				.map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?;
+		}
+	}
+
 	match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
 		Ok(server) => Ok(Some(server)),
 		Err(io_error) => Err(format!("IPC error: {}", io_error)),
@@ -239,10 +239,10 @@ impl FullDependencies {
 		use parity_rpc::v1::*;

 		macro_rules! add_signing_methods {
-			($namespace:ident, $handler:expr, $deps:expr) => {
+			($namespace:ident, $handler:expr, $deps:expr, $nonces:expr) => {
 				{
 					let deps = &$deps;
-					let dispatcher = FullDispatcher::new(deps.client.clone(), deps.miner.clone());
+					let dispatcher = FullDispatcher::new(deps.client.clone(), deps.miner.clone(), $nonces);
 					if deps.signer_service.is_enabled() {
 						$handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, deps.remote.clone(), &deps.secret_store)))
 					} else {

@@ -252,7 +252,12 @@ impl FullDependencies {
 			}
 		}

-		let dispatcher = FullDispatcher::new(self.client.clone(), self.miner.clone());
+		let nonces = Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.fetch.pool())));
+		let dispatcher = FullDispatcher::new(
+			self.client.clone(),
+			self.miner.clone(),
+			nonces.clone(),
+		);
 		for api in apis {
 			match *api {
 				Api::Web3 => {

@@ -281,7 +286,7 @@ impl FullDependencies {
 						let filter_client = EthFilterClient::new(self.client.clone(), self.miner.clone());
 						handler.extend_with(filter_client.to_delegate());

-						add_signing_methods!(EthSigning, handler, self);
+						add_signing_methods!(EthSigning, handler, self, nonces.clone());
 					}
 				},
 				Api::EthPubSub => {

@@ -292,7 +297,7 @@ impl FullDependencies {
 					}
 				},
 				Api::Personal => {
-					handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
+					handler.extend_with(PersonalClient::new(self.secret_store.clone(), dispatcher.clone(), self.geth_compatibility).to_delegate());
 				},
 				Api::Signer => {
 					handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.remote.clone()).to_delegate());

@@ -318,7 +323,7 @@ impl FullDependencies {
 					).to_delegate());

 					if !for_generic_pubsub {
-						add_signing_methods!(ParitySigning, handler, self);
+						add_signing_methods!(ParitySigning, handler, self, nonces.clone());
 					}
 				},
 				Api::ParityPubSub => {

@@ -343,7 +348,7 @@ impl FullDependencies {
 					).to_delegate())
 				},
 				Api::Traces => {
-					handler.extend_with(TracesClient::new(&self.client, &self.miner).to_delegate())
+					handler.extend_with(TracesClient::new(&self.client).to_delegate())
 				},
 				Api::Rpc => {
 					let modules = to_modules(&apis);

@@ -435,6 +440,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
 			self.on_demand.clone(),
 			self.cache.clone(),
 			self.transaction_queue.clone(),
+			Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.fetch.pool()))),
 		);

 		macro_rules! add_signing_methods {

@@ -495,7 +501,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
 				},
 				Api::Personal => {
 					let secret_store = Some(self.secret_store.clone());
-					handler.extend_with(PersonalClient::new(&secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
+					handler.extend_with(PersonalClient::new(secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
 				},
 				Api::Signer => {
 					let secret_store = Some(self.secret_store.clone());
@@ -60,9 +60,9 @@ hash = { path = "../util/hash" }
 hardware-wallet = { path = "../hw" }

 clippy = { version = "0.0.103", optional = true}
-pretty_assertions = "0.1"

 [dev-dependencies]
+pretty_assertions = "0.1"
 macros = { path = "../util/macros" }
 ethcore-network = { path = "../util/network" }
 kvdb-memorydb = { path = "../util/kvdb-memorydb" }
@@ -43,9 +43,9 @@ use ethcore::account_provider::AccountProvider;
 use crypto::DEFAULT_MAC;

 use jsonrpc_core::{BoxFuture, Error};
-use jsonrpc_core::futures::{future, Future};
+use jsonrpc_core::futures::{future, Future, Poll, Async};
 use jsonrpc_core::futures::future::Either;
-use v1::helpers::{errors, TransactionRequest, FilledTransactionRequest, ConfirmationPayload};
+use v1::helpers::{errors, nonce, TransactionRequest, FilledTransactionRequest, ConfirmationPayload};
 use v1::types::{
 	H256 as RpcH256, H520 as RpcH520, Bytes as RpcBytes,
 	RichRawTransaction as RpcRichRawTransaction,

@@ -55,6 +55,8 @@ use v1::types::{
 	DecryptRequest as RpcDecryptRequest,
 };

+pub use self::nonce::Reservations;
+
 /// Has the capability to dispatch, sign, and decrypt.
 ///
 /// Requires a clone implementation, with the implication that it be cheap;

@@ -75,7 +77,8 @@ pub trait Dispatcher: Send + Sync + Clone {
 	fn enrich(&self, SignedTransaction) -> RpcRichRawTransaction;

 	/// "Dispatch" a local transaction.
-	fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result<H256, Error>;
+	fn dispatch_transaction(&self, signed_transaction: PendingTransaction)
+		-> Result<H256, Error>;
 }

 /// A dispatcher which uses references to a client and miner in order to sign

@@ -84,14 +87,16 @@ pub trait Dispatcher: Send + Sync + Clone {
 pub struct FullDispatcher<C, M> {
 	client: Arc<C>,
 	miner: Arc<M>,
+	nonces: Arc<Mutex<nonce::Reservations>>,
 }

 impl<C, M> FullDispatcher<C, M> {
 	/// Create a `FullDispatcher` from Arc references to a client and miner.
-	pub fn new(client: Arc<C>, miner: Arc<M>) -> Self {
+	pub fn new(client: Arc<C>, miner: Arc<M>, nonces: Arc<Mutex<nonce::Reservations>>) -> Self {
 		FullDispatcher {
 			client,
 			miner,
+			nonces,
 		}
 	}
 }

@@ -101,15 +106,24 @@ impl<C, M> Clone for FullDispatcher<C, M> {
 		FullDispatcher {
 			client: self.client.clone(),
 			miner: self.miner.clone(),
+			nonces: self.nonces.clone(),
 		}
 	}
 }

 impl<C: MiningBlockChainClient, M: MinerService> FullDispatcher<C, M> {
-	fn fill_nonce(nonce: Option<U256>, from: &Address, miner: &M, client: &C) -> U256 {
-		nonce
-			.or_else(|| miner.last_nonce(from).map(|nonce| nonce + U256::one()))
-			.unwrap_or_else(|| client.latest_nonce(from))
+	fn state_nonce(&self, from: &Address) -> U256 {
+		self.miner.last_nonce(from).map(|nonce| nonce + U256::one())
+			.unwrap_or_else(|| self.client.latest_nonce(from))
 	}

+	/// Imports transaction to the miner's queue.
+	pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: PendingTransaction) -> Result<H256, Error> {
+		let hash = signed_transaction.transaction.hash();
+
+		miner.import_own_transaction(client, signed_transaction)
+			.map_err(errors::transaction)
+			.map(|_| hash)
+	}
 }

@@ -117,20 +131,21 @@ impl<C: MiningBlockChainClient, M: MinerService> Dispatcher for FullDispatcher<C
 	fn fill_optional_fields(&self, request: TransactionRequest, default_sender: Address, force_nonce: bool)
 		-> BoxFuture<FilledTransactionRequest, Error>
 	{
-		let (client, miner) = (self.client.clone(), self.miner.clone());
-		let request = request;
 		let from = request.from.unwrap_or(default_sender);
-		let nonce = match force_nonce {
-			false => request.nonce,
-			true => Some(Self::fill_nonce(request.nonce, &from, &miner, &client)),
+		let nonce = if force_nonce {
+			request.nonce.or_else(|| Some(self.state_nonce(&from)))
+		} else {
+			request.nonce
 		};

 		Box::new(future::ok(FilledTransactionRequest {
-			from: from,
+			from,
 			used_default_from: request.from.is_none(),
 			to: request.to,
-			nonce: nonce,
-			gas_price: request.gas_price.unwrap_or_else(|| default_gas_price(&*client, &*miner)),
-			gas: request.gas.unwrap_or_else(|| miner.sensible_gas_limit()),
+			nonce,
+			gas_price: request.gas_price.unwrap_or_else(|| default_gas_price(&*self.client, &*self.miner)),
+			gas: request.gas.unwrap_or_else(|| self.miner.sensible_gas_limit()),
 			value: request.value.unwrap_or_else(|| 0.into()),
 			data: request.data.unwrap_or_else(Vec::new),
 			condition: request.condition,

@@ -140,30 +155,15 @@ impl<C: MiningBlockChainClient, M: MinerService> Dispatcher for FullDispatcher<C
 	fn sign(&self, accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, password: SignWith)
 		-> BoxFuture<WithToken<SignedTransaction>, Error>
 	{
-		let (client, miner) = (self.client.clone(), self.miner.clone());
-		let chain_id = client.signing_chain_id();
-		let address = filled.from;
-		Box::new(future::done({
-			let t = Transaction {
-				nonce: Self::fill_nonce(filled.nonce, &filled.from, &miner, &client),
-				action: filled.to.map_or(Action::Create, Action::Call),
-				gas: filled.gas,
-				gas_price: filled.gas_price,
-				value: filled.value,
-				data: filled.data,
-			};
+		let chain_id = self.client.signing_chain_id();

-			if accounts.is_hardware_address(address) {
-				hardware_signature(&*accounts, address, t, chain_id).map(WithToken::No)
-			} else {
-				let hash = t.hash(chain_id);
-				let signature = try_bf!(signature(&*accounts, address, hash, password));
-				Ok(signature.map(|sig| {
-					SignedTransaction::new(t.with_signature(sig, chain_id))
-						.expect("Transaction was signed by AccountsProvider; it never produces invalid signatures; qed")
-				}))
+		if let Some(nonce) = filled.nonce {
+			return Box::new(future::done(sign_transaction(&*accounts, filled, chain_id, nonce, password)));
 		}
-		}))

+		let state = self.state_nonce(&filled.from);
+		let reserved = self.nonces.lock().reserve_nonce(state);
+		Box::new(ProspectiveSigner::new(accounts, filled, chain_id, reserved, password))
 	}

 	fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction {

@@ -172,11 +172,7 @@ impl<C: MiningBlockChainClient, M: MinerService> Dispatcher for FullDispatcher<C
 	}

 	fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result<H256, Error> {
-		let hash = signed_transaction.transaction.hash();
-
-		self.miner.import_own_transaction(&*self.client, signed_transaction)
-			.map_err(errors::transaction)
-			.map(|_| hash)
+		Self::dispatch_transaction(&*self.client, &*self.miner, signed_transaction)
 	}
 }

@@ -255,6 +251,8 @@ pub struct LightDispatcher {
 	pub cache: Arc<Mutex<LightDataCache>>,
 	/// Transaction queue.
 	pub transaction_queue: Arc<RwLock<LightTransactionQueue>>,
+	/// Nonce reservations
+	pub nonces: Arc<Mutex<nonce::Reservations>>,
 }

 impl LightDispatcher {

@@ -267,6 +265,7 @@ impl LightDispatcher {
 		on_demand: Arc<OnDemand>,
 		cache: Arc<Mutex<LightDataCache>>,
 		transaction_queue: Arc<RwLock<LightTransactionQueue>>,
+		nonces: Arc<Mutex<nonce::Reservations>>,
 	) -> Self {
 		LightDispatcher {
 			sync,

@@ -274,6 +273,7 @@ impl LightDispatcher {
 			on_demand,
 			cache,
 			transaction_queue,
+			nonces,
 		}
 	}

@@ -372,39 +372,19 @@ impl Dispatcher for LightDispatcher {
 		-> BoxFuture<WithToken<SignedTransaction>, Error>
 	{
 		let chain_id = self.client.signing_chain_id();
-		let address = filled.from;

-		let with_nonce = move |filled: FilledTransactionRequest, nonce| {
-			let t = Transaction {
-				nonce: nonce,
-				action: filled.to.map_or(Action::Create, Action::Call),
-				gas: filled.gas,
-				gas_price: filled.gas_price,
-				value: filled.value,
-				data: filled.data,
-			};
-
-			if accounts.is_hardware_address(address) {
-				return hardware_signature(&*accounts, address, t, chain_id).map(WithToken::No)
-			}
-
-			let hash = t.hash(chain_id);
-			let signature = signature(&*accounts, address, hash, password)?;
-
-			Ok(signature.map(|sig| {
-				SignedTransaction::new(t.with_signature(sig, chain_id))
-					.expect("Transaction was signed by AccountsProvider; it never produces invalid signatures; qed")
-			}))
-		};
-
 		// fast path for pre-filled nonce.
 		if let Some(nonce) = filled.nonce {
-			return Box::new(future::done(with_nonce(filled, nonce)))
+			return Box::new(future::done(sign_transaction(&*accounts, filled, chain_id, nonce, password)))
 		}

-		Box::new(self.next_nonce(address)
+		let nonces = self.nonces.clone();
+		Box::new(self.next_nonce(filled.from)
 			.map_err(|_| errors::no_light_peers())
-			.and_then(move |nonce| with_nonce(filled, nonce)))
+			.and_then(move |nonce| {
+				let reserved = nonces.lock().reserve_nonce(nonce);
+				ProspectiveSigner::new(accounts, filled, chain_id, reserved, password)
+			}))
 	}

 	fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction {

@@ -422,6 +402,147 @@ impl Dispatcher for LightDispatcher {
 	}
 }

+fn sign_transaction(
+	accounts: &AccountProvider,
+	filled: FilledTransactionRequest,
+	chain_id: Option<u64>,
+	nonce: U256,
+	password: SignWith,
+) -> Result<WithToken<SignedTransaction>, Error> {
+	let t = Transaction {
+		nonce: nonce,
+		action: filled.to.map_or(Action::Create, Action::Call),
+		gas: filled.gas,
+		gas_price: filled.gas_price,
+		value: filled.value,
+		data: filled.data,
+	};
+
+	if accounts.is_hardware_address(&filled.from) {
+		return hardware_signature(accounts, filled.from, t, chain_id).map(WithToken::No)
+	}
+
+	let hash = t.hash(chain_id);
+	let signature = signature(accounts, filled.from, hash, password)?;
+
+	Ok(signature.map(|sig| {
+		SignedTransaction::new(t.with_signature(sig, chain_id))
+			.expect("Transaction was signed by AccountsProvider; it never produces invalid signatures; qed")
+	}))
+}
+
+#[derive(Debug, Clone, Copy)]
+enum ProspectiveSignerState {
+	TryProspectiveSign,
+	WaitForNonce,
+	Finish,
+}
+
+struct ProspectiveSigner {
+	accounts: Arc<AccountProvider>,
+	filled: FilledTransactionRequest,
+	chain_id: Option<u64>,
+	reserved: nonce::Reserved,
+	password: SignWith,
+	state: ProspectiveSignerState,
+	prospective: Option<Result<WithToken<SignedTransaction>, Error>>,
+	ready: Option<nonce::Ready>,
+}
+
+impl ProspectiveSigner {
+	pub fn new(
+		accounts: Arc<AccountProvider>,
+		filled: FilledTransactionRequest,
+		chain_id: Option<u64>,
+		reserved: nonce::Reserved,
+		password: SignWith,
+	) -> Self {
+		// If the account is permanently unlocked we can try to sign
+		// using prospective nonce. This should speed up sending
+		// multiple subsequent transactions in multi-threaded RPC environment.
+		let is_unlocked_permanently = accounts.is_unlocked_permanently(&filled.from);
+		let has_password = password.is_password();
+
+		ProspectiveSigner {
+			accounts,
+			filled,
+			chain_id,
+			reserved,
+			password,
+			state: if is_unlocked_permanently || has_password {
+				ProspectiveSignerState::TryProspectiveSign
+			} else {
+				ProspectiveSignerState::WaitForNonce
+			},
+			prospective: None,
+			ready: None,
+		}
+	}
+
+	fn sign(&self, nonce: &U256) -> Result<WithToken<SignedTransaction>, Error> {
+		sign_transaction(
+			&*self.accounts,
+			self.filled.clone(),
+			self.chain_id,
+			*nonce,
+			self.password.clone()
+		)
+	}
+
+	fn poll_reserved(&mut self) -> Poll<nonce::Ready, Error> {
+		self.reserved.poll().map_err(|_| errors::internal("Nonce reservation failure", ""))
+	}
+}
+
+impl Future for ProspectiveSigner {
+	type Item = WithToken<SignedTransaction>;
+	type Error = Error;
+
+	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+		use self::ProspectiveSignerState::*;
+
+		loop {
+			match self.state {
+				TryProspectiveSign => {
+					// Try to poll reserved, it might be ready.
+					match self.poll_reserved()? {
+						Async::NotReady => {
+							self.state = WaitForNonce;
+							self.prospective = Some(self.sign(self.reserved.prospective_value()));
+						},
+						Async::Ready(nonce) => {
+							self.state = Finish;
+							self.prospective = Some(self.sign(nonce.value()));
+							self.ready = Some(nonce);
+						},
+					}
+				},
+				WaitForNonce => {
+					let nonce = try_ready!(self.poll_reserved());
+					let result = match (self.prospective.take(), nonce.matches_prospective()) {
+						(Some(prospective), true) => prospective,
+						_ => self.sign(nonce.value()),
+					};
+					self.state = Finish;
+					self.prospective = Some(result);
+					self.ready = Some(nonce);
+				},
+				Finish => {
+					if let (Some(result), Some(nonce)) = (self.prospective.take(), self.ready.take()) {
+						// Mark nonce as used on successful signing
+						return result.map(move |tx| {
+							nonce.mark_used();
+							Async::Ready(tx)
+						})
+					} else {
+						panic!("Poll after ready.");
+					}
+				}
+			}
+		}
+	}
+}
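// Editor's note (illustrative flow, assumption): the signer advances through
// TryProspectiveSign -> WaitForNonce -> Finish, signing optimistically with the
// prospective nonce and re-signing only if the resolved nonce differs. A
// blocking caller sketch:
//
//     let reserved = nonces.lock().reserve_nonce(state_nonce);
//     let signed = ProspectiveSigner::new(accounts, filled, chain_id, reserved, password)
//         .wait()?; // resolves to WithToken<SignedTransaction>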

 /// Single-use account token.
 pub type AccountToken = String;

@@ -436,6 +557,16 @@ pub enum SignWith {
 	Token(AccountToken),
 }

+impl SignWith {
+	fn is_password(&self) -> bool {
+		if let SignWith::Password(_) = *self {
+			true
+		} else {
+			false
+		}
+	}
+}
+
 /// A value, potentially accompanied by a signing token.
 #[derive(Debug)]
 pub enum WithToken<T: Debug> {

@@ -529,7 +660,7 @@ pub fn execute<D: Dispatcher + 'static>(
 			))
 		},
 		ConfirmationPayload::EthSignMessage(address, data) => {
-			if accounts.is_hardware_address(address) {
+			if accounts.is_hardware_address(&address) {
 				return Box::new(future::err(errors::unsupported("Signing via hardware wallets is not supported.", None)));
 			}

@@ -543,7 +674,7 @@ pub fn execute<D: Dispatcher + 'static>(
 			Box::new(future::done(res))
 		},
 		ConfirmationPayload::Decrypt(address, data) => {
-			if accounts.is_hardware_address(address) {
+			if accounts.is_hardware_address(&address) {
 				return Box::new(future::err(errors::unsupported("Decrypting via hardware wallets is not supported.", None)));
 			}

@@ -572,7 +703,7 @@ fn signature(accounts: &AccountProvider, address: Address, hash: H256, password:
 fn hardware_signature(accounts: &AccountProvider, address: Address, t: Transaction, chain_id: Option<u64>)
 	-> Result<SignedTransaction, Error>
 {
-	debug_assert!(accounts.is_hardware_address(address));
+	debug_assert!(accounts.is_hardware_address(&address));

 	let mut stream = rlp::RlpStream::new();
 	t.rlp_append_unsigned_transaction(&mut stream, chain_id);

@@ -14,22 +14,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use std::sync::Arc;
-use ethcore::client::MiningBlockChainClient;
-use ethcore::miner::MinerService;
 use ethcore::transaction::{Transaction, SignedTransaction, Action};
 use bigint::prelude::U256;

 use jsonrpc_core::Error;
 use v1::helpers::CallRequest;
-use v1::helpers::dispatch::default_gas_price;

-pub fn sign_call<C: MiningBlockChainClient, M: MinerService> (
-	client: &Arc<C>,
-	miner: &Arc<M>,
-	request: CallRequest,
-	gas_cap: bool,
-) -> Result<SignedTransaction, Error> {
+pub fn sign_call(request: CallRequest, gas_cap: bool) -> Result<SignedTransaction, Error> {
 	let max_gas = 50_000_000.into();
 	let gas = match request.gas {
 		Some(gas) if gas_cap && gas > max_gas => {

@@ -43,10 +34,10 @@ pub fn sign_call<C: MiningBlockChainClient, M: MinerService> (
 	let from = request.from.unwrap_or(0.into());

 	Ok(Transaction {
-		nonce: request.nonce.unwrap_or_else(|| client.latest_nonce(&from)),
+		nonce: request.nonce.unwrap_or_else(|| 0.into()),
 		action: request.to.map_or(Action::Create, Action::Call),
 		gas,
-		gas_price: request.gas_price.unwrap_or_else(|| default_gas_price(&**client, &**miner)),
+		gas_price: request.gas_price.unwrap_or_else(|| 0.into()),
 		value: request.value.unwrap_or(0.into()),
 		data: request.data.unwrap_or_default(),
 	}.fake_sign(from))

@@ -22,14 +22,15 @@ pub mod block_import;
 pub mod dapps;
 pub mod dispatch;
 pub mod fake_sign;
-pub mod light_fetch;
-pub mod oneshot;
 pub mod ipfs;
+pub mod light_fetch;
+pub mod nonce;
+pub mod oneshot;
 pub mod secretstore;

 mod network_settings;
-mod poll_manager;
 mod poll_filter;
+mod poll_manager;
 mod requests;
 mod signer;
 mod signing_queue;

rpc/src/v1/helpers/nonce.rs (new file, 249 lines)
@@ -0,0 +1,249 @@
|
||||
// Copyright 2015-2017 harity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::{cmp, mem};
|
||||
use std::sync::{atomic, Arc};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
|
||||
use bigint::prelude::U256;
|
||||
use futures::{Future, future, Poll, Async};
|
||||
use futures::future::Either;
|
||||
use futures::sync::oneshot;
|
||||
use futures_cpupool::CpuPool;
|
||||
|
||||
/// Manages currently reserved and prospective nonces.
|
||||
#[derive(Debug)]
|
||||
pub struct Reservations {
|
||||
previous: Option<oneshot::Receiver<U256>>,
|
||||
pool: CpuPool,
|
||||
prospective_value: U256,
|
||||
dropped: Arc<AtomicUsize>,
|
||||
}
|
||||
|
||||
impl Reservations {
|
||||
/// Create new nonces manager and spawn a single-threaded cpu pool
|
||||
/// for progressing execution of dropped nonces.
|
||||
pub fn new() -> Self {
|
||||
Self::with_pool(CpuPool::new(1))
|
||||
}
|
||||
|
||||
/// Create new nonces manager with given cpu pool.
|
||||
pub fn with_pool(pool: CpuPool) -> Self {
|
||||
Reservations {
|
||||
previous: None,
|
||||
pool,
|
||||
prospective_value: Default::default(),
|
||||
dropped: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Reserves a prospective nonce.
|
||||
/// The caller should provide a minimal nonce that needs to be reserved (taken from state/txqueue).
|
||||
/// If there were any previous reserved nonces the returned future will be resolved when those are finished
|
||||
/// (confirmed that the nonce were indeed used).
|
||||
/// The caller can use `prospective_nonce` and perform some heavy computation anticipating
|
||||
/// that the `prospective_nonce` will be equal to the one he will get.
|
||||
pub fn reserve_nonce(&mut self, minimal: U256) -> Reserved {
|
||||
// Update prospective value
|
||||
let dropped = self.dropped.swap(0, atomic::Ordering::SeqCst);
|
||||
let prospective_value = cmp::max(minimal, self.prospective_value - dropped.into());
|
||||
self.prospective_value = prospective_value + 1.into();
|
||||
|
||||
let (next, rx) = oneshot::channel();
|
||||
let next = Some(next);
|
||||
let pool = self.pool.clone();
|
||||
let dropped = self.dropped.clone();
|
||||
match mem::replace(&mut self.previous, Some(rx)) {
|
||||
Some(previous) => Reserved {
|
||||
previous: Either::A(previous),
|
||||
next,
|
||||
minimal,
|
||||
prospective_value,
|
||||
pool,
|
||||
dropped,
|
||||
},
|
||||
None => Reserved {
|
||||
previous: Either::B(future::ok(minimal)),
|
||||
next,
|
||||
minimal,
|
||||
prospective_value,
|
||||
pool,
|
||||
dropped,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
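
To see how the dropped-reservation accounting above plays out, a short worked trace (values illustrative):

// prospective_value = 0, dropped = 0 initially
// reserve_nonce(5) -> prospective = max(5, 0 - 0) = 5; prospective_value becomes 6
// reserve_nonce(5) -> prospective = max(5, 6 - 0) = 6; prospective_value becomes 7
// the second Reserved is dropped unused -> dropped becomes 1
// reserve_nonce(5) -> prospective = max(5, 7 - 1) = 6; prospective_value becomes 7
// i.e. the slot vacated by the dropped reservation is handed out again.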
/// Represents a future nonce.
#[derive(Debug)]
pub struct Reserved {
    previous: Either<
        oneshot::Receiver<U256>,
        future::FutureResult<U256, oneshot::Canceled>
    >,
    next: Option<oneshot::Sender<U256>>,
    minimal: U256,
    prospective_value: U256,
    pool: CpuPool,
    dropped: Arc<AtomicUsize>,
}

impl Reserved {
    /// Returns a prospective value of the nonce.
    /// NOTE: This might be different than the one we resolve to.
    /// Make sure to check if both nonces match or use the latter one.
    pub fn prospective_value(&self) -> &U256 {
        &self.prospective_value
    }
}

impl Future for Reserved {
    type Item = Ready;
    type Error = ();

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        let mut value = try_ready!(self.previous.poll().map_err(|e| {
            warn!("Unexpected nonce cancellation: {}", e);
        }));

        if value < self.minimal {
            value = self.minimal
        }
        let matches_prospective = value == self.prospective_value;

        Ok(Async::Ready(Ready {
            value,
            matches_prospective,
            next: self.next.take(),
            dropped: self.dropped.clone(),
        }))
    }
}

impl Drop for Reserved {
    fn drop(&mut self) {
        if let Some(next) = self.next.take() {
            self.dropped.fetch_add(1, atomic::Ordering::SeqCst);
            // If Reserved is dropped just pipe previous and next together.
            let previous = mem::replace(&mut self.previous, Either::B(future::ok(U256::default())));
            self.pool.spawn(previous.map(|nonce| {
                next.send(nonce).expect(Ready::RECV_PROOF)
            })).forget()
        }
    }
}

/// Represents a valid reserved nonce.
/// This can be used to dispatch the transaction.
///
/// After this nonce is used it should be marked as such
/// using `mark_used` method.
#[derive(Debug)]
pub struct Ready {
    value: U256,
    matches_prospective: bool,
    next: Option<oneshot::Sender<U256>>,
    dropped: Arc<AtomicUsize>,
}

impl Ready {
    const RECV_PROOF: &'static str = "Receiver never dropped.";

    /// Returns a value of the nonce.
    pub fn value(&self) -> &U256 {
        &self.value
    }

    /// Returns true if current value matches the prospective nonce.
    pub fn matches_prospective(&self) -> bool {
        self.matches_prospective
    }

    /// Marks this nonce as used.
    /// Make sure to call that method after this nonce has been consumed.
    pub fn mark_used(mut self) {
        let next = self.next.take().expect("Nonce can be marked as used only once; qed");
        next.send(self.value + 1.into()).expect(Self::RECV_PROOF);
    }
}

impl Drop for Ready {
    fn drop(&mut self) {
        if let Some(send) = self.next.take() {
            self.dropped.fetch_add(1, atomic::Ordering::SeqCst);
            send.send(self.value).expect(Self::RECV_PROOF);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn should_reserve_a_set_of_nonces_and_resolve_them() {
        let mut nonces = Reservations::new();

        let n1 = nonces.reserve_nonce(5.into());
        let n2 = nonces.reserve_nonce(5.into());
        let n3 = nonces.reserve_nonce(5.into());
        let n4 = nonces.reserve_nonce(5.into());

        // Check first nonce
        let r = n1.wait().unwrap();
        assert_eq!(r.value(), &U256::from(5));
        assert!(r.matches_prospective());
        r.mark_used();

        // Drop second nonce
        drop(n2);

        // Drop third without marking as used
        let r = n3.wait().unwrap();
        drop(r);

        // Last nonce should be resolved to 6
        let r = n4.wait().unwrap();
        assert_eq!(r.value(), &U256::from(6));
        assert!(!r.matches_prospective());
        r.mark_used();

        // Next nonce should be immediately available.
        let n5 = nonces.reserve_nonce(5.into());
        let r = n5.wait().unwrap();
        assert_eq!(r.value(), &U256::from(7));
        assert!(r.matches_prospective());
        r.mark_used();

        // Should use start number if it's greater
        let n6 = nonces.reserve_nonce(10.into());
        let r = n6.wait().unwrap();
        assert_eq!(r.value(), &U256::from(10));
        assert!(r.matches_prospective());
        r.mark_used();
    }

    #[test]
    fn should_return_prospective_nonce() {
        let mut nonces = Reservations::new();

        let n1 = nonces.reserve_nonce(5.into());
        let n2 = nonces.reserve_nonce(5.into());

        assert_eq!(n1.prospective_value(), &U256::from(5));
        assert_eq!(n2.prospective_value(), &U256::from(6));
    }
}
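
Taken together, the intended usage pattern is reserve → wait → dispatch → mark_used. A hedged sketch, mirroring the tests above; `state_nonce` and `dispatch_with` are placeholders for the caller's own state lookup and send step:

let mut nonces = Reservations::new();
let reserved = nonces.reserve_nonce(state_nonce);   // minimal nonce from state/txqueue
let ready = reserved.wait().expect("sender kept alive by Reservations; qed");
dispatch_with(*ready.value());                      // hypothetical dispatch step
ready.mark_used();                                  // unblocks the next waiter with value + 1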
@@ -15,8 +15,9 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use bigint::prelude::U256;
use util::Address;
use bytes::Bytes;
use util::Address;

use v1::types::{Origin, TransactionCondition};

/// Transaction request coming from RPC
@@ -45,7 +45,7 @@ use jsonrpc_core::futures::future;
use jsonrpc_macros::Trailing;

use v1::helpers::{errors, limit_logs, fake_sign};
use v1::helpers::dispatch::{Dispatcher, FullDispatcher, default_gas_price};
use v1::helpers::dispatch::{FullDispatcher, default_gas_price};
use v1::helpers::block_import::is_major_importing;
use v1::helpers::accounts::unwrap_provider;
use v1::traits::Eth;
@@ -610,8 +610,11 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
.map_err(errors::rlp)
.and_then(|tx| SignedTransaction::new(tx).map_err(errors::transaction))
.and_then(|signed_transaction| {
FullDispatcher::new(self.client.clone(), self.miner.clone())
.dispatch_transaction(signed_transaction.into())
FullDispatcher::dispatch_transaction(
&*self.client,
&*self.miner,
signed_transaction.into(),
)
})
.map(Into::into)
}
@@ -622,7 +625,7 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where

fn call(&self, meta: Self::Metadata, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> {
let request = CallRequest::into(request);
let signed = try_bf!(fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp()));
let signed = try_bf!(fake_sign::sign_call(request, meta.is_dapp()));

let num = num.unwrap_or_default();
let result = self.client.call(&signed, Default::default(), num.into());
@@ -635,7 +638,7 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where

fn estimate_gas(&self, meta: Self::Metadata, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
let request = CallRequest::into(request);
let signed = try_bf!(fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp()));
let signed = try_bf!(fake_sign::sign_call(request, meta.is_dapp()));
Box::new(future::done(self.client.estimate_gas(&signed, num.unwrap_or_default().into())
.map(Into::into)
.map_err(errors::call)
@@ -418,7 +418,7 @@ impl<C, M, U> Parity for ParityClient<C, M, U> where
let requests = requests
.into_iter()
.map(|request| Ok((
fake_sign::sign_call(&self.client, &self.miner, request.into(), meta.is_dapp())?,
fake_sign::sign_call(request.into(), meta.is_dapp())?,
Default::default()
)))
.collect::<Result<Vec<_>, Error>>()?;
@@ -42,11 +42,11 @@ pub struct PersonalClient<D: Dispatcher> {

impl<D: Dispatcher> PersonalClient<D> {
/// Creates new PersonalClient
pub fn new(store: &Option<Arc<AccountProvider>>, dispatcher: D, allow_perm_unlock: bool) -> Self {
pub fn new(accounts: Option<Arc<AccountProvider>>, dispatcher: D, allow_perm_unlock: bool) -> Self {
PersonalClient {
accounts: store.clone(),
dispatcher: dispatcher,
allow_perm_unlock: allow_perm_unlock,
accounts,
dispatcher,
allow_perm_unlock,
}
}

@@ -89,15 +89,18 @@ impl<D: Dispatcher + 'static> Personal for PersonalClient<D> {
};

let r = match (self.allow_perm_unlock, duration) {
(false, _) => store.unlock_account_temporarily(account, account_pass),
(false, None) => store.unlock_account_temporarily(account, account_pass),
(false, _) => return Err(errors::unsupported(
"Time-unlocking is only supported in --geth compatibility mode.",
Some("Restart your client with --geth flag or use personal_sendTransaction instead."),
)),
(true, Some(0)) => store.unlock_account_permanently(account, account_pass),
(true, Some(d)) => store.unlock_account_timed(account, account_pass, d * 1000),
(true, None) => store.unlock_account_timed(account, account_pass, 300_000),
};
match r {
Ok(_) => Ok(true),
// TODO [ToDr] Proper error here?
Err(_) => Ok(false),
Err(err) => Err(errors::account("Unable to unlock the account.", err)),
}
}

@@ -125,7 +125,7 @@ impl<D: Dispatcher + 'static> SigningQueueClient<D> {
Box::new(dispatch::from_rpc(payload, default_account, &dispatcher)
.and_then(move |payload| {
let sender = payload.sender();
if accounts.is_unlocked(sender) {
if accounts.is_unlocked(&sender) {
Either::A(dispatch::execute(dispatcher, accounts, payload, dispatch::SignWith::Nothing)
.map(|v| v.into_value())
.map(DispatchResult::Value))
@@ -19,7 +19,6 @@
use std::sync::Arc;

use ethcore::client::{MiningBlockChainClient, CallAnalytics, TransactionId, TraceId};
use ethcore::miner::MinerService;
use ethcore::transaction::SignedTransaction;
use rlp::UntrustedRlp;

@@ -39,22 +38,20 @@ fn to_call_analytics(flags: TraceOptions) -> CallAnalytics {
}

/// Traces api implementation.
pub struct TracesClient<C, M> {
pub struct TracesClient<C> {
client: Arc<C>,
miner: Arc<M>,
}

impl<C, M> TracesClient<C, M> {
impl<C> TracesClient<C> {
/// Creates new Traces client.
pub fn new(client: &Arc<C>, miner: &Arc<M>) -> Self {
pub fn new(client: &Arc<C>) -> Self {
TracesClient {
client: client.clone(),
miner: miner.clone(),
}
}
}

impl<C, M> Traces for TracesClient<C, M> where C: MiningBlockChainClient + 'static, M: MinerService + 'static {
impl<C> Traces for TracesClient<C> where C: MiningBlockChainClient + 'static {
type Metadata = Metadata;

fn filter(&self, filter: TraceFilter) -> Result<Option<Vec<LocalizedTrace>>, Error> {
@@ -86,7 +83,7 @@ impl<C, M> Traces for TracesClient<C, M> where C: MiningBlockChainClient + 'stat
let block = block.unwrap_or_default();

let request = CallRequest::into(request);
let signed = fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())?;
let signed = fake_sign::sign_call(request, meta.is_dapp())?;

self.client.call(&signed, to_call_analytics(flags), block.into())
.map(TraceResults::from)
@@ -99,7 +96,7 @@ impl<C, M> Traces for TracesClient<C, M> where C: MiningBlockChainClient + 'stat
let requests = requests.into_iter()
.map(|(request, flags)| {
let request = CallRequest::into(request);
let signed = fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())?;
let signed = fake_sign::sign_call(request, meta.is_dapp())?;
Ok((signed, to_call_analytics(flags)))
})
.collect::<Result<Vec<_>, Error>>()?;
@@ -19,25 +19,27 @@ use std::env;
use std::sync::Arc;
use std::time::Duration;

use ethcore::client::{BlockChainClient, Client, ClientConfig};
use ethcore::ids::BlockId;
use ethcore::spec::{Genesis, Spec};
use ethcore::block::Block;
use ethcore::views::BlockView;
use ethcore::ethereum;
use ethcore::miner::{MinerOptions, Banning, GasPricer, MinerService, ExternalMiner, Miner, PendingSet, PrioritizationStrategy, GasLimit};
use bigint::hash::H256;
use bigint::prelude::U256;
use ethcore::account_provider::AccountProvider;
use ethcore::block::Block;
use ethcore::client::{BlockChainClient, Client, ClientConfig};
use ethcore::ethereum;
use ethcore::ids::BlockId;
use ethcore::miner::{MinerOptions, Banning, GasPricer, MinerService, ExternalMiner, Miner, PendingSet, PrioritizationStrategy, GasLimit};
use ethcore::spec::{Genesis, Spec};
use ethcore::views::BlockView;
use ethjson::blockchain::BlockChain;
use ethjson::state::test::ForkSpec;
use io::IoChannel;
use bigint::prelude::U256;
use bigint::hash::H256;
use util::Address;
use kvdb_memorydb;
use parking_lot::Mutex;
use util::Address;

use jsonrpc_core::IoHandler;
use v1::impls::{EthClient, SigningUnsafeClient};
use v1::helpers::dispatch::FullDispatcher;
use v1::helpers::nonce;
use v1::impls::{EthClient, SigningUnsafeClient};
use v1::metadata::Metadata;
use v1::tests::helpers::{TestSnapshotService, TestSyncProvider, Config};
use v1::traits::eth::Eth;
@@ -75,6 +77,7 @@ fn miner_service(spec: &Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
work_queue_size: 50,
enable_resubmission: true,
refuse_service_transactions: false,
infinite_pending_block: false,
},
GasPricer::new_fixed(20_000_000_000u64.into()),
&spec,
@@ -148,7 +151,9 @@ impl EthTester {
Default::default(),
);

let dispatcher = FullDispatcher::new(client.clone(), miner_service.clone());
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));

let dispatcher = FullDispatcher::new(client.clone(), miner_service.clone(), reservations);
let eth_sign = SigningUnsafeClient::new(
&opt_account_provider,
dispatcher,
@@ -37,6 +37,7 @@ use ethsync::SyncState;

use jsonrpc_core::IoHandler;
use v1::{Eth, EthClient, EthClientOptions, EthFilter, EthFilterClient, EthSigning, SigningUnsafeClient};
use v1::helpers::nonce;
use v1::helpers::dispatch::FullDispatcher;
use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService, TestSnapshotService};
use v1::metadata::Metadata;
@@ -94,8 +95,9 @@ impl EthTester {
let external_miner = Arc::new(ExternalMiner::new(hashrates.clone()));
let eth = EthClient::new(&client, &snapshot, &sync, &opt_ap, &miner, &external_miner, options).to_delegate();
let filter = EthFilterClient::new(client.clone(), miner.clone()).to_delegate();
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));

let dispatcher = FullDispatcher::new(client.clone(), miner.clone());
let dispatcher = FullDispatcher::new(client.clone(), miner.clone(), reservations);
let sign = SigningUnsafeClient::new(&opt_ap, dispatcher).to_delegate();
let mut io: IoHandler<Metadata> = IoHandler::default();
io.extend_with(eth);
@@ -17,14 +17,16 @@
use std::sync::Arc;
use std::str::FromStr;

use bigint::prelude::U256;
use ethcore::account_provider::AccountProvider;
use ethcore::client::TestBlockChainClient;
use ethcore::transaction::{Action, Transaction};
use jsonrpc_core::IoHandler;
use bigint::prelude::U256;
use parking_lot::Mutex;
use util::Address;

use v1::{PersonalClient, Personal, Metadata};
use v1::helpers::nonce;
use v1::helpers::dispatch::FullDispatcher;
use v1::tests::helpers::TestMinerService;

@@ -52,9 +54,10 @@ fn setup() -> PersonalTester {
let opt_accounts = Some(accounts.clone());
let client = blockchain_client();
let miner = miner_service();
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));

let dispatcher = FullDispatcher::new(client, miner.clone());
let personal = PersonalClient::new(&opt_accounts, dispatcher, false);
let dispatcher = FullDispatcher::new(client, miner.clone(), reservations);
let personal = PersonalClient::new(opt_accounts, dispatcher, false);

let mut io = IoHandler::default();
io.extend_with(personal.to_delegate());
@@ -178,7 +181,7 @@ fn sign_and_send_test(method: &str) {
}

#[test]
fn should_unlock_account_temporarily() {
fn should_not_unlock_account_temporarily_if_allow_perm_is_disabled() {
let tester = setup();
let address = tester.accounts.new_account("password123").unwrap();

@@ -192,10 +195,10 @@ fn should_unlock_account_temporarily() {
],
"id": 1
}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Time-unlocking is only supported in --geth compatibility mode.","data":"Restart your client with --geth flag or use personal_sendTransaction instead."},"id":1}"#;
assert_eq!(tester.io.handle_request_sync(&request), Some(response.into()));

assert!(tester.accounts.sign(address, None, Default::default()).is_ok(), "Should unlock account.");
assert!(tester.accounts.sign(address, None, Default::default()).is_err(), "Should not unlock account.");
}

#[test]
@@ -24,6 +24,7 @@ use ethcore::account_provider::AccountProvider;
use ethcore::client::TestBlockChainClient;
use ethcore::transaction::{Transaction, Action, SignedTransaction};
use parity_reactor::EventLoop;
use parking_lot::Mutex;
use rlp::encode;

use serde_json;
@@ -32,7 +33,7 @@ use v1::{SignerClient, Signer, Origin};
use v1::metadata::Metadata;
use v1::tests::helpers::TestMinerService;
use v1::types::{Bytes as RpcBytes, H520};
use v1::helpers::{SigningQueue, SignerService, FilledTransactionRequest, ConfirmationPayload};
use v1::helpers::{nonce, SigningQueue, SignerService, FilledTransactionRequest, ConfirmationPayload};
use v1::helpers::dispatch::{FullDispatcher, eth_data_hash};

struct SignerTester {
@@ -61,9 +62,10 @@ fn signer_tester() -> SignerTester {
let opt_accounts = Some(accounts.clone());
let client = blockchain_client();
let miner = miner_service();
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
let event_loop = EventLoop::spawn();

let dispatcher = FullDispatcher::new(client, miner.clone());
let dispatcher = FullDispatcher::new(client, miner.clone(), reservations);
let mut io = IoHandler::default();
io.extend_with(SignerClient::new(&opt_accounts, dispatcher, &signer, event_loop.remote()).to_delegate());

@@ -25,7 +25,7 @@ use jsonrpc_core::futures::Future;
use v1::impls::SigningQueueClient;
use v1::metadata::Metadata;
use v1::traits::{EthSigning, ParitySigning, Parity};
use v1::helpers::{SignerService, SigningQueue, FullDispatcher};
use v1::helpers::{nonce, SignerService, SigningQueue, FullDispatcher};
use v1::types::{ConfirmationResponse, RichRawTransaction};
use v1::tests::helpers::TestMinerService;
use v1::tests::mocked::parity;
@@ -39,6 +39,7 @@ use ethcore::client::TestBlockChainClient;
use ethcore::transaction::{Transaction, Action, SignedTransaction};
use ethstore::ethkey::{Generator, Random};
use serde_json;
use parking_lot::Mutex;

use parity_reactor::Remote;

@@ -57,9 +58,10 @@ impl Default for SigningTester {
let miner = Arc::new(TestMinerService::default());
let accounts = Arc::new(AccountProvider::transient_provider());
let opt_accounts = Some(accounts.clone());
let reservations = Arc::new(Mutex::new(nonce::Reservations::new()));
let mut io = IoHandler::default();

let dispatcher = FullDispatcher::new(client.clone(), miner.clone());
let dispatcher = FullDispatcher::new(client.clone(), miner.clone(), reservations);

let remote = Remote::new_thread_per_future();

@@ -66,7 +66,7 @@ fn io() -> Tester {
state_diff: None,
}));
let miner = Arc::new(TestMinerService::default());
let traces = TracesClient::new(&client, &miner);
let traces = TracesClient::new(&client);
let mut io = IoHandler::default();
io.extend_with(traces.to_delegate());

@@ -173,8 +173,10 @@ impl<'a> From<&'a EthHeader> for Header {
logs_bloom: h.log_bloom().into(),
timestamp: h.timestamp().into(),
difficulty: h.difficulty().into(),
seal_fields: h.seal().into_iter().map(Into::into).collect(),
extra_data: h.extra_data().into(),
seal_fields: h.view().decode_seal()
.expect("Client/Miner returns only valid headers. We only serialize headers from Client/Miner; qed")
.into_iter().map(Into::into).collect(),
}
}
}
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use v1::types::{Log, H160, H256, H2048, U256};
use v1::types::{Log, H160, H256, H2048, U256, U64};
use ethcore::receipt::{Receipt as EthReceipt, RichReceipt, LocalizedReceipt, TransactionOutcome};

/// Receipt
@@ -51,7 +51,7 @@ pub struct Receipt {
pub logs_bloom: H2048,
/// Status code
#[serde(rename="status")]
pub status_code: Option<u8>,
pub status_code: Option<U64>,
}

impl Receipt {
@@ -62,10 +62,10 @@ impl Receipt {
}
}

fn outcome_to_status_code(outcome: &TransactionOutcome) -> Option<u8> {
fn outcome_to_status_code(outcome: &TransactionOutcome) -> Option<U64> {
match *outcome {
TransactionOutcome::Unknown | TransactionOutcome::StateRoot(_) => None,
TransactionOutcome::StatusCode(ref code) => Some(*code),
TransactionOutcome::StatusCode(ref code) => Some((*code as u64).into()),
}
}
}
@@ -131,7 +131,7 @@ mod tests {

#[test]
fn receipt_serialization() {
let s = r#"{"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","cumulativeGasUsed":"0x20","gasUsed":"0x10","contractAddress":null,"logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"data":"0x","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","logIndex":"0x1","transactionLogIndex":null,"type":"mined"}],"root":"0x000000000000000000000000000000000000000000000000000000000000000a","logsBloom":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f","status":null}"#;
let s = r#"{"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","cumulativeGasUsed":"0x20","gasUsed":"0x10","contractAddress":null,"logs":[{"address":"0x33990122638b9132ca29c723bdf037f1a891a70c","topics":["0xa6697e974e6a320f454390be03f74955e8978f1a6971ea6730542e37b66179bc","0x4861736852656700000000000000000000000000000000000000000000000000"],"data":"0x","blockHash":"0xed76641c68a1c641aee09a94b3b471f4dc0316efe5ac19cf488e2674cf8d05b5","blockNumber":"0x4510c","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","logIndex":"0x1","transactionLogIndex":null,"type":"mined"}],"root":"0x000000000000000000000000000000000000000000000000000000000000000a","logsBloom":"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f","status":"0x1"}"#;

let receipt = Receipt {
transaction_hash: Some(0.into()),
@@ -158,7 +158,7 @@ mod tests {
}],
logs_bloom: 15.into(),
state_root: Some(10.into()),
status_code: None,
status_code: Some(1u64.into()),
};

let serialized = serde_json::to_string(&receipt).unwrap();
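
One consequence worth spelling out: with `status_code` now a `v1::types::U64`, the EIP-658 `status` field serializes as a 0x-prefixed quantity, matching the fixture change above. A small illustrative check (assuming `U64`'s quantity serialization):

use v1::types::U64;

let status: Option<U64> = Some(1u64.into());
// Receipts now carry "status":"0x1" / "status":"0x0" instead of a bare number.
assert_eq!(serde_json::to_string(&status).unwrap(), r#""0x1""#);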
@@ -106,7 +106,7 @@ impl DocumentKeyServer for KeyServerImpl {
.map_err(|_| Error::BadSignature)?;

// decrypt document key
let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), false)?;
let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), None, false)?;
let document_key = decryption_session.wait()?.decrypted_secret;

// encrypt document key with requestor public key
@@ -116,7 +116,7 @@ impl DocumentKeyServer for KeyServerImpl {
}

fn restore_document_key_shadow(&self, key_id: &ServerKeyId, signature: &RequestSignature) -> Result<EncryptedDocumentKeyShadow, Error> {
let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), true)?;
let decryption_session = self.data.lock().cluster.new_decryption_session(key_id.clone(), signature.clone(), None, true)?;
decryption_session.wait().map_err(Into::into)
}
}
@@ -128,7 +128,7 @@ impl MessageSigner for KeyServerImpl {
.map_err(|_| Error::BadSignature)?;

// sign message
let signing_session = self.data.lock().cluster.new_signing_session(key_id.clone(), signature.clone(), message)?;
let signing_session = self.data.lock().cluster.new_signing_session(key_id.clone(), signature.clone(), None, message)?;
let message_signature = signing_session.wait()?;

// compose two message signature components into single one
@@ -396,4 +396,52 @@ pub mod tests {
assert_eq!(math::verify_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true));
}
}

#[test]
fn decryption_session_is_delegated_when_node_does_not_have_key_share() {
//::logger::init_log();
let key_servers = make_key_servers(6110, 3);

// generate document key
let threshold = 0;
let document = Random.generate().unwrap().secret().clone();
let secret = Random.generate().unwrap().secret().clone();
let signature = ethkey::sign(&secret, &document).unwrap();
let generated_key = key_servers[0].generate_document_key(&document, &signature, threshold).unwrap();
let generated_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &generated_key).unwrap();

// remove key from node0
key_servers[0].cluster().key_storage().remove(&document).unwrap();

// now let's try to retrieve key back by requesting it from node0, so that session must be delegated
let retrieved_key = key_servers[0].restore_document_key(&document, &signature).unwrap();
let retrieved_key = ethcrypto::ecies::decrypt(&secret, &ethcrypto::DEFAULT_MAC, &retrieved_key).unwrap();
assert_eq!(retrieved_key, generated_key);
}

#[test]
fn signing_session_is_delegated_when_node_does_not_have_key_share() {
//::logger::init_log();
let key_servers = make_key_servers(6114, 3);
let threshold = 1;

// generate server key
let server_key_id = Random.generate().unwrap().secret().clone();
let requestor_secret = Random.generate().unwrap().secret().clone();
let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap();
let server_public = key_servers[0].generate_key(&server_key_id, &signature, threshold).unwrap();

// remove key from node0
key_servers[0].cluster().key_storage().remove(&server_key_id).unwrap();

// sign message
let message_hash = H256::from(42);
let combined_signature = key_servers[0].sign_message(&server_key_id, &signature, message_hash.clone()).unwrap();
let combined_signature = ethcrypto::ecies::decrypt(&requestor_secret, &ethcrypto::DEFAULT_MAC, &combined_signature).unwrap();
let signature_c = Secret::from_slice(&combined_signature[..32]);
let signature_s = Secret::from_slice(&combined_signature[32..]);

// check signature
assert_eq!(math::verify_signature(&server_public, &(signature_c, signature_s), &message_hash), Ok(true));
}
}
@@ -0,0 +1,725 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
use bigint::hash::H256;
use ethkey::Secret;
use parking_lot::{Mutex, Condvar};
use key_server_cluster::{Error, SessionId, NodeId, DocumentKeyShare};
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession};
use key_server_cluster::decryption_session::SessionImpl as DecryptionSession;
use key_server_cluster::signing_session::SessionImpl as SigningSession;
use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, KeyVersions};
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

// TODO: optimizations: change sessions so that versions are sent by chunks.
/// Number of versions sent in single message.
const VERSIONS_PER_MESSAGE: usize = 32;

/// Key version negotiation session API.
pub trait Session: Send + Sync + 'static {
    /// Set continue action.
    fn set_continue_action(&self, action: ContinueAction);
    /// Get continue action.
    fn continue_action(&self) -> Option<ContinueAction>;
    /// Wait until session is completed.
    fn wait(&self) -> Result<(H256, NodeId), Error>;
}

/// Key version negotiation transport.
pub trait SessionTransport {
    /// Send message to given node.
    fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error>;
}

/// Key version negotiation result computer.
pub trait SessionResultComputer: Send + Sync {
    /// Compute result of session, if possible.
    fn compute_result(&self, threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>>;
}

/// Key version negotiation session.
pub struct SessionImpl<T: SessionTransport> {
    /// Session core.
    core: SessionCore<T>,
    /// Session data.
    data: Mutex<SessionData>,
}

/// Action after key version is negotiated.
#[derive(Clone)]
pub enum ContinueAction {
    /// Decryption session + is_shadow_decryption.
    Decrypt(Arc<DecryptionSession>, bool),
    /// Signing session + message hash.
    Sign(Arc<SigningSession>, H256),
}

/// Immutable session data.
struct SessionCore<T: SessionTransport> {
    /// Session meta.
    pub meta: ShareChangeSessionMeta,
    /// Sub-session id.
    pub sub_session: Secret,
    /// Key share.
    pub key_share: Option<DocumentKeyShare>,
    /// Session result computer.
    pub result_computer: Arc<SessionResultComputer>,
    /// Session transport.
    pub transport: T,
    /// Session nonce.
    pub nonce: u64,
    /// SessionImpl completion condvar.
    pub completed: Condvar,
}

/// Mutable session data.
struct SessionData {
    /// Session state.
    pub state: SessionState,
    /// Initialization confirmations.
    pub confirmations: Option<BTreeSet<NodeId>>,
    /// Key threshold.
    pub threshold: Option<usize>,
    /// { Version => Nodes }
    pub versions: Option<BTreeMap<H256, BTreeSet<NodeId>>>,
    /// Session result.
    pub result: Option<Result<(H256, NodeId), Error>>,
    /// Continue action.
    pub continue_with: Option<ContinueAction>,
}

/// SessionImpl creation parameters
pub struct SessionParams<T: SessionTransport> {
    /// Session meta.
    pub meta: ShareChangeSessionMeta,
    /// Sub-session id.
    pub sub_session: Secret,
    /// Key share.
    pub key_share: Option<DocumentKeyShare>,
    /// Session result computer.
    pub result_computer: Arc<SessionResultComputer>,
    /// Session transport to communicate to other cluster nodes.
    pub transport: T,
    /// Session nonce.
    pub nonce: u64,
}

/// Key version negotiation session state.
#[derive(Debug, PartialEq)]
enum SessionState {
    /// Waiting for initialization.
    WaitingForInitialization,
    /// Waiting for responses.
    WaitingForResponses,
    /// Session is completed.
    Finished,
}

/// Isolated session transport.
pub struct IsolatedSessionTransport {
    /// Cluster.
    pub cluster: Arc<Cluster>,
    /// Key id.
    pub key_id: SessionId,
    /// Sub session id.
    pub sub_session: Secret,
    /// Session-level nonce.
    pub nonce: u64,
}

/// Fastest session result computer. Computes first possible version that can be recovered on this node.
/// If there's no such version, selects version with the most support.
pub struct FastestResultComputer {
    /// This node id.
    self_node_id: NodeId,
    /// Threshold (if known).
    threshold: Option<usize>,
}

/// Selects version with most support, waiting for responses from all nodes.
pub struct LargestSupportResultComputer;
impl<T> SessionImpl<T> where T: SessionTransport {
    /// Create new session.
    pub fn new(params: SessionParams<T>) -> Self {
        SessionImpl {
            core: SessionCore {
                meta: params.meta,
                sub_session: params.sub_session,
                key_share: params.key_share,
                result_computer: params.result_computer,
                transport: params.transport,
                nonce: params.nonce,
                completed: Condvar::new(),
            },
            data: Mutex::new(SessionData {
                state: SessionState::WaitingForInitialization,
                confirmations: None,
                threshold: None,
                versions: None,
                result: None,
                continue_with: None,
            })
        }
    }

    /// Return session meta.
    pub fn meta(&self) -> &ShareChangeSessionMeta {
        &self.core.meta
    }

    /// Return key threshold.
    pub fn key_threshold(&self) -> Result<usize, Error> {
        Ok(self.data.lock().threshold.clone().ok_or(Error::InvalidStateForRequest)?)
    }

    /// Return nodes that hold the given key version.
    pub fn version_holders(&self, version: &H256) -> Result<BTreeSet<NodeId>, Error> {
        Ok(self.data.lock().versions.as_ref().ok_or(Error::InvalidStateForRequest)?
            .get(version).ok_or(Error::KeyStorage("key version not found".into()))?
            .clone())
    }

    /// Initialize session.
    pub fn initialize(&self, connected_nodes: BTreeSet<NodeId>) -> Result<(), Error> {
        // check state
        let mut data = self.data.lock();
        if data.state != SessionState::WaitingForInitialization {
            return Err(Error::InvalidStateForRequest);
        }

        // update state
        let mut confirmations = connected_nodes;
        let mut versions: BTreeMap<H256, BTreeSet<NodeId>> = BTreeMap::new();
        let received_own_confirmation = confirmations.remove(&self.core.meta.self_node_id);
        if received_own_confirmation {
            if let Some(key_share) = self.core.key_share.as_ref() {
                for version in &key_share.versions {
                    versions.entry(version.hash.clone())
                        .or_insert_with(Default::default)
                        .insert(self.core.meta.self_node_id.clone());
                }
            }
        }

        // update state
        let no_confirmations_required = confirmations.is_empty();
        data.state = SessionState::WaitingForResponses;
        data.confirmations = Some(confirmations);
        data.versions = Some(versions);

        // try to complete session
        Self::try_complete(&self.core, &mut *data);
        if no_confirmations_required && data.state != SessionState::Finished {
            return Err(Error::ConsensusUnreachable);
        } else if data.state == SessionState::Finished {
            return Ok(());
        }

        // send requests
        let confirmations = data.confirmations.as_ref().expect("filled couple of lines above; qed");
        for connected_node in confirmations {
            self.core.transport.send(connected_node, KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
                session: self.core.meta.id.clone().into(),
                sub_session: self.core.sub_session.clone().into(),
                session_nonce: self.core.nonce,
            }))?;
        }

        Ok(())
    }

    /// Process single message.
    pub fn process_message(&self, sender: &NodeId, message: &KeyVersionNegotiationMessage) -> Result<(), Error> {
        if self.core.nonce != message.session_nonce() {
            return Err(Error::ReplayProtection);
        }

        match message {
            &KeyVersionNegotiationMessage::RequestKeyVersions(ref message) =>
                self.on_key_versions_request(sender, message),
            &KeyVersionNegotiationMessage::KeyVersions(ref message) =>
                self.on_key_versions(sender, message),
            &KeyVersionNegotiationMessage::KeyVersionsError(ref message) => {
                self.on_session_error(sender, Error::Io(message.error.clone()));
                Ok(())
            },
        }
    }

    /// Process key versions request.
    pub fn on_key_versions_request(&self, sender: &NodeId, _message: &RequestKeyVersions) -> Result<(), Error> {
        debug_assert!(sender != &self.core.meta.self_node_id);

        // check message
        if *sender != self.core.meta.master_node_id {
            return Err(Error::InvalidMessage);
        }

        // check state
        let mut data = self.data.lock();
        if data.state != SessionState::WaitingForInitialization {
            return Err(Error::InvalidStateForRequest);
        }

        // send response
        self.core.transport.send(sender, KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
            session: self.core.meta.id.clone().into(),
            sub_session: self.core.sub_session.clone().into(),
            session_nonce: self.core.nonce,
            threshold: self.core.key_share.as_ref().map(|key_share| key_share.threshold),
            versions: self.core.key_share.as_ref().map(|key_share|
                key_share.versions.iter().rev()
                    .filter(|v| v.id_numbers.contains_key(sender))
                    .chain(key_share.versions.iter().rev().filter(|v| !v.id_numbers.contains_key(sender)))
                    .map(|v| v.hash.clone().into())
                    .take(VERSIONS_PER_MESSAGE)
                    .collect())
                .unwrap_or_else(|| Default::default())
        }))?;

        // update state
        data.state = SessionState::Finished;

        Ok(())
    }

    /// Process key versions response.
    pub fn on_key_versions(&self, sender: &NodeId, message: &KeyVersions) -> Result<(), Error> {
        debug_assert!(sender != &self.core.meta.self_node_id);

        // check state
        let mut data = self.data.lock();
        if data.state != SessionState::WaitingForResponses && data.state != SessionState::Finished {
            return Err(Error::InvalidStateForRequest);
        }
        let reason = "this field is filled on master node when initializing; this is initialized master node; qed";
        if !data.confirmations.as_mut().expect(reason).remove(sender) {
            return Err(Error::InvalidMessage);
        }

        // remember versions that the sender has
        {
            match message.threshold.clone() {
                Some(threshold) if data.threshold.is_none() => {
                    data.threshold = Some(threshold);
                },
                Some(threshold) if data.threshold.as_ref() == Some(&threshold) => (),
                Some(_) => return Err(Error::InvalidMessage),
                None if message.versions.is_empty() => (),
                None => return Err(Error::InvalidMessage),
            }

            let versions = data.versions.as_mut().expect(reason);
            for version in &message.versions {
                versions.entry(version.clone().into())
                    .or_insert_with(Default::default)
                    .insert(sender.clone());
            }
        }

        // try to compute result
        if data.state != SessionState::Finished {
            Self::try_complete(&self.core, &mut *data);
        }

        Ok(())
    }

    /// Try to complete result && finish session.
    fn try_complete(core: &SessionCore<T>, data: &mut SessionData) {
        let reason = "this field is filled on master node when initializing; try_complete is only called on initialized master node; qed";
        let confirmations = data.confirmations.as_ref().expect(reason);
        let versions = data.versions.as_ref().expect(reason);
        if let Some(result) = core.result_computer.compute_result(data.threshold.clone(), confirmations, versions) {
            data.state = SessionState::Finished;
            data.result = Some(result);
            core.completed.notify_all();
        }
    }
}

impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
    fn set_continue_action(&self, action: ContinueAction) {
        self.data.lock().continue_with = Some(action);
    }

    fn continue_action(&self) -> Option<ContinueAction> {
        self.data.lock().continue_with.clone()
    }

    fn wait(&self) -> Result<(H256, NodeId), Error> {
        let mut data = self.data.lock();
        if !data.result.is_some() {
            self.core.completed.wait(&mut data);
        }

        data.result.as_ref()
            .expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
            .clone()
    }
}

impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
    type Id = SessionIdWithSubSession;

    fn type_name() -> &'static str {
        "version negotiation"
    }

    fn id(&self) -> SessionIdWithSubSession {
        SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.sub_session.clone())
    }

    fn is_finished(&self) -> bool {
        self.data.lock().state == SessionState::Finished
    }

    fn on_session_timeout(&self) {
        let mut data = self.data.lock();

        if data.confirmations.is_some() {
            data.confirmations.as_mut().expect("checked a line above; qed").clear();
            Self::try_complete(&self.core, &mut *data);
            if data.state != SessionState::Finished {
                warn!("{}: key version negotiation session failed with timeout", self.core.meta.self_node_id);

                data.result = Some(Err(Error::ConsensusUnreachable));
                self.core.completed.notify_all();
            }
        }
    }

    fn on_node_timeout(&self, node: &NodeId) {
        self.on_session_error(node, Error::NodeDisconnected)
    }

    fn on_session_error(&self, node: &NodeId, error: Error) {
        let mut data = self.data.lock();

        if data.confirmations.is_some() {
            let is_waiting_for_confirmation = data.confirmations.as_mut().expect("checked a line above; qed").remove(node);
            if is_waiting_for_confirmation {
                Self::try_complete(&self.core, &mut *data);
                if data.state != SessionState::Finished {
                    warn!("{}: key version negotiation session failed because the connection to {} timed out", self.core.meta.self_node_id, node);

                    data.state = SessionState::Finished;
                    data.result = Some(Err(error));
                    self.core.completed.notify_all();
                }
            }
        }
    }

    fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
        match *message {
            Message::KeyVersionNegotiation(ref message) => self.process_message(sender, message),
            _ => unreachable!("cluster checks message to be correct before passing; qed"),
        }
    }
}

impl SessionTransport for IsolatedSessionTransport {
    fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
        self.cluster.send(node, Message::KeyVersionNegotiation(message))
    }
}

impl FastestResultComputer {
    pub fn new(self_node_id: NodeId, key_share: Option<&DocumentKeyShare>) -> Self {
        let threshold = key_share.map(|ks| ks.threshold);
        FastestResultComputer {
            self_node_id: self_node_id,
            threshold: threshold,
        }
    }
}
impl SessionResultComputer for FastestResultComputer {
    fn compute_result(&self, threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>> {
        match self.threshold.or(threshold) {
            // if the threshold is known (we have the key share, or a response told us)
            Some(threshold) => {
                // select a version this node holds, with enough participants
                let has_key_share = self.threshold.is_some();
                let version = versions.iter().find(|&(_, ref n)| !has_key_share || n.contains(&self.self_node_id) && n.len() >= threshold + 1);
                // if there's no such version, wait for more confirmations
                match version {
                    Some((version, nodes)) => Some(Ok((version.clone(), if has_key_share { self.self_node_id.clone() } else { nodes.iter().cloned().nth(0)
                        .expect("version is only inserted when there's at least one owner; qed") }))),
                    None if !confirmations.is_empty() => None,
                    // otherwise - try to find any version
                    None => Some(versions.iter()
                        .find(|&(_, ref n)| n.len() >= threshold + 1)
                        .map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
                            .expect("version is only inserted when there's at least one owner; qed"))))
                        .unwrap_or(Err(Error::ConsensusUnreachable))),
                }
            },
            // if we do not have a share, then wait for all confirmations
            None if !confirmations.is_empty() => None,
            // ...and select the version with the largest support
            None => Some(versions.iter()
                .max_by_key(|&(_, ref n)| n.len())
                .map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
                    .expect("version is only inserted when there's at least one owner; qed"))))
                .unwrap_or(Err(Error::ConsensusUnreachable))),
        }
    }
}

impl SessionResultComputer for LargestSupportResultComputer {
    fn compute_result(&self, _threshold: Option<usize>, confirmations: &BTreeSet<NodeId>, versions: &BTreeMap<H256, BTreeSet<NodeId>>) -> Option<Result<(H256, NodeId), Error>> {
        if !confirmations.is_empty() {
            return None;
        }

        versions.iter()
            .max_by_key(|&(_, ref n)| n.len())
            .map(|(version, nodes)| Ok((version.clone(), nodes.iter().cloned().nth(0)
                .expect("version is only inserted when there's at least one owner; qed"))))
    }
}
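
To contrast the two computers, a test-style sketch (node ids and version hashes are synthetic, built with the same helpers as the tests below):

use std::collections::{BTreeMap, BTreeSet};

let a = math::generate_random_point().unwrap();
let b = math::generate_random_point().unwrap();

// Version 1 is held by two nodes, version 2 by one.
let mut versions: BTreeMap<H256, BTreeSet<NodeId>> = BTreeMap::new();
versions.insert(1.into(), vec![a.clone(), b.clone()].into_iter().collect());
versions.insert(2.into(), vec![a.clone()].into_iter().collect());

let computer = LargestSupportResultComputer;
// One confirmation still outstanding -> undecided, keep waiting.
let pending: BTreeSet<NodeId> = vec![b.clone()].into_iter().collect();
assert!(computer.compute_result(None, &pending, &versions).is_none());
// All replies in -> the version with the most holders wins.
match computer.compute_result(None, &BTreeSet::new(), &versions) {
    Some(Ok((version, _holder))) => assert_eq!(version, 1.into()),
    other => panic!("unexpected result: {:?}", other),
}
// FastestResultComputer, by contrast, may finish before all replies arrive:
// with a known threshold t it accepts the first version having >= t + 1 holders.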
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::collections::{VecDeque, BTreeMap, BTreeSet};
    use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion};
    use key_server_cluster::math;
    use key_server_cluster::cluster::Cluster;
    use key_server_cluster::cluster::tests::DummyCluster;
    use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
    use key_server_cluster::message::{Message, KeyVersionNegotiationMessage, RequestKeyVersions, KeyVersions};
    use super::{SessionImpl, SessionTransport, SessionParams, FastestResultComputer, SessionState};

    struct DummyTransport {
        cluster: Arc<DummyCluster>,
    }

    impl SessionTransport for DummyTransport {
        fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
            self.cluster.send(node, Message::KeyVersionNegotiation(message))
        }
    }

    struct Node {
        pub cluster: Arc<DummyCluster>,
        pub key_storage: Arc<DummyKeyStorage>,
        pub session: SessionImpl<DummyTransport>,
    }

    struct MessageLoop {
        pub session_id: SessionId,
        pub nodes: BTreeMap<NodeId, Node>,
        pub queue: VecDeque<(NodeId, NodeId, Message)>,
    }

    impl MessageLoop {
        pub fn prepare_nodes(nodes_num: usize) -> BTreeMap<NodeId, Arc<DummyKeyStorage>> {
            (0..nodes_num).map(|_| (math::generate_random_point().unwrap(),
                Arc::new(DummyKeyStorage::default()))).collect()
        }

        pub fn empty(nodes_num: usize) -> Self {
            Self::new(Self::prepare_nodes(nodes_num))
        }

        pub fn new(nodes: BTreeMap<NodeId, Arc<DummyKeyStorage>>) -> Self {
            let master_node_id = nodes.keys().cloned().nth(0).unwrap();
            let sub_session = math::generate_random_scalar().unwrap();
            let all_nodes_ids: BTreeSet<_> = nodes.keys().cloned().collect();
            MessageLoop {
                session_id: Default::default(),
                nodes: nodes.iter().map(|(node_id, key_storage)| {
                    let cluster = Arc::new(DummyCluster::new(node_id.clone()));
                    cluster.add_nodes(all_nodes_ids.iter().cloned());
                    (node_id.clone(), Node {
                        cluster: cluster.clone(),
                        key_storage: key_storage.clone(),
                        session: SessionImpl::new(SessionParams {
                            meta: ShareChangeSessionMeta {
                                id: Default::default(),
                                self_node_id: node_id.clone(),
                                master_node_id: master_node_id.clone(),
                            },
                            sub_session: sub_session.clone(),
                            key_share: key_storage.get(&Default::default()).unwrap(),
                            result_computer: Arc::new(FastestResultComputer::new(
                                node_id.clone(),
                                key_storage.get(&Default::default()).unwrap().as_ref(),
                            )),
                            transport: DummyTransport {
                                cluster: cluster,
                            },
                            nonce: 0,
                        }),
                    })
                }).collect(),
                queue: VecDeque::new(),
            }
        }

        pub fn node_id(&self, idx: usize) -> &NodeId {
            self.nodes.keys().nth(idx).unwrap()
        }

        pub fn session(&self, idx: usize) -> &SessionImpl<DummyTransport> {
            &self.nodes.values().nth(idx).unwrap().session
        }
    }

    #[test]
    fn negotiation_fails_if_initialized_twice() {
        let ml = MessageLoop::empty(1);
        assert_eq!(ml.session(0).initialize(BTreeSet::new()), Ok(()));
        assert_eq!(ml.session(0).initialize(BTreeSet::new()), Err(Error::InvalidStateForRequest));
    }

    #[test]
    fn negotiation_fails_if_message_contains_wrong_nonce() {
        let ml = MessageLoop::empty(2);
        assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
            session: Default::default(),
            sub_session: math::generate_random_scalar().unwrap().into(),
            session_nonce: 100,
        })), Err(Error::ReplayProtection));
    }

    #[test]
    fn negotiation_fails_if_versions_request_received_from_non_master() {
        let ml = MessageLoop::empty(3);
        assert_eq!(ml.session(2).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
            session: Default::default(),
            sub_session: math::generate_random_scalar().unwrap().into(),
            session_nonce: 0,
        })), Err(Error::InvalidMessage));
    }

    #[test]
    fn negotiation_fails_if_versions_request_received_twice() {
        let ml = MessageLoop::empty(2);
        assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
            session: Default::default(),
            sub_session: math::generate_random_scalar().unwrap().into(),
            session_nonce: 0,
        })), Ok(()));
        assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions {
            session: Default::default(),
            sub_session: math::generate_random_scalar().unwrap().into(),
            session_nonce: 0,
        })), Err(Error::InvalidStateForRequest));
    }

    #[test]
    fn negotiation_fails_if_versions_received_before_initialization() {
        let ml = MessageLoop::empty(2);
        assert_eq!(ml.session(1).process_message(ml.node_id(0), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
            session: Default::default(),
            sub_session: math::generate_random_scalar().unwrap().into(),
            session_nonce: 0,
            threshold: Some(10),
            versions: Vec::new(),
        })), Err(Error::InvalidStateForRequest));
    }

    #[test]
    fn negotiation_does_not_fail_if_versions_received_after_completion() {
        let ml = MessageLoop::empty(3);
        ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
        assert_eq!(ml.session(0).data.lock().state, SessionState::WaitingForResponses);

        let version_id = (*math::generate_random_scalar().unwrap()).clone();
        assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
            session: Default::default(),
            sub_session: math::generate_random_scalar().unwrap().into(),
            session_nonce: 0,
            threshold: Some(0),
            versions: vec![version_id.clone().into()]
        })), Ok(()));
        assert_eq!(ml.session(0).data.lock().state, SessionState::Finished);

        assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
            session: Default::default(),
            sub_session: math::generate_random_scalar().unwrap().into(),
            session_nonce: 0,
            threshold: Some(0),
            versions: vec![version_id.clone().into()]
        })), Ok(()));
        assert_eq!(ml.session(0).data.lock().state, SessionState::Finished);
    }

    #[test]
    fn negotiation_fails_if_wrong_threshold_sent() {
        let ml = MessageLoop::empty(3);
        ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();

        let version_id = (*math::generate_random_scalar().unwrap()).clone();
|
||||
assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
|
||||
session: Default::default(),
|
||||
sub_session: math::generate_random_scalar().unwrap().into(),
|
||||
session_nonce: 0,
|
||||
threshold: Some(1),
|
||||
versions: vec![version_id.clone().into()]
|
||||
})), Ok(()));
|
||||
assert_eq!(ml.session(0).process_message(ml.node_id(2), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
|
||||
session: Default::default(),
|
||||
sub_session: math::generate_random_scalar().unwrap().into(),
|
||||
session_nonce: 0,
|
||||
threshold: Some(2),
|
||||
versions: vec![version_id.clone().into()]
|
||||
})), Err(Error::InvalidMessage));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn negotiation_fails_if_threshold_empty_when_versions_are_not_empty() {
|
||||
let ml = MessageLoop::empty(2);
|
||||
ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
|
||||
|
||||
let version_id = (*math::generate_random_scalar().unwrap()).clone();
|
||||
assert_eq!(ml.session(0).process_message(ml.node_id(1), &KeyVersionNegotiationMessage::KeyVersions(KeyVersions {
|
||||
session: Default::default(),
|
||||
sub_session: math::generate_random_scalar().unwrap().into(),
|
||||
session_nonce: 0,
|
||||
threshold: None,
|
||||
versions: vec![version_id.clone().into()]
|
||||
})), Err(Error::InvalidMessage));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fast_negotiation_does_not_completes_instantly_when_enough_share_owners_are_connected() {
|
||||
let nodes = MessageLoop::prepare_nodes(2);
|
||||
let version_id = (*math::generate_random_scalar().unwrap()).clone();
|
||||
nodes.values().nth(0).unwrap().insert(Default::default(), DocumentKeyShare {
|
||||
author: Default::default(),
|
||||
threshold: 1,
|
||||
common_point: None,
|
||||
encrypted_point: None,
|
||||
versions: vec![DocumentKeyShareVersion {
|
||||
hash: version_id,
|
||||
id_numbers: vec![(nodes.keys().cloned().nth(0).unwrap(), math::generate_random_scalar().unwrap())].into_iter().collect(),
|
||||
secret_share: math::generate_random_scalar().unwrap(),
|
||||
}],
|
||||
}).unwrap();
|
||||
let ml = MessageLoop::new(nodes);
|
||||
ml.session(0).initialize(ml.nodes.keys().cloned().collect()).unwrap();
|
||||
// we can't be sure that node has given key version because previous ShareAdd session could fail
|
||||
assert!(ml.session(0).data.lock().state != SessionState::Finished);
|
||||
}
|
||||
}
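
// Editor's sketch (not part of the original diff): the MessageLoop above
// buffers (sender, receiver, message) triples in `queue`, while the visible
// tests deliver messages by hand via process_message. Assuming DummyTransport
// were wired to push sent messages into that queue, a pump draining it could
// look like this; `run_queue_sketch` is a hypothetical helper that would live
// inside the tests module above, not Parity API.
impl MessageLoop {
	pub fn run_queue_sketch(&mut self) -> Result<(), Error> {
		while let Some((from, to, message)) = self.queue.pop_front() {
			match message {
				// only key version negotiation messages travel through this loop;
				// indexing panics if `to` is unknown, which is acceptable in a test sketch
				Message::KeyVersionNegotiation(message) =>
					self.nodes[&to].session.process_message(&from, &message)?,
				_ => return Err(Error::InvalidMessage),
			}
		}
		Ok(())
	}
}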
@@ -14,11 +14,10 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

pub mod key_version_negotiation_session;
pub mod servers_set_change_session;
pub mod share_add_session;
pub mod share_change_session;
pub mod share_move_session;
pub mod share_remove_session;

mod sessions_queue;

@@ -20,22 +20,25 @@ use std::collections::btree_map::Entry;
use parking_lot::{Mutex, Condvar};
use ethkey::{Public, Signature};
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage};
use key_server_cluster::math;
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::message::{Message, ServersSetChangeMessage,
	ConsensusMessageWithServersSet, InitializeConsensusSessionWithServersSet,
	ServersSetChangeConsensusMessage, ConfirmConsensusInitialization, UnknownSessionsRequest, UnknownSessions,
	ServersSetChangeShareAddMessage, ServersSetChangeError, ServersSetChangeCompleted,
	ServersSetChangeShareMoveMessage, ServersSetChangeShareRemoveMessage,
	ServersSetChangeDelegate, ServersSetChangeDelegateResponse, InitializeShareChangeSession,
	ConfirmShareChangeSessionInitialization};
	ConfirmShareChangeSessionInitialization, KeyVersionNegotiationMessage, ShareChangeKeyVersionNegotiation};
use key_server_cluster::share_change_session::{ShareChangeSession, ShareChangeSessionParams, ShareChangeSessionPlan,
	prepare_share_change_session_plan};
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
	SessionParams as KeyVersionNegotiationSessionParams, LargestSupportResultComputer,
	SessionTransport as KeyVersionNegotiationTransport, Session as KeyVersionNegotiationSession};
use key_server_cluster::jobs::job_session::JobTransport;
use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
use key_server_cluster::jobs::unknown_sessions_job::{UnknownSessionsJob};
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
use key_server_cluster::admin_sessions::sessions_queue::{SessionsQueue, QueuedSession};
use key_server_cluster::admin_sessions::sessions_queue::SessionsQueue;
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

/// Maximal number of active share change sessions.
@@ -110,6 +113,8 @@ struct SessionData
	pub new_nodes_set: Option<BTreeSet<NodeId>>,
	/// Share change sessions queue (valid on master nodes only).
	pub sessions_queue: Option<SessionsQueue>,
	/// Share change sessions key version negotiation.
	pub negotiation_sessions: BTreeMap<SessionId, KeyVersionNegotiationSessionImpl<ServersSetChangeKeyVersionNegotiationTransport>>,
	/// Share change sessions initialization state (valid on master nodes only).
	pub sessions_initialization_state: BTreeMap<SessionId, SessionInitializationData>,
	/// Sessions delegated to other nodes (valid on master node only).
@@ -164,6 +169,16 @@ struct UnknownSessionsJobTransport
	cluster: Arc<Cluster>,
}

/// Key version negotiation transport.
struct ServersSetChangeKeyVersionNegotiationTransport {
	/// Session id.
	id: SessionId,
	/// Session-level nonce.
	nonce: u64,
	/// Cluster.
	cluster: Arc<Cluster>,
}

impl SessionImpl {
	/// Create new servers set change session.
	pub fn new(params: SessionParams) -> Result<Self, Error> {
@@ -182,6 +197,7 @@ impl SessionImpl
			consensus_session: None,
			new_nodes_set: None,
			sessions_queue: None,
			negotiation_sessions: BTreeMap::new(),
			sessions_initialization_state: BTreeMap::new(),
			delegated_key_sessions: BTreeMap::new(),
			active_key_sessions: BTreeMap::new(),
@@ -207,7 +223,6 @@ impl SessionImpl
		let mut consensus_session = ConsensusSession::new(ConsensusSessionParams {
			meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len())?,
			consensus_executor: ServersSetChangeAccessJob::new_on_master(self.core.admin_public.clone(),
				self.core.all_nodes_set.clone(),
				self.core.all_nodes_set.clone(),
				new_nodes_set.clone(),
				all_set_signature,
@@ -240,6 +255,8 @@ impl SessionImpl
				self.on_unknown_sessions_requested(sender, message),
			&ServersSetChangeMessage::UnknownSessions(ref message) =>
				self.on_unknown_sessions(sender, message),
			&ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref message) =>
				self.on_key_version_negotiation(sender, message),
			&ServersSetChangeMessage::InitializeShareChangeSession(ref message) =>
				self.on_initialize_share_change_session(sender, message),
			&ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref message) =>
@@ -250,12 +267,10 @@ impl SessionImpl
				self.on_delegated_session_completed(sender, message),
			&ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref message) =>
				self.on_share_add_message(sender, message),
			&ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ref message) =>
				self.on_share_move_message(sender, message),
			&ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ref message) =>
				self.on_share_remove_message(sender, message),
			&ServersSetChangeMessage::ServersSetChangeError(ref message) =>
				self.on_session_error(sender, message),
			&ServersSetChangeMessage::ServersSetChangeError(ref message) => {
				self.on_session_error(sender, Error::Io(message.error.clone()));
				Ok(())
			},
			&ServersSetChangeMessage::ServersSetChangeCompleted(ref message) =>
				self.on_session_completed(sender, message),
		}
@@ -278,9 +293,7 @@ impl SessionImpl
			&ConsensusMessageWithServersSet::InitializeConsensusSession(_) => {
				data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams {
					meta: self.core.meta.clone().into_consensus_meta(self.core.all_nodes_set.len())?,
					consensus_executor: ServersSetChangeAccessJob::new_on_slave(self.core.admin_public.clone(),
						self.core.all_nodes_set.clone(),
					),
					consensus_executor: ServersSetChangeAccessJob::new_on_slave(self.core.admin_public.clone()),
					consensus_transport: ServersSetChangeConsensusTransport {
						id: self.core.meta.id.clone(),
						nonce: self.core.nonce,
@@ -367,12 +380,69 @@ impl SessionImpl

		// initialize sessions queue
		data.state = SessionState::RunningShareChangeSessions;
		data.sessions_queue = Some(SessionsQueue::new(self.core.key_storage.clone(), unknown_sessions));
		data.sessions_queue = Some(SessionsQueue::new(&self.core.key_storage, unknown_sessions.keys().cloned().collect()));

		// and disseminate session initialization requests
		Self::disseminate_session_initialization_requests(&self.core, &mut *data)
	}

	/// When key version negotiation message is received.
	pub fn on_key_version_negotiation(&self, sender: &NodeId, message: &ShareChangeKeyVersionNegotiation) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		// check state
		let mut data = self.data.lock();
		if data.state != SessionState::RunningShareChangeSessions {
			return Err(Error::InvalidStateForRequest);
		}

		// process message
		match &message.message {
			&KeyVersionNegotiationMessage::RequestKeyVersions(ref message) if sender == &self.core.meta.master_node_id => {
				let key_id = message.session.clone().into();
				let key_share = self.core.key_storage.get(&key_id).map_err(|e| Error::KeyStorage(e.into()))?;
				let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams {
					meta: ShareChangeSessionMeta {
						id: key_id.clone(),
						self_node_id: self.core.meta.self_node_id.clone(),
						master_node_id: sender.clone(),
					},
					sub_session: message.sub_session.clone().into(),
					key_share: key_share,
					result_computer: Arc::new(LargestSupportResultComputer {}),
					transport: ServersSetChangeKeyVersionNegotiationTransport {
						id: key_id,
						nonce: self.core.nonce,
						cluster: self.core.cluster.clone(),
					},
					nonce: message.session_nonce,
				});
				negotiation_session.on_key_versions_request(sender, message)?;
				debug_assert!(negotiation_session.is_finished());
				Ok(())
			},
			&KeyVersionNegotiationMessage::KeyVersions(ref message) if self.core.meta.self_node_id == self.core.meta.master_node_id => {
				let key_id = message.session.clone().into();
				{
					let negotiation_session = data.negotiation_sessions.get(&key_id).ok_or(Error::InvalidMessage)?;
					negotiation_session.on_key_versions(sender, message)?;
					if !negotiation_session.is_finished() {
						return Ok(());
					}
				}

				// else prepare plan && start share change session
				if !Self::initialize_share_change_session(&self.core, &mut *data, key_id)? {
					Self::disseminate_session_initialization_requests(&self.core, &mut *data)?;
				}

				Ok(())
			},
			_ => Err(Error::InvalidMessage),
		}
	}

	/// When share change session initialization is requested.
	pub fn on_initialize_share_change_session(&self, sender: &NodeId, message: &InitializeShareChangeSession) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
@@ -395,10 +465,9 @@ impl SessionImpl
			true => return Err(Error::InvalidMessage),
			false => {
				let master_plan = ShareChangeSessionPlan {
					isolated_nodes: message.isolated_nodes.iter().cloned().map(Into::into).collect(),
					nodes_to_add: message.shares_to_add.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(),
					nodes_to_move: message.shares_to_move.iter().map(|(k, v)| (k.clone().into(), v.clone().into())).collect(),
					nodes_to_remove: message.shares_to_remove.iter().cloned().map(Into::into).collect(),
					key_version: message.version.clone().into(),
					consensus_group: message.consensus_group.iter().cloned().map(Into::into).collect(),
					new_nodes_map: message.new_nodes_map.iter().map(|(k, v)| (k.clone().into(), v.clone().map(Into::into))).collect(),
				};

				// if the master plan is empty, the master is cheating
@@ -406,24 +475,29 @@ impl SessionImpl
					return Err(Error::InvalidMessage);
				}

				// on nodes which have their own key share, we can check whether the master node's plan is correct
				if let Ok(key_share) = self.core.key_storage.get(&key_id) {
				// on nodes holding the selected key share version, we can check whether the master node's plan is correct
				let master_node_id = message.master_node_id.clone().into();
				if let Some(key_share) = self.core.key_storage.get(&key_id).map_err(|e| Error::KeyStorage(e.into()))? {
					let version = message.version.clone().into();
					if let Ok(key_version) = key_share.version(&version) {
						let key_share_owners = key_version.id_numbers.keys().cloned().collect();
						let new_nodes_set = data.new_nodes_set.as_ref()
							.expect("new_nodes_set is filled during consensus establishing; change sessions are running after this; qed");
						let local_plan = prepare_share_change_session_plan(&self.core.all_nodes_set, &key_share.id_numbers.keys().cloned().collect(), new_nodes_set)?;
						if local_plan.isolated_nodes != master_plan.isolated_nodes
							|| local_plan.nodes_to_add.keys().any(|n| !local_plan.nodes_to_add.contains_key(n))
							|| local_plan.nodes_to_add.keys().any(|n| !master_plan.nodes_to_add.contains_key(n))
							|| local_plan.nodes_to_move != master_plan.nodes_to_move
							|| local_plan.nodes_to_remove != master_plan.nodes_to_remove {
						let local_plan = prepare_share_change_session_plan(
							&self.core.all_nodes_set,
							key_share.threshold,
							version,
							&master_node_id,
							&key_share_owners,
							new_nodes_set)?;

						if local_plan.new_nodes_map.keys().collect::<BTreeSet<_>>() != master_plan.new_nodes_map.keys().collect::<BTreeSet<_>>() {
							return Err(Error::InvalidMessage);
						}
					}
				}

				let session = Self::create_share_change_session(&self.core, key_id,
					message.master_node_id.clone().into(),
					message.old_shares_set.iter().cloned().map(Into::into).collect(),
					master_plan)?;
				let session = Self::create_share_change_session(&self.core, key_id, master_node_id, master_plan)?;
				if !session.is_finished() {
					data.active_key_sessions.insert(key_id.clone(), session);
				}
@@ -551,31 +625,6 @@ impl SessionImpl
			session.on_share_add_message(sender, &message.message))
	}

	/// When share move message is received.
	pub fn on_share_move_message(&self, sender: &NodeId, message: &ServersSetChangeShareMoveMessage) -> Result<(), Error> {
		self.on_share_change_message(message.message.session_id().clone().into(), |session|
			session.on_share_move_message(sender, &message.message))
	}

	/// When share remove message is received.
	pub fn on_share_remove_message(&self, sender: &NodeId, message: &ServersSetChangeShareRemoveMessage) -> Result<(), Error> {
		self.on_share_change_message(message.message.session_id().clone().into(), |session|
			session.on_share_remove_message(sender, &message.message))
	}

	/// When an error has occurred on another node.
	pub fn on_session_error(&self, sender: &NodeId, message: &ServersSetChangeError) -> Result<(), Error> {
		let mut data = self.data.lock();

		warn!("{}: servers set change session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender);

		data.state = SessionState::Finished;
		data.result = Some(Err(Error::Io(message.error.clone())));
		self.core.completed.notify_all();

		Ok(())
	}

	/// When session completion message is received.
	pub fn on_session_completed(&self, sender: &NodeId, message: &ServersSetChangeCompleted) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
@@ -591,6 +640,13 @@ impl SessionImpl
			return Err(Error::TooEarlyForRequest);
		}

		// if we are in the set of nodes that are being removed from the cluster, let's clear the database
		if !data.new_nodes_set.as_ref()
			.expect("new_nodes_set is filled during initialization; session is completed after initialization; qed")
			.contains(&self.core.meta.self_node_id) {
			self.core.key_storage.clear().map_err(|e| Error::KeyStorage(e.into()))?;
		}

		data.state = SessionState::Finished;
		self.core.completed.notify_all();

@@ -629,7 +685,7 @@ impl SessionImpl
	}

	/// Create share change session.
	fn create_share_change_session(core: &SessionCore, key_id: SessionId, master_node_id: NodeId, old_nodes_set: BTreeSet<NodeId>, session_plan: ShareChangeSessionPlan) -> Result<ShareChangeSession, Error> {
	fn create_share_change_session(core: &SessionCore, key_id: SessionId, master_node_id: NodeId, session_plan: ShareChangeSessionPlan) -> Result<ShareChangeSession, Error> {
		ShareChangeSession::new(ShareChangeSessionParams {
			session_id: key_id.clone(),
			nonce: core.nonce,
@@ -640,8 +696,6 @@ impl SessionImpl
			},
			cluster: core.cluster.clone(),
			key_storage: core.key_storage.clone(),
			old_nodes_set: old_nodes_set,
			cluster_nodes_set: core.all_nodes_set.clone(),
			plan: session_plan,
		})
	}
@@ -649,77 +703,43 @@ impl SessionImpl
	/// Disseminate session initialization requests.
	fn disseminate_session_initialization_requests(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> {
		debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id);
		if let Some(sessions_queue) = data.sessions_queue.as_mut() {
			let mut number_of_sessions_to_start = MAX_ACTIVE_KEY_SESSIONS.saturating_sub(data.active_key_sessions.len() + data.delegated_key_sessions.len());
			let new_nodes_set = data.new_nodes_set.as_ref()
				.expect("this method is called after consensus established; new_nodes_set is a result of consensus session; qed");
		if data.sessions_queue.is_some() {
			let number_of_sessions_active = data.active_key_sessions.len()
				+ data.delegated_key_sessions.len()
				+ data.negotiation_sessions.len();
			let mut number_of_sessions_to_start = MAX_ACTIVE_KEY_SESSIONS.saturating_sub(number_of_sessions_active);
			while number_of_sessions_to_start > 0 {
				let queued_session = match sessions_queue.next() {
				let key_id = match data.sessions_queue.as_mut().expect("checked before beginning of the loop; qed").next() {
					None => break, // complete session
					Some(Err(e)) => return Err(e),
					Some(Ok(session)) => session,
					Some(Ok(key_id)) => key_id,
				};

				// prepare session change plan && check if something needs to be changed
				let old_nodes_set = queued_session.nodes();
				let session_plan = prepare_share_change_session_plan(&core.all_nodes_set, &old_nodes_set, new_nodes_set)?;
				if session_plan.is_empty() {
				let key_share = core.key_storage.get(&key_id).map_err(|e| Error::KeyStorage(e.into()))?;
				let negotiation_session = KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams {
					meta: ShareChangeSessionMeta {
						id: key_id,
						self_node_id: core.meta.self_node_id.clone(),
						master_node_id: core.meta.self_node_id.clone(),
					},
					sub_session: math::generate_random_scalar()?,
					key_share: key_share,
					result_computer: Arc::new(LargestSupportResultComputer {}), // TODO: optimizations: could use modified Fast version
					transport: ServersSetChangeKeyVersionNegotiationTransport {
						id: key_id,
						nonce: core.nonce,
						cluster: core.cluster.clone(),
					},
					nonce: 0,
				});
				negotiation_session.initialize(core.cluster.nodes())?;
				if !negotiation_session.is_finished() {
					data.negotiation_sessions.insert(key_id, negotiation_session);
					continue;
				}

				// select master for this session
				let session_master = match &queued_session {
					&QueuedSession::Known(_, _) => core.meta.self_node_id.clone(),
					&QueuedSession::Unknown(_, ref nodes) => nodes.iter().cloned().nth(0)
						.expect("unknown session is reported by at least one node; qed"),
				};

				// send key session initialization requests
				let key_id = queued_session.id().clone();
				let mut confirmations: BTreeSet<_> = old_nodes_set.iter().cloned()
					.chain(session_plan.nodes_to_add.keys().cloned())
					.chain(session_plan.nodes_to_move.keys().cloned())
					.filter(|n| core.all_nodes_set.contains(n))
					.collect();
				let need_create_session = confirmations.remove(&core.meta.self_node_id);
				let initialization_message = Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(InitializeShareChangeSession {
					session: core.meta.id.clone().into(),
					session_nonce: core.nonce,
					key_id: key_id.clone().into(),
					master_node_id: session_master.clone().into(),
					old_shares_set: old_nodes_set.iter().cloned().map(Into::into).collect(),
					isolated_nodes: session_plan.isolated_nodes.iter().cloned().map(Into::into).collect(),
					shares_to_add: session_plan.nodes_to_add.iter()
						.map(|(n, nid)| (n.clone().into(), nid.clone().into()))
						.collect(),
					shares_to_move: session_plan.nodes_to_move.iter()
						.map(|(source, target)| (source.clone().into(), target.clone().into()))
						.collect(),
					shares_to_remove: session_plan.nodes_to_remove.iter().cloned().map(Into::into).collect(),
				}));
				for node in &confirmations {
					core.cluster.send(&node, initialization_message.clone())?;
				}

				// create session on this node if required
				if need_create_session {
					data.active_key_sessions.insert(key_id.clone(), Self::create_share_change_session(core, key_id,
						session_master.clone(),
						queued_session.nodes(),
						session_plan)?);
				}

				// initialize session if required
				let wait_for_confirmations = !confirmations.is_empty();
				if !wait_for_confirmations {
					data.active_key_sessions.get_mut(&key_id)
						.expect("!wait_for_confirmations is true only if this is the only session participant; if this is session participant, session is created above; qed")
						.initialize()?;
				} else {
					data.sessions_initialization_state.insert(key_id, SessionInitializationData {
						master: session_master,
						confirmations: confirmations,
					});
				if !Self::initialize_share_change_session(core, data, key_id)? {
					continue;
				}

				number_of_sessions_to_start = number_of_sessions_to_start - 1;
@@ -734,7 +754,9 @@ impl SessionImpl
			// iteration is finished => complete session
			if data.state != SessionState::Finished {
				data.sessions_queue = None;
				if data.active_key_sessions.len() == 0 && data.delegated_key_sessions.len() == 0 {
				if data.active_key_sessions.len() == 0 &&
					data.delegated_key_sessions.len() == 0 &&
					data.negotiation_sessions.len() == 0 {
					Self::complete_session(core, data)?;
				}
			}
@@ -742,6 +764,65 @@ impl SessionImpl
		Ok(())
	}
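
	// Editor's sketch (hypothetical helper, not in the original diff): the
	// throttling in disseminate_session_initialization_requests above, shown
	// standalone. Negotiation sessions now count against the same
	// MAX_ACTIVE_KEY_SESSIONS budget as active and delegated change sessions,
	// and saturating_sub keeps the budget at zero instead of underflowing.
	// e.g. with a budget of 4 and 2 + 1 + 1 sessions in flight, nothing new
	// is started until one of them completes.
	fn sessions_to_start_budget_sketch(max_active: usize, active: usize, delegated: usize, negotiating: usize) -> usize {
		max_active.saturating_sub(active + delegated + negotiating)
	}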

	/// Initialize share change session.
	fn initialize_share_change_session(core: &SessionCore, data: &mut SessionData, key_id: SessionId) -> Result<bool, Error> {
		// get selected version && old nodes set from key negotiation session
		let negotiation_session = data.negotiation_sessions.remove(&key_id)
			.expect("share change session is only initialized when negotiation is completed; qed");
		let (selected_version, selected_master) = negotiation_session.wait()?;
		let selected_version_holders = negotiation_session.version_holders(&selected_version)?;
		let selected_version_threshold = negotiation_session.key_threshold()?;

		// prepare session change plan && check if something needs to be changed
		let old_nodes_set = selected_version_holders;
		let new_nodes_set = data.new_nodes_set.as_ref()
			.expect("this method is called after consensus established; new_nodes_set is a result of consensus session; qed");
		let session_plan = prepare_share_change_session_plan(&core.all_nodes_set, selected_version_threshold, selected_version.clone(), &selected_master, &old_nodes_set, new_nodes_set)?;
		if session_plan.is_empty() {
			return Ok(false);
		}

		// send key session initialization requests
		let mut confirmations: BTreeSet<_> = session_plan.new_nodes_map.keys().cloned().collect();
		let need_create_session = confirmations.remove(&core.meta.self_node_id);
		let initialization_message = Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(InitializeShareChangeSession {
			session: core.meta.id.clone().into(),
			session_nonce: core.nonce,
			key_id: key_id.clone().into(),
			version: selected_version.into(),
			master_node_id: selected_master.clone().into(),
			consensus_group: session_plan.consensus_group.iter().cloned().map(Into::into).collect(),
			new_nodes_map: session_plan.new_nodes_map.iter()
				.map(|(n, nid)| (n.clone().into(), nid.clone().map(Into::into)))
				.collect(),
		}));
		for node in &confirmations {
			core.cluster.send(&node, initialization_message.clone())?;
		}

		// create session on this node if required
		if need_create_session {
			data.active_key_sessions.insert(key_id.clone(), Self::create_share_change_session(core, key_id,
				selected_master.clone(),
				session_plan)?);
		}

		// initialize session if required
		let wait_for_confirmations = !confirmations.is_empty();
		if !wait_for_confirmations {
			data.active_key_sessions.get_mut(&key_id)
				.expect("!wait_for_confirmations is true only if this is the only session participant; if this is session participant, session is created above; qed")
				.initialize()?;
		} else {
			data.sessions_initialization_state.insert(key_id, SessionInitializationData {
				master: selected_master,
				confirmations: confirmations,
			});
		}

		Ok(true)
	}

	/// Return delegated session to master.
	fn return_delegated_session(core: &SessionCore, key_id: &SessionId) -> Result<(), Error> {
		assert!(core.meta.self_node_id != core.meta.master_node_id);
@@ -800,29 +881,55 @@ impl Session for SessionImpl
	}
}

impl ClusterSession for SessionImpl {
	type Id = SessionId;

	fn type_name() -> &'static str {
		"servers set change"
	}

	fn id(&self) -> SessionId {
		self.core.meta.id.clone()
	}

	fn is_finished(&self) -> bool {
		self.data.lock().state == SessionState::Finished
	}

	fn on_session_timeout(&self) {
		let mut data = self.data.lock();

		warn!("{}: servers set change session failed with timeout", self.core.meta.self_node_id);

		data.state = SessionState::Finished;
		data.result = Some(Err(Error::NodeDisconnected));
		self.core.completed.notify_all();
		self.on_session_error(&self.core.meta.self_node_id, Error::NodeDisconnected);
	}

	fn on_node_timeout(&self, node: &NodeId) {
		self.on_session_error(node, Error::NodeDisconnected);
	}

	fn on_session_error(&self, node: &NodeId, error: Error) {
		// error in generation session is considered fatal
		// => broadcast error if the error occurred on this node
		if *node == self.core.meta.self_node_id {
			// do not bother processing the send error, as we are already processing an error
			let _ = self.core.cluster.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(ServersSetChangeError {
				session: self.core.meta.id.clone().into(),
				session_nonce: self.core.nonce,
				error: error.clone().into(),
			})));
		}

		let mut data = self.data.lock();

		warn!("{}: servers set change session failed because {} connection has timed out", self.core.meta.self_node_id, node);
		warn!("{}: servers set change session failed: {} on {}", self.core.meta.self_node_id, error, node);

		data.state = SessionState::Finished;
		data.result = Some(Err(Error::NodeDisconnected));
		data.result = Some(Err(error));
		self.core.completed.notify_all();
	}

	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
		match *message {
			Message::ServersSetChange(ref message) => self.process_message(sender, message),
			_ => unreachable!("cluster checks message to be correct before passing; qed"),
		}
	}
}

impl JobTransport for ServersSetChangeConsensusTransport {
@@ -873,6 +980,16 @@ impl JobTransport for UnknownSessionsJobTransport
	}
}

impl KeyVersionNegotiationTransport for ServersSetChangeKeyVersionNegotiationTransport {
	fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> {
		self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ShareChangeKeyVersionNegotiation {
			session: self.id.clone().into(),
			session_nonce: self.nonce,
			message: message,
		})))
	}
}

fn check_nodes_set(all_nodes_set: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>) -> Result<(), Error> {
	// all new nodes must be a part of the all-nodes set
	match new_nodes_set.iter().any(|n| !all_nodes_set.contains(n)) {
@@ -891,7 +1008,6 @@ pub mod tests
	use key_server_cluster::cluster_sessions::ClusterSession;
	use key_server_cluster::cluster::tests::DummyCluster;
	use key_server_cluster::generation_session::tests::{MessageLoop as GenerationMessageLoop, Node as GenerationNode, generate_nodes_ids};
	use key_server_cluster::math;
	use key_server_cluster::message::Message;
	use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
	use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved;
@@ -946,11 +1062,7 @@ pub mod tests
		let admin_public = admin_key_pair.public().clone();

		// compute original secret key
		let original_secret = math::compute_joint_secret(gml.nodes.values()
			.map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone())
			.collect::<Vec<_>>()
			.iter()).unwrap();
		let original_key_pair = KeyPair::from_secret(original_secret).unwrap();
		let original_key_pair = gml.compute_key_pair(1);

		// all active nodes set
		let mut all_nodes_set: BTreeSet<_> = gml.nodes.keys()
@@ -1108,7 +1220,7 @@ pub mod tests
			.collect());

		// check that all removed nodes do not own a key share
		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).is_err()));
		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none()));

		// check that all sessions have finished
		assert!(ml.nodes.values().all(|n| n.session.is_finished()));
@@ -1134,7 +1246,7 @@ pub mod tests
			.collect());

		// check that all removed nodes do not own a key share
		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).is_err()));
		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_remove.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_none()));

		// check that all sessions have finished
		assert!(ml.nodes.values().all(|n| n.session.is_finished()));
@@ -1160,7 +1272,7 @@ pub mod tests
			.collect());

		// check that all isolated nodes still own their key share
		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_isolate.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).is_ok()));
		assert!(ml.nodes.iter().filter(|&(k, _)| nodes_to_isolate.contains(k)).all(|(_, v)| v.key_storage.get(&SessionId::default()).unwrap().is_some()));

		// check that all sessions have finished
		assert!(ml.nodes.iter().filter(|&(k, _)| !nodes_to_isolate.contains(k)).all(|(_, v)| v.session.is_finished()));
@@ -15,35 +15,24 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::{VecDeque, BTreeSet, BTreeMap};
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare};

/// Session, queued for change.
pub enum QueuedSession {
	/// Session is known on this node.
	Known(SessionId, DocumentKeyShare),
	/// Session is unknown on this node.
	Unknown(SessionId, BTreeSet<NodeId>),
}
use std::collections::{VecDeque, BTreeSet};
use key_server_cluster::{Error, SessionId, KeyStorage};

/// Queue of share change sessions.
pub struct SessionsQueue {
	/// Key storage.
	key_storage: Arc<KeyStorage>,
	/// Sessions, known on this node.
	known_sessions: VecDeque<SessionId>,
	/// Unknown sessions.
	unknown_sessions: VecDeque<(SessionId, BTreeSet<NodeId>)>,
	unknown_sessions: VecDeque<SessionId>,
}

impl SessionsQueue {
	/// Create new sessions queue.
	pub fn new(key_storage: Arc<KeyStorage>, unknown_sessions: BTreeMap<SessionId, BTreeSet<NodeId>>) -> Self {
	pub fn new(key_storage: &Arc<KeyStorage>, unknown_sessions: BTreeSet<SessionId>) -> Self {
		// TODO: optimizations:
		// 1) known sessions - change to iter
		// 2) unknown sessions - request chunk-by-chunk
		SessionsQueue {
			key_storage: key_storage.clone(),
			known_sessions: key_storage.iter().map(|(k, _)| k).collect(),
			unknown_sessions: unknown_sessions.into_iter().collect(),
		}
@@ -51,37 +40,17 @@ impl SessionsQueue
}

impl Iterator for SessionsQueue {
	type Item = Result<QueuedSession, Error>;
	type Item = Result<SessionId, Error>;

	fn next(&mut self) -> Option<Self::Item> {
		if let Some(known_session) = self.known_sessions.pop_front() {
			return Some(self.key_storage.get(&known_session)
				.map(|session| QueuedSession::Known(known_session, session))
				.map_err(|e| Error::KeyStorage(e.into())));
			return Some(Ok(known_session));
		}

		if let Some(unknown_session) = self.unknown_sessions.pop_front() {
			return Some(Ok(QueuedSession::Unknown(unknown_session.0, unknown_session.1)));
			return Some(Ok(unknown_session));
		}

		None
	}
}

impl QueuedSession {
	/// Queued session (key) id.
	pub fn id(&self) -> &SessionId {
		match *self {
			QueuedSession::Known(ref session_id, _) => session_id,
			QueuedSession::Unknown(ref session_id, _) => session_id,
		}
	}

	/// Owners of key shares (aka session nodes).
	pub fn nodes(&self) -> BTreeSet<NodeId> {
		match *self {
			QueuedSession::Known(_, ref key_share) => key_share.id_numbers.keys().cloned().collect(),
			QueuedSession::Unknown(_, ref nodes) => nodes.clone(),
		}
	}
}
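
// Editor's sketch (not in the original diff): with the queue's new
// Item = Result<SessionId, Error>, a caller can drain it and stop at the
// first error by collecting into a Result; known sessions are yielded
// before unknown ones. `drain_sessions_sketch` is a hypothetical helper,
// not part of the Parity codebase.
fn drain_sessions_sketch(queue: SessionsQueue) -> Result<Vec<SessionId>, Error> {
	queue.collect()
}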

File diff suppressed because it is too large
@@ -16,6 +16,7 @@

use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap};
use bigint::hash::H256;
use ethkey::Secret;
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage};
use key_server_cluster::cluster::Cluster;
@@ -23,15 +24,10 @@ use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::math;
use key_server_cluster::jobs::servers_set_change_access_job::ServersSetChangeAccessRequest;
use key_server_cluster::jobs::job_session::JobTransport;
use key_server_cluster::message::{Message, ServersSetChangeMessage, ServersSetChangeShareAddMessage, ServersSetChangeShareMoveMessage,
	ServersSetChangeShareRemoveMessage};
use key_server_cluster::message::{Message, ServersSetChangeMessage, ServersSetChangeShareAddMessage};
use key_server_cluster::share_add_session::{SessionTransport as ShareAddSessionTransport,
	SessionImpl as ShareAddSessionImpl, SessionParams as ShareAddSessionParams};
use key_server_cluster::share_move_session::{SessionTransport as ShareMoveSessionTransport,
	SessionImpl as ShareMoveSessionImpl, SessionParams as ShareMoveSessionParams};
use key_server_cluster::share_remove_session::{SessionTransport as ShareRemoveSessionTransport,
	SessionImpl as ShareRemoveSessionImpl, SessionParams as ShareRemoveSessionParams};
use key_server_cluster::message::{ShareAddMessage, ShareMoveMessage, ShareRemoveMessage};
use key_server_cluster::message::ShareAddMessage;
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

/// Single session meta-change session. Brief overview:
@@ -50,22 +46,14 @@ pub struct ShareChangeSession
	cluster: Arc<Cluster>,
	/// Key storage.
	key_storage: Arc<KeyStorage>,
	/// Old nodes set.
	old_nodes_set: BTreeSet<NodeId>,
	/// All cluster nodes set.
	cluster_nodes_set: BTreeSet<NodeId>,
	/// Key version.
	key_version: H256,
	/// Consensus group to use in ShareAdd session.
	consensus_group: Option<BTreeSet<NodeId>>,
	/// Nodes to add shares for.
	nodes_to_add: Option<BTreeMap<NodeId, Secret>>,
	/// Nodes to move shares from/to.
	nodes_to_move: Option<BTreeMap<NodeId, NodeId>>,
	/// Nodes to remove shares from.
	nodes_to_remove: Option<BTreeSet<NodeId>>,
	new_nodes_map: Option<BTreeMap<NodeId, Option<Secret>>>,
	/// Share add session.
	share_add_session: Option<ShareAddSessionImpl<ShareChangeTransport>>,
	/// Share move session.
	share_move_session: Option<ShareMoveSessionImpl<ShareChangeTransport>>,
	/// Share remove session.
	share_remove_session: Option<ShareRemoveSessionImpl<ShareChangeTransport>>,
	/// Is finished.
	is_finished: bool,
}
@@ -73,14 +61,12 @@ pub struct ShareChangeSession
/// Share change session plan.
#[derive(Debug)]
pub struct ShareChangeSessionPlan {
	/// Nodes that are isolated and need to be removed before share addition.
	pub isolated_nodes: BTreeSet<NodeId>,
	/// Key version that plan is valid for.
	pub key_version: H256,
	/// Consensus group to use in ShareAdd session.
	pub consensus_group: BTreeSet<NodeId>,
	/// Nodes to add shares for.
	pub nodes_to_add: BTreeMap<NodeId, Secret>,
	/// Nodes to move shares from/to (keys = target nodes, values = source nodes).
	pub nodes_to_move: BTreeMap<NodeId, NodeId>,
	/// Nodes to remove shares from.
	pub nodes_to_remove: BTreeSet<NodeId>,
	pub new_nodes_map: BTreeMap<NodeId, Option<Secret>>,
}

/// Session parameters.
@@ -95,10 +81,6 @@ pub struct ShareChangeSessionParams
	pub cluster: Arc<Cluster>,
	/// Keys storage.
	pub key_storage: Arc<KeyStorage>,
	/// All cluster nodes set.
	pub cluster_nodes_set: BTreeSet<NodeId>,
	/// Old nodes set.
	pub old_nodes_set: BTreeSet<NodeId>,
	/// Session plan.
	pub plan: ShareChangeSessionPlan,
}
@@ -118,33 +100,22 @@ impl ShareChangeSession
	/// Create new share change session.
	pub fn new(params: ShareChangeSessionParams) -> Result<Self, Error> {
		// we can't create the sessions right now, because the key share is read when a session is created, but it can be changed by a previous session
		let isolated_nodes = if !params.plan.isolated_nodes.is_empty() { Some(params.plan.isolated_nodes) } else { None };
		let nodes_to_add = if !params.plan.nodes_to_add.is_empty() { Some(params.plan.nodes_to_add) } else { None };
		let nodes_to_remove = if !params.plan.nodes_to_remove.is_empty() { Some(params.plan.nodes_to_remove) } else { None };
		let nodes_to_move = if !params.plan.nodes_to_move.is_empty() { Some(params.plan.nodes_to_move) } else { None };
		debug_assert!(isolated_nodes.is_some() || nodes_to_add.is_some() || nodes_to_move.is_some() || nodes_to_remove.is_some());
		let key_version = params.plan.key_version;
		let consensus_group = if !params.plan.consensus_group.is_empty() { Some(params.plan.consensus_group) } else { None };
		let new_nodes_map = if !params.plan.new_nodes_map.is_empty() { Some(params.plan.new_nodes_map) } else { None };
		debug_assert!(new_nodes_map.is_some());

		// if it is a degenerate session (only isolated nodes are removed && no network communication required)
		// => remove isolated nodes && finish session
		if let Some(isolated_nodes) = isolated_nodes {
			Self::remove_isolated_nodes(&params.meta, &params.key_storage, isolated_nodes)?;
		}

		let is_finished = nodes_to_add.is_none() && nodes_to_remove.is_none() && nodes_to_move.is_none();
		let is_finished = new_nodes_map.is_none();
		Ok(ShareChangeSession {
			session_id: params.session_id,
			nonce: params.nonce,
			meta: params.meta,
			cluster: params.cluster,
			key_storage: params.key_storage,
			old_nodes_set: params.old_nodes_set,
			cluster_nodes_set: params.cluster_nodes_set,
			nodes_to_add: nodes_to_add,
			nodes_to_remove: nodes_to_remove,
			nodes_to_move: nodes_to_move,
			key_version: key_version,
			consensus_group: consensus_group,
			new_nodes_map: new_nodes_map,
			share_add_session: None,
			share_move_session: None,
			share_remove_session: None,
			is_finished: is_finished,
		})
	}
@@ -184,52 +155,10 @@ impl ShareChangeSession
		Ok(())
	}

	/// When share-move message is received.
	pub fn on_share_move_message(&mut self, sender: &NodeId, message: &ShareMoveMessage) -> Result<(), Error> {
		if self.share_move_session.is_none() {
			self.create_share_move_session()?;
		}

		let change_state_needed = self.share_move_session.as_ref()
			.map(|share_move_session| {
				let was_finished = share_move_session.is_finished();
				share_move_session.process_message(sender, message)
					.map(|_| share_move_session.is_finished() && !was_finished)
			})
			.unwrap_or(Err(Error::InvalidMessage))?;
		if change_state_needed {
			self.proceed_to_next_state()?;
		}

		Ok(())
	}

	/// When share-remove message is received.
	pub fn on_share_remove_message(&mut self, sender: &NodeId, message: &ShareRemoveMessage) -> Result<(), Error> {
		if self.share_remove_session.is_none() {
			self.create_share_remove_session()?;
		}

		let change_state_needed = self.share_remove_session.as_ref()
			.map(|share_remove_session| {
				let was_finished = share_remove_session.is_finished();
				share_remove_session.process_message(sender, message)
					.map(|_| share_remove_session.is_finished() && !was_finished)
			})
			.unwrap_or(Err(Error::InvalidMessage))?;
		if change_state_needed {
			self.proceed_to_next_state()?;
		}

		Ok(())
	}

	/// Create new share add session.
	fn create_share_add_session(&mut self) -> Result<(), Error> {
		let nodes_to_add = self.nodes_to_add.take().ok_or(Error::InvalidStateForRequest)?;
		let new_nodes_set = self.old_nodes_set.iter().map(|n| (n.clone(), None))
			.chain(nodes_to_add.clone().into_iter().map(|(k, v)| (k, Some(v))))
			.collect();
		let consensus_group = self.consensus_group.take().ok_or(Error::InvalidStateForRequest)?;
		let new_nodes_map = self.new_nodes_map.take().ok_or(Error::InvalidStateForRequest)?;
		let share_add_session = ShareAddSessionImpl::new(ShareAddSessionParams {
			meta: self.meta.clone(),
			nonce: self.nonce,
@@ -237,88 +166,31 @@ impl ShareChangeSession
			key_storage: self.key_storage.clone(),
			admin_public: None,
		})?;
		share_add_session.set_consensus_output(self.old_nodes_set.clone(), new_nodes_set)?;
		share_add_session.set_consensus_output(&self.key_version, consensus_group, new_nodes_map)?;
		self.share_add_session = Some(share_add_session);
		Ok(())
	}

	/// Create new share move session.
	fn create_share_move_session(&mut self) -> Result<(), Error> {
		let nodes_to_move = self.nodes_to_move.take().ok_or(Error::InvalidStateForRequest)?;
		let share_move_session = ShareMoveSessionImpl::new(ShareMoveSessionParams {
			meta: self.meta.clone(),
			nonce: self.nonce,
			transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()),
			key_storage: self.key_storage.clone(),
			admin_public: None,
		})?;
		share_move_session.set_consensus_output(nodes_to_move)?;
		self.share_move_session = Some(share_move_session);
		Ok(())
	}

	/// Create new share remove session.
	fn create_share_remove_session(&mut self) -> Result<(), Error> {
		let nodes_to_remove = self.nodes_to_remove.take().ok_or(Error::InvalidStateForRequest)?;
		let share_remove_session = ShareRemoveSessionImpl::new(ShareRemoveSessionParams {
			meta: self.meta.clone(),
			nonce: self.nonce,
			cluster_nodes_set: self.cluster_nodes_set.clone(),
			transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()),
			key_storage: self.key_storage.clone(),
			admin_public: None,
		})?;
		share_remove_session.set_consensus_output(nodes_to_remove)?;
		self.share_remove_session = Some(share_remove_session);
		Ok(())
	}

	/// Proceed to the next state.
	fn proceed_to_next_state(&mut self) -> Result<(), Error> {
		if self.meta.self_node_id != self.meta.master_node_id {
			if self.nodes_to_add.is_none() && self.nodes_to_move.is_none() && self.nodes_to_remove.is_none() {
			if self.new_nodes_map.is_none() {
				self.is_finished = true;
			}
			return Ok(());
		}

		if self.nodes_to_add.is_some() {
		if self.new_nodes_map.is_some() {
			self.create_share_add_session()?;
			return self.share_add_session.as_ref()
				.expect("either create_share_add_session fails, or session is created; qed")
				.initialize(None, None, None);
		}

		if self.nodes_to_move.is_some() {
			self.create_share_move_session()?;
			return self.share_move_session.as_ref()
				.expect("either create_share_move_session fails, or session is created; qed")
				.initialize(None, None, None);
		}

		if self.nodes_to_remove.is_some() {
			self.create_share_remove_session()?;
			return self.share_remove_session.as_ref()
				.expect("either create_share_remove_session fails, or session is created; qed")
				.initialize(None, None, None);
				.initialize(None, None, None, None);
		}

		self.is_finished = true;

		Ok(())
	}

	/// Remove isolated nodes from key share.
	fn remove_isolated_nodes(meta: &ShareChangeSessionMeta, key_storage: &Arc<KeyStorage>, isolated_nodes: BTreeSet<NodeId>) -> Result<(), Error> {
		let mut key_share = key_storage.get(&meta.id).map_err(|e| Error::KeyStorage(e.into()))?;
		for isolated_node in &isolated_nodes {
			key_share.id_numbers.remove(isolated_node);
		}
		if key_share.id_numbers.len() < key_share.threshold + 1 {
			return Err(Error::InvalidNodesConfiguration);
		}
		key_storage.update(meta.id.clone(), key_share).map_err(|e| Error::KeyStorage(e.into()))
	}
}
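
// Editor's sketch of the invariant remove_isolated_nodes enforces above:
// after removal at least threshold + 1 share holders must remain, or the
// key becomes unrecoverable. Standalone illustration with plain integers,
// not the real DocumentKeyShare type:
fn survives_isolation(holders: usize, isolated: usize, threshold: usize) -> bool {
	// mirrors the `id_numbers.len() < threshold + 1` check above
	holders.saturating_sub(isolated) >= threshold + 1
}

#[test]
fn isolation_invariant_sketch() {
	assert!(survives_isolation(3, 1, 1));  // 2 holders remain, threshold 1: still recoverable
	assert!(!survives_isolation(3, 2, 1)); // 1 holder remains: the InvalidNodesConfiguration case
}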
|
||||
|
||||
impl ShareChangeTransport {
|
||||
@ -345,7 +217,11 @@ impl JobTransport for ShareChangeTransport {
|
||||
}
|
||||
|
||||
impl ShareAddSessionTransport for ShareChangeTransport {
|
||||
fn set_id_numbers(&mut self, _id_numbers: BTreeMap<NodeId, Secret>) {
|
||||
fn nodes(&self) -> BTreeSet<NodeId> {
|
||||
self.cluster.nodes()
|
||||
}
|
||||
|
||||
fn set_master_data(&mut self, _consensus_group: BTreeSet<NodeId>, _id_numbers: BTreeMap<NodeId, Option<Secret>>) {
|
||||
unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
|
||||
}
|
||||
|
||||
@ -358,69 +234,72 @@ impl ShareAddSessionTransport for ShareChangeTransport {
|
||||
}
|
||||
}
|
||||
|
||||
impl ShareMoveSessionTransport for ShareChangeTransport {
|
||||
fn set_shares_to_move_reversed(&mut self, _shares_to_move: BTreeMap<NodeId, NodeId>) {
|
||||
unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed")
|
||||
}
|
||||
|
||||
fn send(&self, node: &NodeId, message: ShareMoveMessage) -> Result<(), Error> {
|
||||
self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(ServersSetChangeShareMoveMessage {
|
||||
session: self.session_id.clone().into(),
|
||||
session_nonce: self.nonce,
|
||||
message: message,
|
||||
})))
|
||||
/// Prepare share change plan for moving from old `old_key_version_owners` to `new_nodes_set`.
pub fn prepare_share_change_session_plan(cluster_nodes: &BTreeSet<NodeId>, threshold: usize, key_version: H256, master: &NodeId, old_key_version_owners: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>) -> Result<ShareChangeSessionPlan, Error> {
	// make new nodes map, so that:
	// all non-isolated old nodes will have their id number preserved
	// all new nodes will have a new id number
	let mut new_nodes_map = new_nodes_set.difference(&old_key_version_owners)
		.map(|n| math::generate_random_scalar().map(|id| (n.clone(), Some(id))))
		.collect::<Result<BTreeMap<_, _>, _>>()?;
	if !new_nodes_map.is_empty() {
		for old_node in old_key_version_owners.iter().filter(|n| cluster_nodes.contains(n)) {
			new_nodes_map.insert(old_node.clone(), None);
		}
	}

impl ShareRemoveSessionTransport for ShareChangeTransport {
	fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error> {
		self.cluster.send(node, Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(ServersSetChangeShareRemoveMessage {
			session: self.session_id.clone().into(),
			session_nonce: self.nonce,
			message: message,
		})))
	}
}

/// Prepare share change plan for moving from old `session_nodes` to `new_nodes_set`.
pub fn prepare_share_change_session_plan(cluster_nodes_set: &BTreeSet<NodeId>, session_nodes: &BTreeSet<NodeId>, new_nodes_set: &BTreeSet<NodeId>) -> Result<ShareChangeSessionPlan, Error> {
	let mut nodes_to_add: BTreeSet<_> = new_nodes_set.difference(&session_nodes).cloned().collect();
	let mut nodes_to_move = BTreeMap::new();
	// isolated nodes are the nodes that are not currently in the cluster and are not in the new nodes set
	let isolated_nodes: BTreeSet<_> = session_nodes.difference(&cluster_nodes_set)
		.filter(|n| !new_nodes_set.contains(n))
		.cloned()
		.collect();
	// removed nodes are all old session nodes, except nodes that are in the new set and except isolated nodes
	let mut nodes_to_remove: BTreeSet<_> = session_nodes.difference(&new_nodes_set)
		.filter(|n| !isolated_nodes.contains(n))
		.cloned()
		.collect();

	while !nodes_to_remove.is_empty() && !nodes_to_add.is_empty() {
		let source_node = nodes_to_remove.iter().cloned().nth(0).expect("nodes_to_remove.is_empty is checked in while condition; qed");
		let target_node = nodes_to_add.iter().cloned().nth(0).expect("nodes_to_add.is_empty is checked in while condition; qed");
		nodes_to_remove.remove(&source_node);
		nodes_to_add.remove(&target_node);
		nodes_to_move.insert(target_node, source_node);
	}
	// select consensus group if there are some nodes to add
	let consensus_group = if !new_nodes_map.is_empty() {
		::std::iter::once(master.clone())
			.chain(old_key_version_owners.iter()
				.filter(|n| *n != master && cluster_nodes.contains(*n))
				.take(threshold)
				.cloned())
			.collect()
	} else {
		BTreeSet::new()
	};

	Ok(ShareChangeSessionPlan {
		isolated_nodes: isolated_nodes,
		nodes_to_add: nodes_to_add.into_iter()
			.map(|n| math::generate_random_scalar().map(|s| (n, s)))
			.collect::<Result<BTreeMap<_, _>, _>>()?,
		nodes_to_move: nodes_to_move,
		nodes_to_remove: nodes_to_remove,
		key_version: key_version,
		consensus_group: consensus_group,
		new_nodes_map: new_nodes_map,
	})
}
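
// A minimal sketch of the partitioning performed above, restated over plain
// string ids instead of real `NodeId`s (an illustrative assumption, not part
// of this module): isolated nodes are session nodes that are neither in the
// cluster nor in the new set, while session nodes missing from the new set
// but still connected become removals.
#[cfg(test)]
mod partition_sketch {
	use std::collections::BTreeSet;

	#[test]
	fn isolated_nodes_are_in_session_but_out_of_cluster_and_new_set() {
		let session: BTreeSet<&str> = ["a", "b", "c"].iter().cloned().collect();
		let cluster: BTreeSet<&str> = ["a", "b", "d"].iter().cloned().collect();
		let new_set: BTreeSet<&str> = ["a", "b", "d"].iter().cloned().collect();
		// same shape as the `isolated_nodes` computation above
		let isolated: BTreeSet<&str> = session.difference(&cluster)
			.filter(|n| !new_set.contains(*n))
			.cloned()
			.collect();
		assert_eq!(isolated, ["c"].iter().cloned().collect::<BTreeSet<_>>());
	}
}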

impl ShareChangeSessionPlan {
	/// Is empty (nothing-to-do) plan?
	pub fn is_empty(&self) -> bool {
		self.isolated_nodes.is_empty()
			&& self.nodes_to_add.is_empty()
			&& self.nodes_to_move.is_empty()
			&& self.nodes_to_remove.is_empty()
		self.new_nodes_map.is_empty()
	}
}

#[cfg(test)]
mod tests {
	use key_server_cluster::math;
	use super::prepare_share_change_session_plan;

	#[test]
	fn share_change_plan_creates_empty_plan() {
		let cluster_nodes: Vec<_> = (0..3).map(|_| math::generate_random_point().unwrap()).collect();
		let master = cluster_nodes[0].clone();
		let old_key_version_owners = cluster_nodes.iter().cloned().collect();
		let new_nodes_set = cluster_nodes.iter().cloned().collect();
		let plan = prepare_share_change_session_plan(&cluster_nodes.iter().cloned().collect(), 1, Default::default(), &master, &old_key_version_owners, &new_nodes_set).unwrap();

		assert!(plan.is_empty());
	}

	#[test]
	fn share_change_plan_adds_new_nodes() {
		let cluster_nodes: Vec<_> = (0..3).map(|_| math::generate_random_point().unwrap()).collect();
		let master = cluster_nodes[0].clone();
		let old_key_version_owners = cluster_nodes[0..2].iter().cloned().collect();
		let new_nodes_set = cluster_nodes.iter().cloned().collect();
		let plan = prepare_share_change_session_plan(&cluster_nodes.iter().cloned().collect(), 1, Default::default(), &master, &old_key_version_owners, &new_nodes_set).unwrap();

		assert!(!plan.is_empty());
		assert_eq!(old_key_version_owners, plan.consensus_group);
		assert_eq!(new_nodes_set, plan.new_nodes_map.keys().cloned().collect());
	}
}

@@ -1,828 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::BTreeSet;
use parking_lot::{Mutex, Condvar};
use ethkey::{Public, Signature};
use key_server_cluster::{Error, NodeId, SessionId, DocumentKeyShare, KeyStorage};
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::message::{Message, ShareRemoveMessage, ShareRemoveConsensusMessage, ConsensusMessageWithServersSet,
	ShareRemoveRequest, ShareRemoveConfirm, ShareRemoveError, InitializeConsensusSessionWithServersSet,
	ConfirmConsensusInitialization};
use key_server_cluster::jobs::job_session::JobTransport;
use key_server_cluster::jobs::dummy_job::{DummyJob, DummyJobTransport};
use key_server_cluster::jobs::servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest};
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

/// Share remove session API.
pub trait Session: Send + Sync + 'static {
	/// Wait until session is completed.
	fn wait(&self) -> Result<(), Error>;
}

/// Share remove session transport.
pub trait SessionTransport: Clone + JobTransport<PartialJobRequest=ServersSetChangeAccessRequest, PartialJobResponse=bool> {
	/// Send message to given node.
	fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error>;
}

/// Share remove session.
pub struct SessionImpl<T: SessionTransport> {
	/// Session core.
	core: SessionCore<T>,
	/// Session data.
	data: Mutex<SessionData<T>>,
}

/// Immutable session data.
struct SessionCore<T: SessionTransport> {
	/// Session metadata.
	pub meta: ShareChangeSessionMeta,
	/// Session-level nonce.
	pub nonce: u64,
	/// Original key share.
	pub key_share: DocumentKeyShare,
	/// All known cluster nodes.
	pub cluster_nodes_set: BTreeSet<NodeId>,
	/// Session transport to communicate to other cluster nodes.
	pub transport: T,
	/// Key storage.
	pub key_storage: Arc<KeyStorage>,
	/// Administrator public key.
	pub admin_public: Option<Public>,
	/// SessionImpl completion condvar.
	pub completed: Condvar,
}

/// Share remove consensus session type.
type ShareRemoveChangeConsensusSession<T> = ConsensusSession<ServersSetChangeAccessJob, T, DummyJob, DummyJobTransport>;

/// Mutable session data.
struct SessionData<T: SessionTransport> {
	/// Session state.
	pub state: SessionState,
	/// Consensus session.
	pub consensus_session: Option<ShareRemoveChangeConsensusSession<T>>,
	/// Shares to remove.
	pub shares_to_remove: Option<BTreeSet<NodeId>>,
	/// Remove confirmations to receive.
	pub remove_confirmations_to_receive: Option<BTreeSet<NodeId>>,
	/// Share remove change result.
	pub result: Option<Result<(), Error>>,
}

/// SessionImpl creation parameters
pub struct SessionParams<T: SessionTransport> {
	/// Session meta.
	pub meta: ShareChangeSessionMeta,
	/// Session nonce.
	pub nonce: u64,
	/// All known cluster nodes.
	pub cluster_nodes_set: BTreeSet<NodeId>,
	/// Session transport to communicate to other cluster nodes.
	pub transport: T,
	/// Key storage.
	pub key_storage: Arc<KeyStorage>,
	/// Administrator public key.
	pub admin_public: Option<Public>,
}

/// Share remove session state.
#[derive(Debug, PartialEq)]
enum SessionState {
	/// State when consensus is being established.
	ConsensusEstablishing,
	/// Waiting for remove confirmation.
	WaitingForRemoveConfirmation,
	/// Session is finished.
	Finished,
}

/// Isolated ShareRemove session transport.
#[derive(Clone)]
pub struct IsolatedSessionTransport {
	/// Key id.
	session: SessionId,
	/// Session-level nonce.
	nonce: u64,
	/// Cluster.
	cluster: Arc<Cluster>,
}

impl<T> SessionImpl<T> where T: SessionTransport {
	/// Create new share remove session.
	pub fn new(params: SessionParams<T>) -> Result<Self, Error> {
		Ok(SessionImpl {
			core: SessionCore {
				meta: params.meta.clone(),
				nonce: params.nonce,
				key_share: params.key_storage.get(&params.meta.id).map_err(|e| Error::KeyStorage(e.into()))?,
				cluster_nodes_set: params.cluster_nodes_set,
				transport: params.transport,
				key_storage: params.key_storage,
				admin_public: params.admin_public,
				completed: Condvar::new(),
			},
			data: Mutex::new(SessionData {
				state: SessionState::ConsensusEstablishing,
				consensus_session: None,
				shares_to_remove: None,
				remove_confirmations_to_receive: None,
				result: None,
			}),
		})
	}

	/// Set pre-established consensus data.
	pub fn set_consensus_output(&self, shares_to_remove: BTreeSet<NodeId>) -> Result<(), Error> {
		let mut data = self.data.lock();

		// check state
		if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() {
			return Err(Error::InvalidStateForRequest);
		}

		check_shares_to_remove(&self.core, &shares_to_remove)?;

		let remove_confirmations_to_receive: BTreeSet<NodeId> = shares_to_remove.iter()
			.filter(|n| self.core.cluster_nodes_set.contains(n))
			.cloned()
			.collect();
		let need_wait_for_confirmations = !remove_confirmations_to_receive.is_empty();
		data.shares_to_remove = Some(shares_to_remove);
		data.remove_confirmations_to_receive = Some(remove_confirmations_to_receive);

		// on slave nodes it can happen that all nodes being removed are isolated
		// => there's no need to wait for confirmations
		if !need_wait_for_confirmations {
			Self::complete_session(&self.core, &mut *data)?;
		}

		Ok(())
	}

	/// Initialize share remove session on master node.
	pub fn initialize(&self, shares_to_remove: Option<BTreeSet<NodeId>>, old_set_signature: Option<Signature>, new_set_signature: Option<Signature>) -> Result<(), Error> {
		debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id);

		let mut data = self.data.lock();
		// check state
		if data.state == SessionState::Finished {
			// probably there are isolated nodes && we only remove isolated nodes from session
			return Ok(());
		}
		if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() {
			return Err(Error::InvalidStateForRequest);
		}

		// if consensus is not yet established => start consensus session
		let is_consensus_pre_established = data.shares_to_remove.is_some();
		if !is_consensus_pre_established {
			let shares_to_remove = shares_to_remove.ok_or(Error::InvalidMessage)?;
			check_shares_to_remove(&self.core, &shares_to_remove)?;

			let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?;
			let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?;
			let old_nodes_set: BTreeSet<_> = self.core.key_share.id_numbers.keys().cloned().collect();
			let new_nodes_set: BTreeSet<_> = old_nodes_set.iter().cloned().filter(|n| !shares_to_remove.contains(&n)).collect();
			let mut active_nodes_set = old_nodes_set.clone();
			let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?;

			// if some session nodes were removed from the cluster (we treat this as a failure, or as an 'improper' removal)
			// => do not require these nodes to be connected
			for isolated_node in old_nodes_set.difference(&self.core.cluster_nodes_set) {
				active_nodes_set.remove(&isolated_node);
			}

			let mut consensus_session = ConsensusSession::new(ConsensusSessionParams {
				meta: self.core.meta.clone().into_consensus_meta(active_nodes_set.len())?,
				consensus_executor: ServersSetChangeAccessJob::new_on_master(admin_public,
					old_nodes_set.clone(),
					old_nodes_set,
					new_nodes_set,
					old_set_signature,
					new_set_signature),
				consensus_transport: self.core.transport.clone(),
			})?;
			consensus_session.initialize(active_nodes_set)?;
			data.consensus_session = Some(consensus_session);
			data.remove_confirmations_to_receive = Some(shares_to_remove.clone());
			data.shares_to_remove = Some(shares_to_remove);
			return Ok(());
		}

		// otherwise => start sending ShareRemove-specific messages
		Self::on_consensus_established(&self.core, &mut *data)
	}

	/// Process single message.
	pub fn process_message(&self, sender: &NodeId, message: &ShareRemoveMessage) -> Result<(), Error> {
		if self.core.nonce != message.session_nonce() {
			return Err(Error::ReplayProtection);
		}

		match message {
			&ShareRemoveMessage::ShareRemoveConsensusMessage(ref message) =>
				self.on_consensus_message(sender, message),
			&ShareRemoveMessage::ShareRemoveRequest(ref message) =>
				self.on_share_remove_request(sender, message),
			&ShareRemoveMessage::ShareRemoveConfirm(ref message) =>
				self.on_share_remove_confirmation(sender, message),
			&ShareRemoveMessage::ShareRemoveError(ref message) =>
				self.on_session_error(sender, message),
		}
	}

	/// When consensus-related message is received.
	pub fn on_consensus_message(&self, sender: &NodeId, message: &ShareRemoveConsensusMessage) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		// start slave consensus session if needed
		let mut data = self.data.lock();
		if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id {
			match &message.message {
				&ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => {
					let admin_public = self.core.admin_public.clone().ok_or(Error::InvalidMessage)?;
					let current_nodes_set = self.core.key_share.id_numbers.keys().cloned().collect();
					data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams {
						meta: self.core.meta.clone().into_consensus_meta(message.old_nodes_set.len())?,
						consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public, current_nodes_set),
						consensus_transport: self.core.transport.clone(),
					})?);
				},
				_ => return Err(Error::InvalidStateForRequest),
			}
		}

		let (is_establishing_consensus, is_consensus_established, shares_to_remove) = {
			let consensus_session = data.consensus_session.as_mut().ok_or(Error::InvalidMessage)?;
			let is_establishing_consensus = consensus_session.state() == ConsensusSessionState::EstablishingConsensus;
			let shares_to_remove = match &message.message {
				&ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => {
					consensus_session.on_consensus_partial_request(sender, ServersSetChangeAccessRequest::from(message))?;
					let shares_to_remove = message.old_nodes_set.difference(&message.new_nodes_set).cloned().map(Into::into).collect::<BTreeSet<_>>();
					check_shares_to_remove(&self.core, &shares_to_remove)?;
					Some(shares_to_remove)
				},
				&ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref message) => {
					consensus_session.on_consensus_partial_response(sender, message.is_confirmed)?;
					None
				},
			};

			(
				is_establishing_consensus,
				consensus_session.state() == ConsensusSessionState::ConsensusEstablished,
				shares_to_remove
			)
		};

		if let Some(shares_to_remove) = shares_to_remove {
			data.remove_confirmations_to_receive = Some(shares_to_remove.clone());
			data.shares_to_remove = Some(shares_to_remove);
		}
		if self.core.meta.self_node_id != self.core.meta.master_node_id || !is_establishing_consensus || !is_consensus_established {
			return Ok(());
		}

		Self::on_consensus_established(&self.core, &mut *data)
	}

	/// When share remove request is received.
	pub fn on_share_remove_request(&self, sender: &NodeId, message: &ShareRemoveRequest) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		// awaiting this message from master node only
		if sender != &self.core.meta.master_node_id {
			return Err(Error::InvalidMessage);
		}

		// check state
		let mut data = self.data.lock();
		if data.state == SessionState::ConsensusEstablishing && data.shares_to_remove.is_some() {
			data.state = SessionState::WaitingForRemoveConfirmation;
		} else if data.state != SessionState::WaitingForRemoveConfirmation {
			return Err(Error::InvalidStateForRequest);
		}
		// only process if we are waiting for this request
		{
			let shares_to_remove = data.shares_to_remove.as_ref()
				.expect("shares_to_remove is filled when consensus is established; we only process share remove request after consensus is established; qed");
			if !shares_to_remove.contains(&self.core.meta.self_node_id) {
				return Err(Error::InvalidMessage);
			}
		}

		// remove share
		Self::complete_session(&self.core, &mut *data)
	}

	/// When share remove confirmation is received.
	pub fn on_share_remove_confirmation(&self, sender: &NodeId, message: &ShareRemoveConfirm) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		// check state
		let mut data = self.data.lock();
		if data.state == SessionState::ConsensusEstablishing && data.shares_to_remove.is_some() {
			data.state = SessionState::WaitingForRemoveConfirmation;
		} else if data.state != SessionState::WaitingForRemoveConfirmation {
			return Err(Error::InvalidStateForRequest);
		}
		// process remove confirmation
		{
			let remove_confirmations_to_receive = data.remove_confirmations_to_receive.as_mut()
				.expect("remove_confirmations_to_receive is filled when consensus is established; we only process share remove confirmations after consensus is established; qed");
			if !remove_confirmations_to_receive.remove(sender) {
				return Err(Error::InvalidMessage);
			}

			if !remove_confirmations_to_receive.is_empty() {
				return Ok(());
			}
		}

		Self::complete_session(&self.core, &mut *data)
	}

	/// When error has occurred on another node.
	pub fn on_session_error(&self, sender: &NodeId, message: &ShareRemoveError) -> Result<(), Error> {
		let mut data = self.data.lock();

		warn!("{}: share remove session failed with error: {} from {}", self.core.meta.self_node_id, message.error, sender);

		data.state = SessionState::Finished;

		Ok(())
	}

	/// Start sending ShareRemove-specific messages, when consensus is established.
	fn on_consensus_established(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
		// update state
		data.state = SessionState::WaitingForRemoveConfirmation;

		// send share remove requests to every required node
		Self::disseminate_share_remove_requests(core, data)?;

		{
			let shares_to_remove = data.shares_to_remove.as_ref()
				.expect("shares_to_remove is filled when consensus is established; on_consensus_established is called after consensus is established; qed");
			let remove_confirmations_to_receive: BTreeSet<_> = shares_to_remove.iter()
				.filter(|n| core.cluster_nodes_set.contains(n))
				.cloned()
				.collect();
			if !shares_to_remove.contains(&core.meta.self_node_id) && !remove_confirmations_to_receive.is_empty() {
				// remember remove confirmations to receive
				data.remove_confirmations_to_receive = Some(remove_confirmations_to_receive);
				return Ok(());
			}
		}

		// complete session if share is lost
		Self::complete_session(core, data)
	}

	/// Disseminate share remove requests.
	fn disseminate_share_remove_requests(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
		let shares_to_remove = data.shares_to_remove.as_ref()
			.expect("shares_to_remove is filled when consensus is established; disseminate_share_remove_requests is called after consensus is established; qed");
		for node in shares_to_remove.iter().filter(|n| **n != core.meta.self_node_id && core.cluster_nodes_set.contains(n)) {
			core.transport.send(node, ShareRemoveMessage::ShareRemoveRequest(ShareRemoveRequest {
				session: core.meta.id.clone().into(),
				session_nonce: core.nonce,
			}))?;
		}

		Ok(())
	}

	/// Complete session on this node.
	fn complete_session(core: &SessionCore<T>, data: &mut SessionData<T>) -> Result<(), Error> {
		// update state
		data.state = SessionState::Finished;

		// if we are the node being 'removed' => remove share from storage
		let shares_to_remove = data.shares_to_remove.as_ref()
			.expect("shares_to_remove is filled when consensus is established; complete_session is called after consensus is established; qed");
		if shares_to_remove.contains(&core.meta.self_node_id) {
			// send confirmation to all other nodes
			let new_nodes_set = core.key_share.id_numbers.keys().filter(|n| !shares_to_remove.contains(n)).collect::<Vec<_>>();
			for node in new_nodes_set.into_iter().filter(|n| **n != core.meta.self_node_id && core.cluster_nodes_set.contains(n)) {
				core.transport.send(&node, ShareRemoveMessage::ShareRemoveConfirm(ShareRemoveConfirm {
					session: core.meta.id.clone().into(),
					session_nonce: core.nonce,
				}))?;
			}

			return core.key_storage.remove(&core.meta.id)
				.map_err(|e| Error::KeyStorage(e.into()));
		}

		// else remove the removed nodes' ids from key_share.id_numbers ...
		let mut key_share = core.key_share.clone();
		for share_to_remove in shares_to_remove {
			key_share.id_numbers.remove(share_to_remove);
		}

		// ... and update key share in storage
		core.key_storage.update(core.meta.id.clone(), key_share)
			.map_err(|e| Error::KeyStorage(e.into()))
	}
}
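
// A small illustration of the confirmation bookkeeping above, assuming (for
// readability only) that node ids are plain strings: confirmations are only
// awaited from removed nodes that are still connected to the cluster, so when
// every node being removed is isolated, the wait set is empty and the session
// completes immediately.
#[cfg(test)]
mod confirmation_filter_sketch {
	use std::collections::BTreeSet;

	#[test]
	fn no_confirmations_awaited_when_all_removed_nodes_are_isolated() {
		let shares_to_remove: BTreeSet<&str> = ["c", "d"].iter().cloned().collect();
		let cluster_nodes_set: BTreeSet<&str> = ["a", "b"].iter().cloned().collect();
		// same shape as the `remove_confirmations_to_receive` computation above
		let to_receive: BTreeSet<&str> = shares_to_remove.iter()
			.filter(|n| cluster_nodes_set.contains(*n))
			.cloned()
			.collect();
		assert!(to_receive.is_empty());
	}
}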

impl<T> Session for SessionImpl<T> where T: SessionTransport + Send + Sync + 'static {
	fn wait(&self) -> Result<(), Error> {
		let mut data = self.data.lock();
		if !data.result.is_some() {
			self.core.completed.wait(&mut data);
		}

		data.result.clone()
			.expect("checked above or waited for completed; completed is only signaled when result.is_some(); qed")
	}
}

impl<T> ClusterSession for SessionImpl<T> where T: SessionTransport {
	fn is_finished(&self) -> bool {
		self.data.lock().state == SessionState::Finished
	}

	fn on_session_timeout(&self) {
		let mut data = self.data.lock();

		warn!("{}: share remove session failed with timeout", self.core.meta.self_node_id);

		data.state = SessionState::Finished;
		data.result = Some(Err(Error::NodeDisconnected));
		self.core.completed.notify_all();
	}

	fn on_node_timeout(&self, node: &NodeId) {
		let mut data = self.data.lock();

		warn!("{}: share remove session failed because {} connection has timed out", self.core.meta.self_node_id, node);

		data.state = SessionState::Finished;
		data.result = Some(Err(Error::NodeDisconnected));
		self.core.completed.notify_all();
	}
}

impl IsolatedSessionTransport {
	pub fn new(session_id: SessionId, nonce: u64, cluster: Arc<Cluster>) -> Self {
		IsolatedSessionTransport {
			session: session_id,
			nonce: nonce,
			cluster: cluster,
		}
	}
}

impl JobTransport for IsolatedSessionTransport {
	type PartialJobRequest = ServersSetChangeAccessRequest;
	type PartialJobResponse = bool;

	fn send_partial_request(&self, node: &NodeId, request: ServersSetChangeAccessRequest) -> Result<(), Error> {
		self.cluster.send(node, Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(ShareRemoveConsensusMessage {
			session: self.session.clone().into(),
			session_nonce: self.nonce,
			message: ConsensusMessageWithServersSet::InitializeConsensusSession(InitializeConsensusSessionWithServersSet {
				old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(),
				new_nodes_set: request.new_servers_set.into_iter().map(Into::into).collect(),
				old_set_signature: request.old_set_signature.into(),
				new_set_signature: request.new_set_signature.into(),
			}),
		})))
	}

	fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> {
		self.cluster.send(node, Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(ShareRemoveConsensusMessage {
			session: self.session.clone().into(),
			session_nonce: self.nonce,
			message: ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ConfirmConsensusInitialization {
				is_confirmed: response,
			}),
		})))
	}
}

impl SessionTransport for IsolatedSessionTransport {
	fn send(&self, node: &NodeId, message: ShareRemoveMessage) -> Result<(), Error> {
		self.cluster.send(node, Message::ShareRemove(message))
	}
}

fn check_shares_to_remove<T: SessionTransport>(core: &SessionCore<T>, shares_to_remove: &BTreeSet<NodeId>) -> Result<(), Error> {
	// shares to remove must not be empty
	if shares_to_remove.is_empty() {
		return Err(Error::InvalidMessage);
	}

	// all shares_to_remove nodes must be old nodes of the session
	if shares_to_remove.iter().any(|n| !core.key_share.id_numbers.contains_key(n)) {
		return Err(Error::InvalidNodesConfiguration);
	}

	// do not allow removing more shares than possible
	let nodes_left = core.key_share.id_numbers.len() - shares_to_remove.len();
	if core.key_share.threshold + 1 > nodes_left {
		return Err(Error::InvalidNodesConfiguration);
	}

	Ok(())
}
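
// Worked numbers for the threshold check above: with 5 shares and threshold
// t = 1, removing 3 shares leaves 2 = t + 1 holders, which is still enough to
// decrypt, while removing 4 would leave a single holder and is rejected. A
// self-contained restatement (illustrative only):
#[cfg(test)]
mod threshold_check_sketch {
	#[test]
	fn at_least_threshold_plus_one_shares_must_remain() {
		let (total_shares, threshold) = (5usize, 1usize);
		// mirrors `threshold + 1 > nodes_left` above, stated positively
		let is_allowed = |removed: usize| threshold + 1 <= total_shares - removed;
		assert!(is_allowed(3));  // 2 holders remain, t + 1 = 2
		assert!(!is_allowed(4)); // 1 holder remains, not enough
	}
}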

#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use std::collections::{VecDeque, BTreeMap, BTreeSet};
	use ethkey::{Random, Generator, Public, Signature, KeyPair, sign};
	use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
	use key_server_cluster::cluster_sessions::ClusterSession;
	use key_server_cluster::cluster::tests::DummyCluster;
	use key_server_cluster::generation_session::tests::{Node as GenerationNode, generate_nodes_ids};
	use key_server_cluster::math;
	use key_server_cluster::message::Message;
	use key_server_cluster::servers_set_change_session::tests::generate_key;
	use key_server_cluster::jobs::servers_set_change_access_job::ordered_nodes_hash;
	use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
	use key_server_cluster::admin_sessions::share_add_session::tests::check_secret_is_preserved;
	use super::{SessionImpl, SessionParams, IsolatedSessionTransport};

	struct Node {
		pub cluster: Arc<DummyCluster>,
		pub key_storage: Arc<DummyKeyStorage>,
		pub session: SessionImpl<IsolatedSessionTransport>,
	}

	struct MessageLoop {
		pub admin_key_pair: KeyPair,
		pub original_key_pair: KeyPair,
		pub old_nodes_set: BTreeSet<NodeId>,
		pub new_nodes_set: BTreeSet<NodeId>,
		pub old_set_signature: Signature,
		pub new_set_signature: Signature,
		pub nodes: BTreeMap<NodeId, Node>,
		pub queue: VecDeque<(NodeId, NodeId, Message)>,
	}

	fn create_session(mut meta: ShareChangeSessionMeta, admin_public: Public, self_node_id: NodeId, cluster: Arc<DummyCluster>, key_storage: Arc<KeyStorage>, all_cluster_nodes: BTreeSet<NodeId>) -> SessionImpl<IsolatedSessionTransport> {
		let session_id = meta.id.clone();
		meta.self_node_id = self_node_id;
		SessionImpl::new(SessionParams {
			meta: meta.clone(),
			transport: IsolatedSessionTransport::new(session_id, 1, cluster),
			key_storage: key_storage,
			admin_public: Some(admin_public),
			cluster_nodes_set: all_cluster_nodes,
			nonce: 1,
		}).unwrap()
	}

	fn create_node(meta: ShareChangeSessionMeta, admin_public: Public, node: GenerationNode, all_nodes_set: BTreeSet<NodeId>) -> Node {
		Node {
			cluster: node.cluster.clone(),
			key_storage: node.key_storage.clone(),
			session: create_session(meta, admin_public, node.session.node().clone(), node.cluster, node.key_storage, all_nodes_set),
		}
	}

	impl MessageLoop {
		pub fn new(t: usize, master_node_id: NodeId, old_nodes_set: BTreeSet<NodeId>, shares_to_remove: BTreeSet<NodeId>) -> Self {
			// generate admin key pair
			let admin_key_pair = Random.generate().unwrap();
			let admin_public = admin_key_pair.public().clone();

			// run initial generation session
			let gml = generate_key(t, old_nodes_set.clone());
			let original_secret = math::compute_joint_secret(gml.nodes.values()
				.map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().polynom1[0].clone())
				.collect::<Vec<_>>()
				.iter()).unwrap();
			let original_key_pair = KeyPair::from_secret(original_secret).unwrap();

			// prepare sessions on all nodes
			let meta = ShareChangeSessionMeta {
				id: SessionId::default(),
				self_node_id: NodeId::default(),
				master_node_id: master_node_id,
			};
			let new_nodes_set: BTreeSet<_> = old_nodes_set.iter()
				.filter(|n| !shares_to_remove.contains(n))
				.cloned()
				.collect();
			let nodes = gml.nodes.into_iter().map(|gn| create_node(meta.clone(), admin_public.clone(), gn.1, old_nodes_set.clone()));
			let nodes = nodes.map(|n| (n.session.core.meta.self_node_id.clone(), n)).collect();

			let old_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&old_nodes_set)).unwrap();
			let new_set_signature = sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap();
			MessageLoop {
				admin_key_pair: admin_key_pair,
				original_key_pair: original_key_pair,
				old_nodes_set: old_nodes_set.clone(),
				new_nodes_set: new_nodes_set.clone(),
				old_set_signature: old_set_signature,
				new_set_signature: new_set_signature,
				nodes: nodes,
				queue: Default::default(),
			}
		}

		pub fn run(&mut self) {
			while let Some((from, to, message)) = self.take_message() {
				self.process_message((from, to, message)).unwrap();
			}
		}

		pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> {
			self.nodes.values()
				.filter_map(|n| n.cluster.take_message().map(|m| (n.session.core.meta.self_node_id.clone(), m.0, m.1)))
				.nth(0)
				.or_else(|| self.queue.pop_front())
		}

		pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
			match { match msg.2 {
				Message::ShareRemove(ref message) =>
					self.nodes[&msg.1].session.process_message(&msg.0, message),
				_ => unreachable!("only servers set change messages are expected"),
			} } {
				Ok(_) => Ok(()),
				Err(Error::TooEarlyForRequest) => {
					self.queue.push_back(msg);
					Ok(())
				},
				Err(err) => Err(err),
			}
		}
	}
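
	// The loop above is a plain message pump: drain whatever the dummy clusters
	// produced and requeue any message that arrives before its session is ready
	// (`TooEarlyForRequest`). A self-contained sketch of that requeue discipline
	// over plain numbers (illustrative only):
	#[test]
	fn requeue_discipline_sketch() {
		use std::collections::VecDeque;

		let mut queue: VecDeque<u32> = vec![1, 2, 3].into_iter().collect();
		let mut processed = Vec::new();
		let mut requeued_once = false;
		while let Some(msg) = queue.pop_front() {
			// pretend message 2 is "too early" the first time it is seen
			if msg == 2 && !requeued_once {
				requeued_once = true;
				queue.push_back(msg);
				continue;
			}
			processed.push(msg);
		}
		assert_eq!(processed, vec![1, 3, 2]);
	}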
	#[test]
	fn remove_session_fails_if_no_nodes_are_removed() {
		let (t, n) = (1, 3);
		let old_nodes_set = generate_nodes_ids(n);
		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
		let nodes_to_remove = BTreeSet::new();
		let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
		assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
			Some(ml.old_set_signature.clone()),
			Some(ml.new_set_signature.clone())), Err(Error::InvalidMessage));
	}

	#[test]
	fn remove_session_fails_if_foreign_nodes_are_removed() {
		let (t, n) = (1, 3);
		let old_nodes_set = generate_nodes_ids(n);
		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
		let nodes_to_remove: BTreeSet<_> = vec![math::generate_random_point().unwrap()].into_iter().collect();
		let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
		assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
			Some(ml.old_set_signature.clone()),
			Some(ml.new_set_signature.clone())), Err(Error::InvalidNodesConfiguration));
	}

	#[test]
	fn remove_session_fails_if_too_many_nodes_are_removed() {
		let (t, n) = (1, 3);
		let old_nodes_set = generate_nodes_ids(n);
		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
		let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().take(2).collect();
		let ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());
		assert_eq!(ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
			Some(ml.old_set_signature.clone()),
			Some(ml.new_set_signature.clone())), Err(Error::InvalidNodesConfiguration));
	}

	#[test]
	fn nodes_removed_using_share_remove_from_master_node() {
		let t = 1;
		let test_cases = vec![(3, 1), (5, 3)];
		for (n, nodes_to_remove) in test_cases {
			// generate key && prepare ShareRemove sessions
			let old_nodes_set = generate_nodes_ids(n);
			let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
			let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().take(nodes_to_remove).collect();
			let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());

			// initialize session on master node && run to completion
			ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
				Some(ml.old_set_signature.clone()),
				Some(ml.new_set_signature.clone())).unwrap();
			ml.run();

			// check that session has completed on all nodes
			assert!(ml.nodes.values().all(|n| n.session.is_finished()));

			// check that the secret is still the same as before removing the shares
			check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
				.filter(|&(k, _)| !nodes_to_remove.contains(k))
				.map(|(k, v)| (k.clone(), v.key_storage.clone()))
				.collect());
		}
	}

	#[test]
	fn nodes_removed_using_share_remove_from_non_master_node() {
		let t = 1;
		let test_cases = vec![(3, 1), (5, 3)];
		for (n, nodes_to_remove) in test_cases {
			// generate key && prepare ShareRemove sessions
			let old_nodes_set = generate_nodes_ids(n);
			let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
			let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_remove).collect();
			let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());

			// initialize session on master node && run to completion
			ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
				Some(ml.old_set_signature.clone()),
				Some(ml.new_set_signature.clone())).unwrap();
			ml.run();

			// check that session has completed on all nodes
			assert!(ml.nodes.values().all(|n| n.session.is_finished()));

			// check that the secret is still the same as before removing the shares
			check_secret_is_preserved(ml.original_key_pair.clone(), ml.nodes.iter()
				.filter(|&(k, _)| !nodes_to_remove.contains(k))
				.map(|(k, v)| (k.clone(), v.key_storage.clone()))
				.collect());
		}
	}

	#[test]
	fn nodes_are_removed_even_if_some_other_nodes_are_isolated_from_cluster() {
		let t = 1;
		let (n, nodes_to_remove, nodes_to_isolate) = (5, 1, 2);

		// generate key && prepare ShareRemove sessions
		let old_nodes_set = generate_nodes_ids(n);
		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
		let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_remove).collect();
		let nodes_to_isolate: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1 + nodes_to_remove.len()).take(nodes_to_isolate).collect();
		let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());

		// simulate node failure - isolate nodes (they are removed from the cluster completely, but are still part of the session)
		for node_to_isolate in &nodes_to_isolate {
			ml.nodes.remove(node_to_isolate);
		}
		for node in ml.nodes.values_mut() {
			for node_to_isolate in &nodes_to_isolate {
				node.session.core.cluster_nodes_set.remove(node_to_isolate);
				node.cluster.remove_node(node_to_isolate);
			}
		}

		// initialize session on master node && run to completion
		ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
			Some(ml.old_set_signature.clone()),
			Some(ml.new_set_signature.clone())).unwrap();
		ml.run();
	}

	#[test]
	fn nodes_are_removed_even_if_isolated_from_cluster() {
		let t = 1;
		let (n, nodes_to_isolate_and_remove) = (5, 3);

		// generate key && prepare ShareRemove sessions
		let old_nodes_set = generate_nodes_ids(n);
		let master_node_id = old_nodes_set.iter().cloned().nth(0).unwrap();
		let nodes_to_remove: BTreeSet<_> = old_nodes_set.iter().cloned().skip(1).take(nodes_to_isolate_and_remove).collect();
		let mut ml = MessageLoop::new(t, master_node_id.clone(), old_nodes_set, nodes_to_remove.clone());

		// simulate node failure - isolate nodes (they are removed from the cluster completely, but are still part of the session)
		for node_to_isolate in &nodes_to_remove {
			ml.nodes.remove(node_to_isolate);
		}
		for node in ml.nodes.values_mut() {
			for node_to_isolate in &nodes_to_remove {
				node.session.core.cluster_nodes_set.remove(node_to_isolate);
				node.cluster.remove_node(node_to_isolate);
			}
		}

		// initialize session on master node && run to completion
		ml.nodes[&master_node_id].session.initialize(Some(nodes_to_remove.clone()),
			Some(ml.old_set_signature.clone()),
			Some(ml.new_set_signature.clone())).unwrap();
		ml.run();
	}
}
@@ -14,16 +14,16 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::cmp::{Ord, PartialOrd, Ordering};
use std::sync::Arc;
use parking_lot::{Mutex, Condvar};
use bigint::hash::H256;
use ethkey::{Secret, Signature};
use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId, EncryptedDocumentKeyShadow, SessionMeta};
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession};
use key_server_cluster::message::{Message, DecryptionMessage, DecryptionConsensusMessage, RequestPartialDecryption,
	PartialDecryption, DecryptionSessionError, DecryptionSessionCompleted, ConsensusMessage, InitializeConsensusSession,
	ConfirmConsensusInitialization};
	ConfirmConsensusInitialization, DecryptionSessionDelegation, DecryptionSessionDelegationCompleted};
use key_server_cluster::jobs::job_session::JobTransport;
use key_server_cluster::jobs::key_access_job::KeyAccessJob;
use key_server_cluster::jobs::decryption_job::{PartialDecryptionRequest, PartialDecryptionResponse, DecryptionJob};
@@ -57,7 +57,7 @@ struct SessionCore {
	/// Decryption session access key.
	pub access_key: Secret,
	/// Key share.
	pub key_share: DocumentKeyShare,
	pub key_share: Option<DocumentKeyShare>,
	/// Cluster which allows this node to send messages to other nodes in the cluster.
	pub cluster: Arc<Cluster>,
	/// Session-level nonce.
@@ -71,23 +71,18 @@ type DecryptionConsensusSession = ConsensusSession<KeyAccessJob, DecryptionConse

/// Mutable session data.
struct SessionData {
	/// Key version to use for decryption.
	pub version: Option<H256>,
	/// Consensus-based decryption session.
	pub consensus_session: DecryptionConsensusSession,
	/// Is shadow decryption requested?
	pub is_shadow_decryption: Option<bool>,
	/// Delegation status.
	pub delegation_status: Option<DelegationStatus>,
	/// Decryption result.
	pub result: Option<Result<EncryptedDocumentKeyShadow, Error>>,
}

/// Decryption session Id.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DecryptionSessionId {
	/// Encryption session id.
	pub id: SessionId,
	/// Decryption session access key.
	pub access_key: Secret,
}

/// SessionImpl creation parameters
pub struct SessionParams {
	/// Session metadata.
@@ -95,7 +90,7 @@ pub struct SessionParams {
	/// Session access key.
	pub access_key: Secret,
	/// Key share.
	pub key_share: DocumentKeyShare,
	pub key_share: Option<DocumentKeyShare>,
	/// ACL storage.
	pub acl_storage: Arc<AclStorage>,
	/// Cluster.
@@ -112,6 +107,8 @@ struct DecryptionConsensusTransport {
	access_key: Secret,
	/// Session-level nonce.
	nonce: u64,
	/// Selected key version (on master node).
	version: Option<H256>,
	/// Cluster.
	cluster: Arc<Cluster>,
}
@@ -128,28 +125,32 @@ struct DecryptionJobTransport {
	cluster: Arc<Cluster>,
}

/// Session delegation status.
enum DelegationStatus {
	/// Delegated to other node.
	DelegatedTo(NodeId),
	/// Delegated from other node.
	DelegatedFrom(NodeId, u64),
}

impl SessionImpl {
	/// Create new decryption session.
	pub fn new(params: SessionParams, requester_signature: Option<Signature>) -> Result<Self, Error> {
		debug_assert_eq!(params.meta.threshold, params.key_share.threshold);
		debug_assert_eq!(params.meta.self_node_id == params.meta.master_node_id, requester_signature.is_some());

		use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold};
		debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default());

		// check that common_point and encrypted_point are already set
		if params.key_share.common_point.is_none() || params.key_share.encrypted_point.is_none() {
		if let Some(key_share) = params.key_share.as_ref() {
			// encrypted data must be set
			if key_share.common_point.is_none() || key_share.encrypted_point.is_none() {
				return Err(Error::NotStartedSessionId);
			}

			// check nodes and threshold
			let nodes = params.key_share.id_numbers.keys().cloned().collect();
			check_cluster_nodes(&params.meta.self_node_id, &nodes)?;
			check_threshold(params.key_share.threshold, &nodes)?;
		}

		let consensus_transport = DecryptionConsensusTransport {
			id: params.meta.id.clone(),
			access_key: params.access_key.clone(),
			nonce: params.nonce,
			version: None,
			cluster: params.cluster.clone(),
		};
		let consensus_session = ConsensusSession::new(ConsensusSessionParams {
@@ -171,8 +172,10 @@ impl SessionImpl {
				completed: Condvar::new(),
			},
			data: Mutex::new(SessionData {
				version: None,
				consensus_session: consensus_session,
				is_shadow_decryption: None,
				delegation_status: None,
				result: None,
			}),
		})
@@ -202,18 +205,59 @@ impl SessionImpl {
		self.data.lock().result.clone()
	}

	/// Initialize decryption session on master node.
	pub fn initialize(&self, is_shadow_decryption: bool) -> Result<(), Error> {
	/// Delegate session to other node.
	pub fn delegate(&self, master: NodeId, version: H256, is_shadow_decryption: bool) -> Result<(), Error> {
		if self.core.meta.master_node_id != self.core.meta.self_node_id {
			return Err(Error::InvalidStateForRequest);
		}

		let mut data = self.data.lock();
		if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() {
			return Err(Error::InvalidStateForRequest);
		}

		data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(false);
		self.core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(DecryptionSessionDelegation {
			session: self.core.meta.id.clone().into(),
			sub_session: self.core.access_key.clone().into(),
			session_nonce: self.core.nonce,
			requestor_signature: data.consensus_session.consensus_job().executor().requester_signature()
				.expect("signature is passed to master node on creation; session can be delegated from master node only; qed")
				.clone().into(),
			version: version.into(),
			is_shadow_decryption: is_shadow_decryption,
		})))?;
		data.delegation_status = Some(DelegationStatus::DelegatedTo(master));
		Ok(())
	}

	/// Initialize decryption session on master node.
	pub fn initialize(&self, version: H256, is_shadow_decryption: bool) -> Result<(), Error> {
		debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id);

		// check if version exists
		let key_version = match self.core.key_share.as_ref() {
			None => return Err(Error::InvalidMessage),
			Some(key_share) => key_share.version(&version).map_err(|e| Error::KeyStorage(e.into()))?,
		};

		let mut data = self.data.lock();
		let non_isolated_nodes = self.core.cluster.nodes();
		data.consensus_session.consensus_job_mut().transport_mut().version = Some(version.clone());
		data.version = Some(version.clone());
		data.is_shadow_decryption = Some(is_shadow_decryption);
		data.consensus_session.initialize(self.core.key_share.id_numbers.keys().cloned().collect())?;
		data.consensus_session.initialize(key_version.id_numbers.keys()
			.filter(|n| non_isolated_nodes.contains(*n))
			.cloned()
			.chain(::std::iter::once(self.core.meta.self_node_id.clone()))
			.collect())?;

		if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished {
			self.core.disseminate_jobs(&mut data.consensus_session, is_shadow_decryption)?;
			self.core.disseminate_jobs(&mut data.consensus_session, &version, is_shadow_decryption)?;

			debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished);
			data.result = Some(Ok(data.consensus_session.result()?));
			self.core.completed.notify_all();
			let result = data.consensus_session.result()?;
			Self::set_decryption_result(&self.core, &mut *data, Ok(result));
		}

		Ok(())
@@ -233,12 +277,58 @@ impl SessionImpl {
			&DecryptionMessage::PartialDecryption(ref message) =>
				self.on_partial_decryption(sender, message),
			&DecryptionMessage::DecryptionSessionError(ref message) =>
				self.on_session_error(sender, message),
				self.process_node_error(Some(&sender), Error::Io(message.error.clone())),
			&DecryptionMessage::DecryptionSessionCompleted(ref message) =>
				self.on_session_completed(sender, message),
			&DecryptionMessage::DecryptionSessionDelegation(ref message) =>
				self.on_session_delegated(sender, message),
			&DecryptionMessage::DecryptionSessionDelegationCompleted(ref message) =>
				self.on_session_delegation_completed(sender, message),
		}
	}

	/// When session is delegated to this node.
	pub fn on_session_delegated(&self, sender: &NodeId, message: &DecryptionSessionDelegation) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(self.core.access_key == *message.sub_session);

		{
			let mut data = self.data.lock();
			if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() {
				return Err(Error::InvalidStateForRequest);
			}

			data.consensus_session.consensus_job_mut().executor_mut().set_requester_signature(message.requestor_signature.clone().into());
			data.delegation_status = Some(DelegationStatus::DelegatedFrom(sender.clone(), message.session_nonce));
		}

		self.initialize(message.version.clone().into(), message.is_shadow_decryption)
	}

	/// When delegated session is completed on other node.
	pub fn on_session_delegation_completed(&self, sender: &NodeId, message: &DecryptionSessionDelegationCompleted) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
		debug_assert!(self.core.access_key == *message.sub_session);

		if self.core.meta.master_node_id != self.core.meta.self_node_id {
			return Err(Error::InvalidStateForRequest);
		}

		let mut data = self.data.lock();
		match data.delegation_status.as_ref() {
			Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (),
			_ => return Err(Error::InvalidMessage),
		}

		Self::set_decryption_result(&self.core, &mut *data, Ok(EncryptedDocumentKeyShadow {
			decrypted_secret: message.decrypted_secret.clone().into(),
			common_point: message.common_point.clone().map(Into::into),
			decrypt_shadows: message.decrypt_shadows.clone().map(Into::into),
		}));

		Ok(())
	}

	/// When consensus-related message is received.
	pub fn on_consensus_message(&self, sender: &NodeId, message: &DecryptionConsensusMessage) -> Result<(), Error> {
		debug_assert!(self.core.meta.id == *message.session);
@@ -246,6 +336,14 @@ impl SessionImpl {

		let mut data = self.data.lock();
		let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus;
		if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message {
			let version = msg.version.clone().into();
			let has_key_share = self.core.key_share.as_ref()
				.map(|ks| ks.version(&version).is_ok())
				.unwrap_or(false);
			data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(has_key_share);
			data.version = Some(version);
		}
		data.consensus_session.on_consensus_message(&sender, &message.message)?;

		let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished;
@@ -253,9 +351,10 @@ impl SessionImpl {
			return Ok(());
		}

		let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone();
		let is_shadow_decryption = data.is_shadow_decryption
			.expect("we are on master node; on master node is_shadow_decryption is filled in initialize(); on_consensus_message follows initialize (state check in consensus_session); qed");
		self.core.disseminate_jobs(&mut data.consensus_session, is_shadow_decryption)
		self.core.disseminate_jobs(&mut data.consensus_session, &version, is_shadow_decryption)
	}

	/// When partial decryption is requested.
@@ -264,9 +363,16 @@ impl SessionImpl {
		debug_assert!(self.core.access_key == *message.sub_session);
		debug_assert!(sender != &self.core.meta.self_node_id);

		let key_share = match self.core.key_share.as_ref() {
			None => return Err(Error::InvalidMessage),
			Some(key_share) => key_share,
		};

		let mut data = self.data.lock();
		let key_version = key_share.version(data.version.as_ref().ok_or(Error::InvalidMessage)?)
			.map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
		let requester = data.consensus_session.consensus_job().executor().requester()?.ok_or(Error::InvalidStateForRequest)?.clone();
		let decryption_job = DecryptionJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.access_key.clone(), requester, self.core.key_share.clone())?;
		let decryption_job = DecryptionJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.access_key.clone(), requester, key_share.clone(), key_version)?;
		let decryption_transport = self.core.decryption_transport();

		data.consensus_session.on_job_request(&sender, PartialDecryptionRequest {
@@ -302,8 +408,8 @@ impl SessionImpl {
			})))?;
		}

		data.result = Some(Ok(data.consensus_session.result()?));
		self.core.completed.notify_all();
		let result = data.consensus_session.result()?;
		Self::set_decryption_result(&self.core, &mut *data, Ok(result));

		Ok(())
	}
@@ -317,14 +423,16 @@ impl SessionImpl {
		self.data.lock().consensus_session.on_session_completed(sender)
	}

/// When error has occured on another node.
|
||||
pub fn on_session_error(&self, sender: &NodeId, message: &DecryptionSessionError) -> Result<(), Error> {
|
||||
self.process_node_error(Some(&sender), &message.error)
|
||||
/// Process error from the other node.
|
||||
fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> {
|
||||
let mut data = self.data.lock();
|
||||
let is_self_node_error = node.map(|n| n == &self.core.meta.self_node_id).unwrap_or(false);
|
||||
// error is always fatal if coming from this node
|
||||
if is_self_node_error {
|
||||
Self::set_decryption_result(&self.core, &mut *data, Err(error.clone()));
|
||||
return Err(error);
|
||||
}
|
||||
|
||||
/// Process error from the other node.
|
||||
fn process_node_error(&self, node: Option<&NodeId>, error: &String) -> Result<(), Error> {
|
||||
let mut data = self.data.lock();
|
||||
match {
|
||||
match node {
|
||||
Some(node) => data.consensus_session.on_node_error(node),
|
||||
@ -333,15 +441,15 @@ impl SessionImpl {
|
||||
} {
|
||||
Ok(false) => Ok(()),
|
||||
Ok(true) => {
|
||||
let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone();
|
||||
let is_shadow_decryption = data.is_shadow_decryption.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when is_shadow_decryption.is_some(); qed");
|
||||
let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, is_shadow_decryption);
|
||||
let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, &version, is_shadow_decryption);
|
||||
match disseminate_result {
|
||||
Ok(()) => Ok(()),
|
||||
Err(err) => {
|
||||
warn!("{}: decryption session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);
|
||||
|
||||
data.result = Some(Err(err.clone()));
|
||||
self.core.completed.notify_all();
|
||||
Self::set_decryption_result(&self.core, &mut *data, Err(err.clone()));
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
@ -349,29 +457,92 @@ impl SessionImpl {
|
||||
Err(err) => {
|
||||
warn!("{}: decryption session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);
|
||||
|
||||
data.result = Some(Err(err.clone()));
|
||||
self.core.completed.notify_all();
|
||||
Self::set_decryption_result(&self.core, &mut *data, Err(err.clone()));
|
||||
Err(err)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Set decryption result.
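/// If the session was delegated from another node, the result (or error) is first
/// forwarded back to the delegating node before being stored locally.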
fn set_decryption_result(core: &SessionCore, data: &mut SessionData, result: Result<EncryptedDocumentKeyShadow, Error>) {
if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() {
// error means can't communicate => ignore it
let _ = match result.as_ref() {
Ok(document_key) => core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(DecryptionSessionDelegationCompleted {
session: core.meta.id.clone().into(),
sub_session: core.access_key.clone().into(),
session_nonce: nonce,
decrypted_secret: document_key.decrypted_secret.clone().into(),
common_point: document_key.common_point.clone().map(Into::into),
decrypt_shadows: document_key.decrypt_shadows.clone(),
}))),
Err(error) => core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionError(DecryptionSessionError {
session: core.meta.id.clone().into(),
sub_session: core.access_key.clone().into(),
session_nonce: nonce,
error: error.clone().into(),
}))),
};
}

data.result = Some(result);
core.completed.notify_all();
}
}

impl ClusterSession for SessionImpl {
type Id = SessionIdWithSubSession;

fn type_name() -> &'static str {
"decryption"
}

fn id(&self) -> SessionIdWithSubSession {
SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone())
}

fn is_finished(&self) -> bool {
let data = self.data.lock();
data.consensus_session.state() == ConsensusSessionState::Failed
|| data.consensus_session.state() == ConsensusSessionState::Finished
self.data.lock().result.is_some()
}

fn on_node_timeout(&self, node: &NodeId) {
// ignore error, only state matters
let _ = self.process_node_error(Some(node), &Error::NodeDisconnected.into());
let _ = self.process_node_error(Some(node), Error::NodeDisconnected);
}

fn on_session_timeout(&self) {
// ignore error, only state matters
let _ = self.process_node_error(None, &Error::NodeDisconnected.into());
let _ = self.process_node_error(None, Error::NodeDisconnected);
}

fn on_session_error(&self, node: &NodeId, error: Error) {
let is_fatal = self.process_node_error(Some(node), error.clone()).is_err();
let is_this_node_error = *node == self.core.meta.self_node_id;
if is_fatal || is_this_node_error {
// error in decryption session is non-fatal if it occurs on a slave node
// => either respond with error
// => or broadcast error
let message = Message::Decryption(DecryptionMessage::DecryptionSessionError(DecryptionSessionError {
session: self.core.meta.id.clone().into(),
sub_session: self.core.access_key.clone().into(),
session_nonce: self.core.nonce,
error: error.clone().into(),
}));

// do not bother processing the send error, as we are already processing an error
let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id {
self.core.cluster.broadcast(message)
} else {
self.core.cluster.send(&self.core.meta.master_node_id, message)
};
}
}

fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
match *message {
Message::Decryption(ref message) => self.process_message(sender, message),
_ => unreachable!("cluster checks message to be correct before passing; qed"),
}
}
}

@@ -398,9 +569,15 @@ impl SessionCore {
}
}

pub fn disseminate_jobs(&self, consensus_session: &mut DecryptionConsensusSession, is_shadow_decryption: bool) -> Result<(), Error> {
pub fn disseminate_jobs(&self, consensus_session: &mut DecryptionConsensusSession, version: &H256, is_shadow_decryption: bool) -> Result<(), Error> {
let key_share = match self.key_share.as_ref() {
None => return Err(Error::InvalidMessage),
Some(key_share) => key_share,
};

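// resolve the requested key version to its hash; the hash identifies the set of shares all nodes must use for this job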
let key_version = key_share.version(version).map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
let requester = consensus_session.consensus_job().executor().requester()?.ok_or(Error::InvalidStateForRequest)?.clone();
let decryption_job = DecryptionJob::new_on_master(self.meta.self_node_id.clone(), self.access_key.clone(), requester, self.key_share.clone(), is_shadow_decryption)?;
let decryption_job = DecryptionJob::new_on_master(self.meta.self_node_id.clone(), self.access_key.clone(), requester, key_share.clone(), key_version, is_shadow_decryption)?;
consensus_session.disseminate_jobs(decryption_job, self.decryption_transport())
}
}
@@ -410,12 +587,15 @@ impl JobTransport for DecryptionConsensusTransport {
type PartialJobResponse=bool;

fn send_partial_request(&self, node: &NodeId, request: Signature) -> Result<(), Error> {
let version = self.version.as_ref()
.expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed");
self.cluster.send(node, Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(DecryptionConsensusMessage {
session: self.id.clone().into(),
sub_session: self.access_key.clone().into(),
session_nonce: self.nonce,
message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
requestor_signature: request.into(),
version: version.clone().into(),
})
})))
}
@@ -459,38 +639,13 @@ impl JobTransport for DecryptionJobTransport {
}
}

impl DecryptionSessionId {
/// Create new decryption session Id.
pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self {
DecryptionSessionId {
id: session_id,
access_key: sub_session_id,
}
}
}

impl PartialOrd for DecryptionSessionId {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}

impl Ord for DecryptionSessionId {
fn cmp(&self, other: &Self) -> Ordering {
match self.id.cmp(&other.id) {
Ordering::Equal => self.access_key.cmp(&other.access_key),
r @ _ => r,
}
}
}

#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::collections::BTreeMap;
use acl_storage::DummyAclStorage;
use ethkey::{self, KeyPair, Random, Generator, Public, Secret};
use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error, EncryptedDocumentKeyShadow, SessionMeta};
use key_server_cluster::{NodeId, DocumentKeyShare, DocumentKeyShareVersion, SessionId, Error, EncryptedDocumentKeyShadow, SessionMeta};
use key_server_cluster::cluster::tests::DummyCluster;
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::decryption_session::{SessionImpl, SessionParams};
@@ -528,11 +683,13 @@ mod tests {
let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare {
author: Public::default(),
threshold: 3,
id_numbers: id_numbers.clone().into_iter().collect(),
secret_share: secret_shares[i].clone(),
polynom1: Vec::new(),
common_point: Some(common_point.clone()),
encrypted_point: Some(encrypted_point.clone()),
versions: vec![DocumentKeyShareVersion {
hash: Default::default(),
id_numbers: id_numbers.clone().into_iter().collect(),
secret_share: secret_shares[i].clone(),
}],
}).collect();
let acl_storages: Vec<_> = (0..5).map(|_| Arc::new(DummyAclStorage::default())).collect();
let clusters: Vec<_> = (0..5).map(|i| {
@@ -552,7 +709,7 @@ mod tests {
threshold: encrypted_datas[i].threshold,
},
access_key: access_key.clone(),
key_share: encrypted_datas[i].clone(),
key_share: Some(encrypted_datas[i].clone()),
acl_storage: acl_storages[i].clone(),
cluster: clusters[i].clone(),
nonce: 0,
@@ -594,15 +751,17 @@ mod tests {
threshold: 0,
},
access_key: Random.generate().unwrap().secret().clone(),
key_share: DocumentKeyShare {
key_share: Some(DocumentKeyShare {
author: Public::default(),
threshold: 0,
id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(),
polynom1: Vec::new(),
common_point: Some(Random.generate().unwrap().public().clone()),
encrypted_point: Some(Random.generate().unwrap().public().clone()),
},
versions: vec![DocumentKeyShareVersion {
hash: Default::default(),
id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(),
}],
}),
acl_storage: Arc::new(DummyAclStorage::default()),
cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
nonce: 0,
@@ -613,12 +772,9 @@ mod tests {
}

#[test]
fn fails_to_construct_if_not_a_part_of_cluster() {
let mut nodes = BTreeMap::new();
fn fails_to_initialize_if_does_not_have_a_share() {
let self_node_id = Random.generate().unwrap().public().clone();
nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
match SessionImpl::new(SessionParams {
let session = SessionImpl::new(SessionParams {
meta: SessionMeta {
id: SessionId::default(),
self_node_id: self_node_id.clone(),
@@ -626,31 +782,21 @@ mod tests {
threshold: 0,
},
access_key: Random.generate().unwrap().secret().clone(),
key_share: DocumentKeyShare {
author: Public::default(),
threshold: 0,
id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(),
polynom1: Vec::new(),
common_point: Some(Random.generate().unwrap().public().clone()),
encrypted_point: Some(Random.generate().unwrap().public().clone()),
},
key_share: None,
acl_storage: Arc::new(DummyAclStorage::default()),
cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
nonce: 0,
}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) {
Err(Error::InvalidNodesConfiguration) => (),
_ => panic!("unexpected"),
}
}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())).unwrap();
assert_eq!(session.initialize(Default::default(), false), Err(Error::InvalidMessage));
}

#[test]
fn fails_to_construct_if_threshold_is_wrong() {
fn fails_to_initialize_if_threshold_is_wrong() {
let mut nodes = BTreeMap::new();
let self_node_id = Random.generate().unwrap().public().clone();
nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone());
nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
match SessionImpl::new(SessionParams {
let session = SessionImpl::new(SessionParams {
meta: SessionMeta {
id: SessionId::default(),
self_node_id: self_node_id.clone(),
@@ -658,41 +804,42 @@ mod tests {
threshold: 2,
},
access_key: Random.generate().unwrap().secret().clone(),
key_share: DocumentKeyShare {
key_share: Some(DocumentKeyShare {
author: Public::default(),
threshold: 2,
id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(),
polynom1: Vec::new(),
common_point: Some(Random.generate().unwrap().public().clone()),
encrypted_point: Some(Random.generate().unwrap().public().clone()),
},
versions: vec![DocumentKeyShareVersion {
hash: Default::default(),
id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(),
}],
}),
acl_storage: Arc::new(DummyAclStorage::default()),
cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
nonce: 0,
}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) {
Err(Error::InvalidThreshold) => (),
_ => panic!("unexpected"),
}
}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())).unwrap();
assert_eq!(session.initialize(Default::default(), false), Err(Error::ConsensusUnreachable));
}

#[test]
fn fails_to_initialize_when_already_initialized() {
let (_, _, _, sessions) = prepare_decryption_sessions();
assert_eq!(sessions[0].initialize(false).unwrap(), ());
assert_eq!(sessions[0].initialize(false).unwrap_err(), Error::InvalidStateForRequest);
assert_eq!(sessions[0].initialize(Default::default(), false).unwrap(), ());
assert_eq!(sessions[0].initialize(Default::default(), false).unwrap_err(), Error::InvalidStateForRequest);
}

#[test]
fn fails_to_accept_initialization_when_already_initialized() {
let (_, _, _, sessions) = prepare_decryption_sessions();
assert_eq!(sessions[0].initialize(false).unwrap(), ());
assert_eq!(sessions[0].initialize(Default::default(), false).unwrap(), ());
assert_eq!(sessions[0].on_consensus_message(sessions[1].node(), &message::DecryptionConsensusMessage {
session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone().into(),
session_nonce: 0,
message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession {
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
version: Default::default(),
}),
}).unwrap_err(), Error::InvalidMessage);
}
@@ -706,6 +853,7 @@ mod tests {
session_nonce: 0,
message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession {
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
version: Default::default(),
}),
}).unwrap(), ());
assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node(), &message::RequestPartialDecryption {
@@ -727,6 +875,7 @@ mod tests {
session_nonce: 0,
message: message::ConsensusMessage::InitializeConsensusSession(message::InitializeConsensusSession {
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
version: Default::default(),
}),
}).unwrap(), ());
assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node(), &message::RequestPartialDecryption {
@@ -755,7 +904,7 @@ mod tests {
#[test]
fn fails_to_accept_partial_decrypt_twice() {
let (_, clusters, _, sessions) = prepare_decryption_sessions();
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

let mut pd_from = None;
let mut pd_msg = None;
@@ -783,7 +932,7 @@ mod tests {
#[test]
fn node_is_marked_rejected_when_timed_out_during_initialization_confirmation() {
let (_, _, _, sessions) = prepare_decryption_sessions();
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

// 1 node disconnects => we still can recover secret
sessions[0].on_node_timeout(sessions[1].node());
@@ -801,7 +950,7 @@ mod tests {
let key_pair = Random.generate().unwrap();

acl_storages[1].prohibit(key_pair.public().clone(), SessionId::default());
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();

@@ -813,7 +962,7 @@ mod tests {
#[test]
fn session_does_not_fail_if_requested_node_disconnects() {
let (_, clusters, _, sessions) = prepare_decryption_sessions();
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();

@@ -829,7 +978,7 @@ mod tests {
#[test]
fn session_does_not_fail_if_node_with_shadow_point_disconnects() {
let (_, clusters, _, sessions) = prepare_decryption_sessions();
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults
&& sessions[0].data.lock().consensus_session.computation_job().responses().len() == 2).unwrap();
@@ -846,7 +995,7 @@ mod tests {
#[test]
fn session_restarts_if_confirmed_node_disconnects() {
let (_, clusters, _, sessions) = prepare_decryption_sessions();
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();

@@ -861,7 +1010,7 @@ mod tests {
#[test]
fn session_does_not_fail_if_non_master_node_disconnects_from_non_master_node() {
let (_, clusters, _, sessions) = prepare_decryption_sessions();
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

do_messages_exchange_until(&clusters, &sessions, |_, _, _| sessions[0].state() == ConsensusSessionState::WaitingForPartialResults).unwrap();

@@ -876,7 +1025,7 @@ mod tests {
let (_, clusters, _, sessions) = prepare_decryption_sessions();

// now let's try to do a decryption
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

do_messages_exchange(&clusters, &sessions).unwrap();

@@ -898,7 +1047,7 @@ mod tests {
let (key_pair, clusters, _, sessions) = prepare_decryption_sessions();

// now let's try to do a decryption
sessions[0].initialize(true).unwrap();
sessions[0].initialize(Default::default(), true).unwrap();

do_messages_exchange(&clusters, &sessions).unwrap();

@@ -929,7 +1078,7 @@ mod tests {
let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions();

// now let's try to do a decryption
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

// we need 4 out of 5 nodes to agree to do a decryption
// let's say that 2 of these nodes disagree
@@ -952,7 +1101,7 @@ mod tests {
acl_storages[0].prohibit(key_pair.public().clone(), SessionId::default());

// now let's try to do a decryption
sessions[0].initialize(false).unwrap();
sessions[0].initialize(Default::default(), false).unwrap();

do_messages_exchange(&clusters, &sessions).unwrap();

@@ -979,4 +1128,52 @@ mod tests {
}
)), Err(Error::ReplayProtection));
}

#[test]
fn decryption_works_when_delegated_to_other_node() {
let (_, clusters, _, mut sessions) = prepare_decryption_sessions();

// let's say node1 doesn't have a share && delegates decryption request to node0
// initially session is created on node1 => node1 is master for itself, but for other nodes node0 is still master
sessions[1].core.meta.master_node_id = sessions[1].core.meta.self_node_id.clone();
sessions[1].data.lock().consensus_session.consensus_job_mut().executor_mut().set_requester_signature(
sessions[0].data.lock().consensus_session.consensus_job().executor().requester_signature().unwrap().clone()
);

// now let's try to do a decryption
sessions[1].delegate(sessions[0].core.meta.self_node_id.clone(), Default::default(), false).unwrap();
do_messages_exchange(&clusters, &sessions).unwrap();

// now check that:
// 1) all 5 sessions are in Finished state
assert_eq!(sessions.iter().filter(|s| s.state() == ConsensusSessionState::Finished).count(), 5);
// 2) 1 session has decrypted key value
assert_eq!(sessions[1].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow {
decrypted_secret: SECRET_PLAIN.into(),
common_point: None,
decrypt_shadows: None,
});
}

#[test]
fn decryption_works_when_share_owners_are_isolated() {
let (_, clusters, _, sessions) = prepare_decryption_sessions();

// we need 4 out of 5 nodes to agree to do a decryption
// let's say that 1 of these nodes (not the master) is isolated
let isolated_node_id = sessions[4].core.meta.self_node_id.clone();
for cluster in &clusters {
cluster.remove_node(&isolated_node_id);
}

// now let's try to do a decryption
sessions[0].initialize(Default::default(), false).unwrap();
do_messages_exchange(&clusters, &sessions).unwrap();

assert_eq!(sessions[0].decrypted_secret().unwrap().unwrap(), EncryptedDocumentKeyShadow {
decrypted_secret: SECRET_PLAIN.into(),
common_point: None,
decrypt_shadows: None,
});
}
}

@@ -48,7 +48,7 @@ pub struct SessionImpl {
/// Public identifier of this node.
self_node_id: NodeId,
/// Encrypted data.
encrypted_data: DocumentKeyShare,
encrypted_data: Option<DocumentKeyShare>,
/// Key storage.
key_storage: Arc<KeyStorage>,
/// Cluster which allows this node to send messages to other nodes in the cluster.
@@ -68,7 +68,7 @@ pub struct SessionParams {
/// Id of node, on which this session is running.
pub self_node_id: Public,
/// Encrypted data (result of running generation_session::SessionImpl).
pub encrypted_data: DocumentKeyShare,
pub encrypted_data: Option<DocumentKeyShare>,
/// Key storage.
pub key_storage: Arc<KeyStorage>,
/// Cluster
@@ -115,7 +115,7 @@ pub enum SessionState {
impl SessionImpl {
/// Create new encryption session.
pub fn new(params: SessionParams) -> Result<Self, Error> {
check_encrypted_data(&params.self_node_id, &params.encrypted_data)?;
check_encrypted_data(&params.encrypted_data)?;

Ok(SessionImpl {
id: params.id,
@@ -147,31 +147,31 @@ impl SessionImpl {
return Err(Error::InvalidStateForRequest);
}

// update state
data.state = SessionState::WaitingForInitializationConfirm;
data.nodes.extend(self.cluster.nodes().into_iter().map(|n| (n, NodeData {
initialization_confirmed: &n == self.node(),
})));

// TODO: id signature is not enough here, as it was already used in key generation
// TODO: there could be situation when some nodes have failed to store encrypted data
// => potential problems during restore. some confirmation step is needed (2pc)?
// save encryption data
if let Some(mut encrypted_data) = self.encrypted_data.clone() {
// check that the requester is the author of the encrypted data
let requestor_public = ethkey::recover(&requestor_signature, &self.id)?;
if self.encrypted_data.author != requestor_public {
if encrypted_data.author != requestor_public {
return Err(Error::AccessDenied);
}

// update state
data.state = SessionState::WaitingForInitializationConfirm;
for node_id in self.encrypted_data.id_numbers.keys() {
data.nodes.insert(node_id.clone(), NodeData {
initialization_confirmed: node_id == self.node(),
});
}

// TODO: there could be situation when some nodes have failed to store encrypted data
// => potential problems during restore. some confirmation step is needed?
// save encryption data
let mut encrypted_data = self.encrypted_data.clone();
encrypted_data.common_point = Some(common_point.clone());
encrypted_data.encrypted_point = Some(encrypted_point.clone());
self.key_storage.update(self.id.clone(), encrypted_data)
.map_err(|e| Error::KeyStorage(e.into()))?;
}

// start initialization
if self.encrypted_data.id_numbers.len() > 1 {
if data.nodes.len() > 1 {
self.cluster.broadcast(Message::Encryption(EncryptionMessage::InitializeEncryptionSession(InitializeEncryptionSession {
session: self.id.clone().into(),
session_nonce: self.nonce,
@@ -193,8 +193,6 @@ impl SessionImpl {
debug_assert!(self.id == *message.session);
debug_assert!(&sender != self.node());

self.check_nonce(message.session_nonce)?;

let mut data = self.data.lock();

// check state
@@ -203,17 +201,18 @@ impl SessionImpl {
}

// check that the requester is the author of the encrypted data
if let Some(mut encrypted_data) = self.encrypted_data.clone() {
let requestor_public = ethkey::recover(&message.requestor_signature.clone().into(), &self.id)?;
if self.encrypted_data.author != requestor_public {
if encrypted_data.author != requestor_public {
return Err(Error::AccessDenied);
}

// save encryption data
let mut encrypted_data = self.encrypted_data.clone();
encrypted_data.common_point = Some(message.common_point.clone().into());
encrypted_data.encrypted_point = Some(message.encrypted_point.clone().into());
self.key_storage.update(self.id.clone(), encrypted_data)
.map_err(|e| Error::KeyStorage(e.into()))?;
}

// update state
data.state = SessionState::Finished;
@@ -230,8 +229,6 @@ impl SessionImpl {
debug_assert!(self.id == *message.session);
debug_assert!(&sender != self.node());

self.check_nonce(message.session_nonce)?;

let mut data = self.data.lock();
debug_assert!(data.nodes.contains_key(&sender));

@@ -250,32 +247,19 @@ impl SessionImpl {

Ok(())
}

/// When error has occurred on another node.
pub fn on_session_error(&self, sender: &NodeId, message: &EncryptionSessionError) -> Result<(), Error> {
self.check_nonce(message.session_nonce)?;

let mut data = self.data.lock();

warn!("{}: encryption session failed with error: {} from {}", self.node(), message.error, sender);

data.state = SessionState::Failed;
data.result = Some(Err(Error::Io(message.error.clone())));
self.completed.notify_all();

Ok(())
}

/// Check session nonce.
fn check_nonce(&self, message_session_nonce: u64) -> Result<(), Error> {
match self.nonce == message_session_nonce {
true => Ok(()),
false => Err(Error::ReplayProtection),
}
}
}

impl ClusterSession for SessionImpl {
type Id = SessionId;

fn type_name() -> &'static str {
"encryption"
}

fn id(&self) -> SessionId {
self.id.clone()
}

fn is_finished(&self) -> bool {
let data = self.data.lock();
data.state == SessionState::Failed
@@ -301,6 +285,47 @@ impl ClusterSession for SessionImpl {
data.result = Some(Err(Error::NodeDisconnected));
self.completed.notify_all();
}

fn on_session_error(&self, node: &NodeId, error: Error) {
// error in encryption session is considered fatal
// => broadcast error if error occurred on this node
if *node == self.self_node_id {
// do not bother processing the send error, as we are already processing an error
let _ = self.cluster.broadcast(Message::Encryption(EncryptionMessage::EncryptionSessionError(EncryptionSessionError {
session: self.id.clone().into(),
session_nonce: self.nonce,
error: error.clone().into(),
})));
}

let mut data = self.data.lock();

warn!("{}: encryption session failed with error: {} from {}", self.node(), error, node);

data.state = SessionState::Failed;
data.result = Some(Err(error));
self.completed.notify_all();
}

fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
if Some(self.nonce) != message.session_nonce() {
return Err(Error::ReplayProtection);
}

match message {
&Message::Encryption(ref message) => match message {
&EncryptionMessage::InitializeEncryptionSession(ref message) =>
self.on_initialize_session(sender.clone(), message),
&EncryptionMessage::ConfirmEncryptionInitialization(ref message) =>
self.on_confirm_initialization(sender.clone(), message),
&EncryptionMessage::EncryptionSessionError(ref message) => {
self.on_session_error(sender, Error::Io(message.error.clone().into()));
Ok(())
},
},
_ => unreachable!("cluster checks message to be correct before passing; qed"),
}
}
}

impl Session for SessionImpl {
@@ -329,15 +354,13 @@ impl Debug for SessionImpl {
}
}

fn check_encrypted_data(self_node_id: &Public, encrypted_data: &DocumentKeyShare) -> Result<(), Error> {
use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold};

fn check_encrypted_data(encrypted_data: &Option<DocumentKeyShare>) -> Result<(), Error> {
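// a session without a local key share is allowed; there is nothing to validate in that case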
if let &Some(ref encrypted_data) = encrypted_data {
// check that common_point and encrypted_point are still not set yet
if encrypted_data.common_point.is_some() || encrypted_data.encrypted_point.is_some() {
return Err(Error::CompletedSessionId);
}

let nodes = encrypted_data.id_numbers.keys().cloned().collect();
check_cluster_nodes(self_node_id, &nodes)?;
check_threshold(encrypted_data.threshold, &nodes)
}

Ok(())
}

@@ -20,7 +20,7 @@ use std::time;
use std::sync::Arc;
use parking_lot::{Condvar, Mutex};
use ethkey::{Public, Secret};
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare};
use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare, DocumentKeyShareVersion};
use key_server_cluster::math;
use key_server_cluster::cluster::Cluster;
use key_server_cluster::cluster_sessions::ClusterSession;
@@ -291,8 +291,10 @@ impl SessionImpl {
self.on_keys_dissemination(sender.clone(), message),
&GenerationMessage::PublicKeyShare(ref message) =>
self.on_public_key_share(sender.clone(), message),
&GenerationMessage::SessionError(ref message) =>
self.on_session_error(sender, message),
&GenerationMessage::SessionError(ref message) => {
self.on_session_error(sender, Error::Io(message.error.clone().into()));
Ok(())
},
&GenerationMessage::SessionCompleted(ref message) =>
self.on_session_completed(sender.clone(), message),
}
@@ -504,11 +506,12 @@ impl SessionImpl {
let encrypted_data = DocumentKeyShare {
author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
polynom1: data.polynom1.as_ref().expect("polynom1 is filled in KG phase; we are at the end of KG phase; qed").clone(),
common_point: None,
encrypted_point: None,
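// the generated share is stored as the first version of this key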
versions: vec![DocumentKeyShareVersion::new(
data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
)],
};

if let Some(ref key_storage) = self.key_storage {
@@ -546,20 +549,6 @@ impl SessionImpl {
Ok(())
}

/// When error has occurred on another node.
pub fn on_session_error(&self, sender: &NodeId, message: &SessionError) -> Result<(), Error> {
let mut data = self.data.lock();

warn!("{}: generation session failed with error: {} from {}", self.node(), message.error, sender);

data.state = SessionState::Failed;
data.key_share = Some(Err(Error::Io(message.error.clone())));
data.joint_public_and_secret = Some(Err(Error::Io(message.error.clone())));
self.completed.notify_all();

Ok(())
}

/// Complete initialization (when all other nodes have responded with confirmation)
fn complete_initialization(&self, mut derived_point: Public) -> Result<(), Error> {
// update point once again to make sure that derived point is not generated by last node
@@ -683,11 +672,12 @@ impl SessionImpl {
let encrypted_data = DocumentKeyShare {
author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(),
threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"),
id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
polynom1: data.polynom1.as_ref().expect("polynom1 is filled in KG phase; we are at the end of KG phase; qed").clone(),
common_point: None,
encrypted_point: None,
versions: vec![DocumentKeyShareVersion::new(
data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(),
data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(),
)],
};

// if we are at the slave node - wait for session completion
@@ -725,6 +715,16 @@ impl SessionImpl {
}

impl ClusterSession for SessionImpl {
type Id = SessionId;

fn type_name() -> &'static str {
"generation"
}

fn id(&self) -> SessionId {
self.id.clone()
}

fn is_finished(&self) -> bool {
let data = self.data.lock();
data.state == SessionState::Failed
@@ -754,6 +754,32 @@ impl ClusterSession for SessionImpl {
data.joint_public_and_secret = Some(Err(Error::NodeDisconnected));
self.completed.notify_all();
}

fn on_session_error(&self, node: &NodeId, error: Error) {
// error in generation session is considered fatal
// => broadcast error if error occurred on this node
if *node == self.self_node_id {
// do not bother processing the send error, as we are already processing an error
let _ = self.cluster.broadcast(Message::Generation(GenerationMessage::SessionError(SessionError {
session: self.id.clone().into(),
session_nonce: self.nonce,
error: error.clone().into(),
})));
}

let mut data = self.data.lock();
data.state = SessionState::Failed;
data.key_share = Some(Err(error.clone()));
data.joint_public_and_secret = Some(Err(error));
self.completed.notify_all();
}

fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
match *message {
Message::Generation(ref message) => self.process_message(sender, message),
_ => unreachable!("cluster checks message to be correct before passing; qed"),
}
}
}

impl Session for SessionImpl {
@@ -852,8 +878,8 @@ pub mod tests {
use std::sync::Arc;
use std::collections::{BTreeSet, BTreeMap, VecDeque};
use tokio_core::reactor::Core;
use ethkey::{Random, Generator, Public};
use key_server_cluster::{NodeId, SessionId, Error, DummyKeyStorage};
use ethkey::{Random, Generator, Public, KeyPair};
use key_server_cluster::{NodeId, SessionId, Error, KeyStorage, DummyKeyStorage};
use key_server_cluster::message::{self, Message, GenerationMessage};
use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established};
use key_server_cluster::cluster_sessions::ClusterSession;
@@ -956,6 +982,26 @@ pub mod tests {
let msg = self.take_message().unwrap();
self.process_message(msg)
}

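/// Compute the joint key pair from t + 1 stored shares and assert that it matches the joint secret derived from the per-node session secrets.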
pub fn compute_key_pair(&self, t: usize) -> KeyPair {
let secret_shares = self.nodes.values()
.map(|nd| nd.key_storage.get(&SessionId::default()).unwrap().unwrap().last_version().unwrap().secret_share.clone())
.take(t + 1)
.collect::<Vec<_>>();
let secret_shares = secret_shares.iter().collect::<Vec<_>>();
let id_numbers = self.nodes.iter()
.map(|(n, nd)| nd.key_storage.get(&SessionId::default()).unwrap().unwrap().last_version().unwrap().id_numbers[n].clone())
.take(t + 1)
.collect::<Vec<_>>();
let id_numbers = id_numbers.iter().collect::<Vec<_>>();
let joint_secret1 = math::compute_joint_secret_from_shares(t, &secret_shares, &id_numbers).unwrap();

let secret_values: Vec<_> = self.nodes.values().map(|s| s.session.joint_public_and_secret().unwrap().unwrap().1).collect();
let joint_secret2 = math::compute_joint_secret(secret_values.iter()).unwrap();
assert_eq!(joint_secret1, joint_secret2);

KeyPair::from_secret(joint_secret1).unwrap()
}
}

fn make_simple_cluster(threshold: usize, num_nodes: usize) -> Result<(SessionId, NodeId, NodeId, MessageLoop), Error> {

@@ -21,19 +21,17 @@ use ethkey::{Public, Secret, Signature};
use bigint::hash::H256;
use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, DocumentKeyShare};
use key_server_cluster::cluster::{Cluster};
use key_server_cluster::cluster_sessions::ClusterSession;
use key_server_cluster::cluster_sessions::{SessionIdWithSubSession, ClusterSession};
use key_server_cluster::generation_session::{SessionImpl as GenerationSession, SessionParams as GenerationSessionParams,
Session as GenerationSessionApi, SessionState as GenerationSessionState};
use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, SigningGenerationMessage,
RequestPartialSignature, PartialSignature, SigningSessionCompleted, GenerationMessage, ConsensusMessage, SigningSessionError,
InitializeConsensusSession, ConfirmConsensusInitialization};
InitializeConsensusSession, ConfirmConsensusInitialization, SigningSessionDelegation, SigningSessionDelegationCompleted};
use key_server_cluster::jobs::job_session::JobTransport;
use key_server_cluster::jobs::key_access_job::KeyAccessJob;
use key_server_cluster::jobs::signing_job::{PartialSigningRequest, PartialSigningResponse, SigningJob};
use key_server_cluster::jobs::consensus_session::{ConsensusSessionParams, ConsensusSessionState, ConsensusSession};

pub use key_server_cluster::decryption_session::DecryptionSessionId as SigningSessionId;

/// Signing session API.
pub trait Session: Send + Sync + 'static {
/// Wait until session is completed. Returns signed message.
@@ -61,7 +59,7 @@ struct SessionCore {
/// Signing session access key.
pub access_key: Secret,
/// Key share.
pub key_share: DocumentKeyShare,
pub key_share: Option<DocumentKeyShare>,
/// Cluster which allows this node to send messages to other nodes in the cluster.
pub cluster: Arc<Cluster>,
/// Session-level nonce.
@@ -79,10 +77,14 @@ struct SessionData {
pub state: SessionState,
/// Message hash.
pub message_hash: Option<H256>,
/// Key version to use for signing.
pub version: Option<H256>,
/// Consensus-based signing session.
pub consensus_session: SigningConsensusSession,
/// Session key generation session.
pub generation_session: Option<GenerationSession>,
/// Delegation status.
pub delegation_status: Option<DelegationStatus>,
/// Signing result.
pub result: Option<Result<(Secret, Secret), Error>>,
}
@@ -106,7 +108,7 @@ pub struct SessionParams {
/// Session access key.
pub access_key: Secret,
/// Key share.
pub key_share: DocumentKeyShare,
pub key_share: Option<DocumentKeyShare>,
/// ACL storage.
pub acl_storage: Arc<AclStorage>,
/// Cluster
@@ -123,6 +125,8 @@ struct SigningConsensusTransport {
access_key: Secret,
/// Session-level nonce.
nonce: u64,
/// Selected key version (on master node).
version: Option<H256>,
/// Cluster.
cluster: Arc<Cluster>,
}
@@ -151,23 +155,24 @@ struct SigningJobTransport {
cluster: Arc<Cluster>,
}

/// Session delegation status.
enum DelegationStatus {
/// Delegated to other node.
DelegatedTo(NodeId),
/// Delegated from other node.
DelegatedFrom(NodeId, u64),
}

impl SessionImpl {
/// Create new signing session.
pub fn new(params: SessionParams, requester_signature: Option<Signature>) -> Result<Self, Error> {
debug_assert_eq!(params.meta.threshold, params.key_share.threshold);
debug_assert_eq!(params.meta.self_node_id == params.meta.master_node_id, requester_signature.is_some());

use key_server_cluster::generation_session::{check_cluster_nodes, check_threshold};

// check nodes and threshold
let nodes = params.key_share.id_numbers.keys().cloned().collect();
check_cluster_nodes(&params.meta.self_node_id, &nodes)?;
check_threshold(params.key_share.threshold, &nodes)?;
debug_assert_eq!(params.meta.threshold, params.key_share.as_ref().map(|ks| ks.threshold).unwrap_or_default());

let consensus_transport = SigningConsensusTransport {
id: params.meta.id.clone(),
access_key: params.access_key.clone(),
nonce: params.nonce,
version: None,
cluster: params.cluster.clone(),
};
let consensus_session = ConsensusSession::new(ConsensusSessionParams {
@@ -191,8 +196,10 @@ impl SessionImpl {
data: Mutex::new(SessionData {
state: SessionState::ConsensusEstablishing,
message_hash: None,
version: None,
consensus_session: consensus_session,
generation_session: None,
delegation_status: None,
result: None,
}),
})
@@ -204,11 +211,53 @@ impl SessionImpl {
self.data.lock().state
}

/// Initialize signing session on master node.
pub fn initialize(&self, message_hash: H256) -> Result<(), Error> {
/// Delegate session to other node.
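/// Typically used when this node holds no share of the key: the request (requester signature, key version and message hash) is forwarded to `master`, which runs the session on this node's behalf.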
pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> {
if self.core.meta.master_node_id != self.core.meta.self_node_id {
return Err(Error::InvalidStateForRequest);
}

let mut data = self.data.lock();
if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() {
return Err(Error::InvalidStateForRequest);
}

data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(false);
self.core.cluster.send(&master, Message::Signing(SigningMessage::SigningSessionDelegation(SigningSessionDelegation {
session: self.core.meta.id.clone().into(),
sub_session: self.core.access_key.clone().into(),
session_nonce: self.core.nonce,
requestor_signature: data.consensus_session.consensus_job().executor().requester_signature()
.expect("signature is passed to master node on creation; session can be delegated from master node only; qed")
.clone().into(),
version: version.into(),
message_hash: message_hash.into(),
})))?;
data.delegation_status = Some(DelegationStatus::DelegatedTo(master));
Ok(())
}

/// Initialize signing session on master node.
pub fn initialize(&self, version: H256, message_hash: H256) -> Result<(), Error> {
debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id);

// check if version exists
let key_version = match self.core.key_share.as_ref() {
None => return Err(Error::InvalidMessage),
Some(key_share) => key_share.version(&version).map_err(|e| Error::KeyStorage(e.into()))?,
};

let mut data = self.data.lock();
let non_isolated_nodes = self.core.cluster.nodes();
data.consensus_session.consensus_job_mut().transport_mut().version = Some(version.clone());
data.version = Some(version.clone());
data.message_hash = Some(message_hash);
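// the consensus group is built from owners of the selected key version that are still connected; the master node is always included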
data.consensus_session.initialize(self.core.key_share.id_numbers.keys().cloned().collect())?;
data.consensus_session.initialize(key_version.id_numbers.keys()
.filter(|n| non_isolated_nodes.contains(*n))
.cloned()
.chain(::std::iter::once(self.core.meta.self_node_id.clone()))
.collect())?;

if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished {
let generation_session = GenerationSession::new(GenerationSessionParams {
@@ -232,11 +281,11 @@ impl SessionImpl {
data.generation_session = Some(generation_session);
data.state = SessionState::SignatureComputing;

self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)?;
self.core.disseminate_jobs(&mut data.consensus_session, &version, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)?;

debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished);
data.result = Some(Ok(data.consensus_session.result()?));
self.core.completed.notify_all();
let result = data.consensus_session.result()?;
Self::set_signing_result(&self.core, &mut *data, Ok(result));
}

Ok(())
@@ -258,12 +307,55 @@ impl SessionImpl {
&SigningMessage::PartialSignature(ref message) =>
self.on_partial_signature(sender, message),
&SigningMessage::SigningSessionError(ref message) =>
self.on_session_error(sender, message),
self.process_node_error(Some(&sender), Error::Io(message.error.clone())),
&SigningMessage::SigningSessionCompleted(ref message) =>
self.on_session_completed(sender, message),
&SigningMessage::SigningSessionDelegation(ref message) =>
self.on_session_delegated(sender, message),
&SigningMessage::SigningSessionDelegationCompleted(ref message) =>
self.on_session_delegation_completed(sender, message),
}
}

/// When session is delegated to this node.
pub fn on_session_delegated(&self, sender: &NodeId, message: &SigningSessionDelegation) -> Result<(), Error> {
debug_assert!(self.core.meta.id == *message.session);
debug_assert!(self.core.access_key == *message.sub_session);

{
let mut data = self.data.lock();
if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization || data.delegation_status.is_some() {
return Err(Error::InvalidStateForRequest);
}

data.consensus_session.consensus_job_mut().executor_mut().set_requester_signature(message.requestor_signature.clone().into());
data.delegation_status = Some(DelegationStatus::DelegatedFrom(sender.clone(), message.session_nonce));
}

self.initialize(message.version.clone().into(), message.message_hash.clone().into())
}

/// When delegated session is completed on other node.
pub fn on_session_delegation_completed(&self, sender: &NodeId, message: &SigningSessionDelegationCompleted) -> Result<(), Error> {
debug_assert!(self.core.meta.id == *message.session);
debug_assert!(self.core.access_key == *message.sub_session);

if self.core.meta.master_node_id != self.core.meta.self_node_id {
return Err(Error::InvalidStateForRequest);
}

let mut data = self.data.lock();
match data.delegation_status.as_ref() {
Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (),
_ => return Err(Error::InvalidMessage),
}

Self::set_signing_result(&self.core, &mut *data, Ok((message.signature_c.clone().into(), message.signature_s.clone().into())));

Ok(())
}

/// When consensus-related message is received.
pub fn on_consensus_message(&self, sender: &NodeId, message: &SigningConsensusMessage) -> Result<(), Error> {
debug_assert!(self.core.meta.id == *message.session);
@@ -272,6 +364,15 @@ impl SessionImpl {

let mut data = self.data.lock();
let is_establishing_consensus = data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus;

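// on the initialization message, remember the key version proposed by the master and report whether this node actually holds a share for that version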
if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message {
let version = msg.version.clone().into();
let has_key_share = self.core.key_share.as_ref()
.map(|ks| ks.version(&version).is_ok())
.unwrap_or(false);
data.consensus_session.consensus_job_mut().executor_mut().set_has_key_share(has_key_share);
data.version = Some(version);
}
data.consensus_session.on_consensus_message(&sender, &message.message)?;

let is_consensus_established = data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished;
@@ -283,6 +384,11 @@ impl SessionImpl {
let mut other_consensus_group_nodes = consensus_group.clone();
other_consensus_group_nodes.remove(&self.core.meta.self_node_id);

let key_share = match self.core.key_share.as_ref() {
None => return Err(Error::InvalidMessage),
Some(key_share) => key_share,
};

let generation_session = GenerationSession::new(GenerationSessionParams {
id: self.core.meta.id.clone(),
self_node_id: self.core.meta.self_node_id.clone(),
@@ -295,7 +401,7 @@ impl SessionImpl {
}),
nonce: None,
});
generation_session.initialize(Public::default(), self.core.key_share.threshold, consensus_group)?;
generation_session.initialize(Public::default(), key_share.threshold, consensus_group)?;
data.generation_session = Some(generation_session);
data.state = SessionState::SessionKeyGeneration;

@@ -312,7 +418,10 @@ impl SessionImpl {

if let &GenerationMessage::InitializeSession(ref message) = &message.message {
if &self.core.meta.master_node_id != sender {
return Err(Error::InvalidMessage);
match data.delegation_status.as_ref() {
Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (),
_ => return Err(Error::InvalidMessage),
}
}

let consensus_group: BTreeSet<NodeId> = message.nodes.keys().cloned().map(Into::into).collect();
@@ -351,13 +460,14 @@ impl SessionImpl {
return Ok(());
}

let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone();
let message_hash = data.message_hash
.expect("we are on master node; on master node message_hash is filled in initialize(); on_generation_message follows initialize; qed");
let joint_public_and_secret = data.generation_session.as_ref()
.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")
.joint_public_and_secret()
.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?;
self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)
self.core.disseminate_jobs(&mut data.consensus_session, &version, joint_public_and_secret.0, joint_public_and_secret.1, message_hash)
}

/// When partial signature is requested.
@@ -366,6 +476,11 @@ impl SessionImpl {
debug_assert!(self.core.access_key == *message.sub_session);
debug_assert!(sender != &self.core.meta.self_node_id);

let key_share = match self.core.key_share.as_ref() {
None => return Err(Error::InvalidMessage),
Some(key_share) => key_share,
};

let mut data = self.data.lock();

if sender != &self.core.meta.master_node_id {
@@ -379,7 +494,9 @@ impl SessionImpl {
.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")
.joint_public_and_secret()
.expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?;
let signing_job = SigningJob::new_on_slave(self.core.meta.self_node_id.clone(), self.core.key_share.clone(), joint_public_and_secret.0, joint_public_and_secret.1)?;
let key_version = key_share.version(data.version.as_ref().ok_or(Error::InvalidMessage)?)
.map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
let signing_job = SigningJob::new_on_slave(self.core.meta.self_node_id.clone(), key_share.clone(), key_version, joint_public_and_secret.0, joint_public_and_secret.1)?;
let signing_transport = self.core.signing_transport();

data.consensus_session.on_job_request(sender, PartialSigningRequest {
@@ -414,8 +531,8 @@ impl SessionImpl {
})))?;
}

data.result = Some(Ok(data.consensus_session.result()?));
self.core.completed.notify_all();
let result = data.consensus_session.result()?;
Self::set_signing_result(&self.core, &mut *data, Ok(result));

Ok(())
}
@@ -429,66 +546,130 @@ impl SessionImpl {
self.data.lock().consensus_session.on_session_completed(sender)
}

/// When error has occurred on another node.
pub fn on_session_error(&self, sender: &NodeId, message: &SigningSessionError) -> Result<(), Error> {
self.process_node_error(Some(&sender), &message.error)
/// Process error from the other node.
fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> {
let mut data = self.data.lock();
let is_self_node_error = node.map(|n| n == &self.core.meta.self_node_id).unwrap_or(false);
// error is always fatal if coming from this node
if is_self_node_error {
Self::set_signing_result(&self.core, &mut *data, Err(error.clone()));
return Err(error);
}

/// Process error from the other node.
fn process_node_error(&self, node: Option<&NodeId>, error: &String) -> Result<(), Error> {
let mut data = self.data.lock();
match {
match node {
Some(node) => data.consensus_session.on_node_error(node),
None => data.consensus_session.on_session_timeout(),
}
} {
Ok(false) => Ok(()),
Ok(false) => {
Ok(())
},
Ok(true) => {
let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone();
let message_hash = data.message_hash.as_ref().cloned()
.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed");
let joint_public_and_secret = data.generation_session.as_ref()
.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed")
.joint_public_and_secret()
.expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed")?;
let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, joint_public_and_secret.0, joint_public_and_secret.1, message_hash);
let disseminate_result = self.core.disseminate_jobs(&mut data.consensus_session, &version, joint_public_and_secret.0, joint_public_and_secret.1, message_hash);
match disseminate_result {
Ok(()) => Ok(()),
Err(err) => {
warn!("{}: signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);

data.result = Some(Err(err.clone()));
self.core.completed.notify_all();
Self::set_signing_result(&self.core, &mut *data, Err(err.clone()));
Err(err)
}
}
},
Err(err) => {
warn!("{}: signing session failed with error: {:?} from {:?}", &self.core.meta.self_node_id, error, node);

data.result = Some(Err(err.clone()));
self.core.completed.notify_all();
Self::set_signing_result(&self.core, &mut *data, Err(err.clone()));
|
||||
Err(err)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Set signing session result.
|
||||
fn set_signing_result(core: &SessionCore, data: &mut SessionData, result: Result<(Secret, Secret), Error>) {
|
||||
if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() {
|
||||
// error means can't communicate => ignore it
|
||||
let _ = match result.as_ref() {
|
||||
Ok(signature) => core.cluster.send(&master, Message::Signing(SigningMessage::SigningSessionDelegationCompleted(SigningSessionDelegationCompleted {
|
||||
session: core.meta.id.clone().into(),
|
||||
sub_session: core.access_key.clone().into(),
|
||||
session_nonce: nonce,
|
||||
signature_c: signature.0.clone().into(),
|
||||
signature_s: signature.1.clone().into(),
|
||||
}))),
|
||||
Err(error) => core.cluster.send(&master, Message::Signing(SigningMessage::SigningSessionError(SigningSessionError {
|
||||
session: core.meta.id.clone().into(),
|
||||
sub_session: core.access_key.clone().into(),
|
||||
session_nonce: nonce,
|
||||
error: error.clone().into(),
|
||||
}))),
|
||||
};
|
||||
}
|
||||
|
||||
data.result = Some(result);
|
||||
core.completed.notify_all();
|
||||
}
|
||||
}
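
The error-handling policy in process_node_error above is compact but dense. The following stand-alone sketch (simplified, hypothetical types, not part of this change) restates the same decision table: an error raised on this node is always fatal, while an error from another node is fatal only when the consensus session reports that consensus is no longer reachable; if consensus survives but jobs were already sent, they must be re-disseminated.

// Hypothetical, simplified restatement of the process_node_error decision
// table above; `Result<bool, ()>` mirrors on_node_error/on_session_timeout.
#[derive(Debug, PartialEq)]
enum NodeErrorOutcome {
	// consensus is unaffected (on_node_error returned Ok(false))
	Continue,
	// consensus re-established without the failed node (Ok(true)):
	// partial-signature jobs must be disseminated again
	ResendJobs,
	// consensus unreachable, or the error came from this node:
	// the session fails and waiters are notified
	Fail,
}

fn classify(is_self_node_error: bool, consensus_result: Result<bool, ()>) -> NodeErrorOutcome {
	if is_self_node_error {
		// mirrors the "error is always fatal if coming from this node" branch
		return NodeErrorOutcome::Fail;
	}
	match consensus_result {
		Ok(false) => NodeErrorOutcome::Continue,
		Ok(true) => NodeErrorOutcome::ResendJobs,
		Err(()) => NodeErrorOutcome::Fail,
	}
}

fn main() {
	assert_eq!(classify(true, Ok(false)), NodeErrorOutcome::Fail);
	assert_eq!(classify(false, Ok(true)), NodeErrorOutcome::ResendJobs);
}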

impl ClusterSession for SessionImpl {
	type Id = SessionIdWithSubSession;

	fn type_name() -> &'static str {
		"signing"
	}

	fn id(&self) -> SessionIdWithSubSession {
		SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone())
	}

	fn is_finished(&self) -> bool {
		let data = self.data.lock();
		data.consensus_session.state() == ConsensusSessionState::Failed
			|| data.consensus_session.state() == ConsensusSessionState::Finished
		self.data.lock().result.is_some()
	}

	fn on_node_timeout(&self, node: &NodeId) {
		// ignore error, only state matters
		let _ = self.process_node_error(Some(node), &Error::NodeDisconnected.into());
		let _ = self.process_node_error(Some(node), Error::NodeDisconnected);
	}

	fn on_session_timeout(&self) {
		// ignore error, only state matters
		let _ = self.process_node_error(None, &Error::NodeDisconnected.into());
		let _ = self.process_node_error(None, Error::NodeDisconnected);
	}

	fn on_session_error(&self, node: &NodeId, error: Error) {
		let is_fatal = self.process_node_error(Some(node), error.clone()).is_err();
		let is_this_node_error = *node == self.core.meta.self_node_id;
		if is_fatal || is_this_node_error {
			// error in signing session is non-fatal if it occurs on a slave node
			// => either respond with error
			// => or broadcast error
			let message = Message::Signing(SigningMessage::SigningSessionError(SigningSessionError {
				session: self.core.meta.id.clone().into(),
				sub_session: self.core.access_key.clone().into(),
				session_nonce: self.core.nonce,
				error: error.clone().into(),
			}));

			// do not bother processing the send error, as we are already processing an error
			let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id {
				self.core.cluster.broadcast(message)
			} else {
				self.core.cluster.send(&self.core.meta.master_node_id, message)
			};
		}
	}

	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
		match *message {
			Message::Signing(ref message) => self.process_message(sender, message),
			_ => unreachable!("cluster checks message to be correct before passing; qed"),
		}
	}
}

@ -552,8 +733,14 @@ impl SessionCore {
		}
	}

	pub fn disseminate_jobs(&self, consensus_session: &mut SigningConsensusSession, session_public: Public, session_secret_share: Secret, message_hash: H256) -> Result<(), Error> {
		let signing_job = SigningJob::new_on_master(self.meta.self_node_id.clone(), self.key_share.clone(), session_public, session_secret_share, message_hash)?;
	pub fn disseminate_jobs(&self, consensus_session: &mut SigningConsensusSession, version: &H256, session_public: Public, session_secret_share: Secret, message_hash: H256) -> Result<(), Error> {
		let key_share = match self.key_share.as_ref() {
			None => return Err(Error::InvalidMessage),
			Some(key_share) => key_share,
		};

		let key_version = key_share.version(version).map_err(|e| Error::KeyStorage(e.into()))?.hash.clone();
		let signing_job = SigningJob::new_on_master(self.meta.self_node_id.clone(), key_share.clone(), key_version, session_public, session_secret_share, message_hash)?;
		consensus_session.disseminate_jobs(signing_job, self.signing_transport())
	}
}
@ -563,12 +750,15 @@ impl JobTransport for SigningConsensusTransport {
	type PartialJobResponse=bool;

	fn send_partial_request(&self, node: &NodeId, request: Signature) -> Result<(), Error> {
		let version = self.version.as_ref()
			.expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed");
		self.cluster.send(node, Message::Signing(SigningMessage::SigningConsensusMessage(SigningConsensusMessage {
			session: self.id.clone().into(),
			sub_session: self.access_key.clone().into(),
			session_nonce: self.nonce,
			message: ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
				requestor_signature: request.into(),
				version: version.clone().into(),
			})
		})))
	}
@ -619,7 +809,8 @@ mod tests {
	use bigint::hash::H256;
	use ethkey::{self, Random, Generator, Public, Secret, KeyPair};
	use acl_storage::DummyAclStorage;
	use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, SessionMeta, Error, KeyStorage};
	use key_server_cluster::{NodeId, DummyKeyStorage, DocumentKeyShare, DocumentKeyShareVersion, SessionId, SessionMeta, Error, KeyStorage};
	use key_server_cluster::cluster_sessions::ClusterSession;
	use key_server_cluster::cluster::tests::DummyCluster;
	use key_server_cluster::generation_session::{Session as GenerationSession};
	use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop;
@ -631,6 +822,7 @@ mod tests {
	struct Node {
		pub node_id: NodeId,
		pub cluster: Arc<DummyCluster>,
		pub key_storage: Arc<DummyKeyStorage>,
		pub session: SessionImpl,
	}

@ -640,10 +832,12 @@ mod tests {
		pub nodes: BTreeMap<NodeId, Node>,
		pub queue: VecDeque<(NodeId, NodeId, Message)>,
		pub acl_storages: Vec<Arc<DummyAclStorage>>,
		pub version: H256,
	}

	impl MessageLoop {
		pub fn new(gl: &KeyGenerationMessageLoop) -> Self {
			let version = gl.nodes.values().nth(0).unwrap().key_storage.get(&Default::default()).unwrap().unwrap().versions.iter().last().unwrap().hash;
			let mut nodes = BTreeMap::new();
			let session_id = gl.session_id.clone();
			let requester = Random.generate().unwrap();
@ -659,15 +853,15 @@ mod tests {
					id: session_id.clone(),
					self_node_id: gl_node_id.clone(),
					master_node_id: master_node_id.clone(),
					threshold: gl_node.key_storage.get(&session_id).unwrap().threshold,
					threshold: gl_node.key_storage.get(&session_id).unwrap().unwrap().threshold,
				},
				access_key: "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(),
				key_share: gl_node.key_storage.get(&session_id).unwrap(),
				key_share: Some(gl_node.key_storage.get(&session_id).unwrap().unwrap()),
				acl_storage: acl_storage,
				cluster: cluster.clone(),
				nonce: 0,
			}, if i == 0 { signature.clone() } else { None }).unwrap();
			nodes.insert(gl_node_id.clone(), Node { node_id: gl_node_id.clone(), cluster: cluster, session: session });
			nodes.insert(gl_node_id.clone(), Node { node_id: gl_node_id.clone(), cluster: cluster, key_storage: gl_node.key_storage.clone(), session: session });
			}

			let nodes_ids: Vec<_> = nodes.keys().cloned().collect();
@ -683,6 +877,7 @@ mod tests {
				nodes: nodes,
				queue: VecDeque::new(),
				acl_storages: acl_storages,
				version: version,
			}
		}

@ -700,16 +895,7 @@ mod tests {
		pub fn process_message(&mut self, mut msg: (NodeId, NodeId, Message)) -> Result<(), Error> {
			let mut is_queued_message = false;
			loop {
				match {
					match msg.2 {
						Message::Signing(SigningMessage::SigningConsensusMessage(ref message)) => self.nodes[&msg.1].session.on_consensus_message(&msg.0, &message),
						Message::Signing(SigningMessage::SigningGenerationMessage(ref message)) => self.nodes[&msg.1].session.on_generation_message(&msg.0, &message),
						Message::Signing(SigningMessage::RequestPartialSignature(ref message)) => self.nodes[&msg.1].session.on_partial_signature_requested(&msg.0, &message),
						Message::Signing(SigningMessage::PartialSignature(ref message)) => self.nodes[&msg.1].session.on_partial_signature(&msg.0, &message),
						Message::Signing(SigningMessage::SigningSessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(&msg.0, &message),
						_ => panic!("unexpected"),
					}
				} {
				match self.nodes[&msg.1].session.on_message(&msg.0, &msg.2) {
					Ok(_) => {
						if let Some(message) = self.queue.pop_front() {
							msg = message;
@ -765,7 +951,7 @@ mod tests {

		// run signing session
		let message_hash = H256::from(777);
		sl.master().initialize(message_hash).unwrap();
		sl.master().initialize(sl.version.clone(), message_hash).unwrap();
		while let Some((from, to, message)) = sl.take_message() {
			sl.process_message((from, to, message)).unwrap();
		}
@ -790,15 +976,17 @@ mod tests {
				threshold: 0,
			},
			access_key: Random.generate().unwrap().secret().clone(),
			key_share: DocumentKeyShare {
			key_share: Some(DocumentKeyShare {
				author: Public::default(),
				threshold: 0,
				id_numbers: nodes,
				secret_share: Random.generate().unwrap().secret().clone(),
				polynom1: Vec::new(),
				common_point: Some(Random.generate().unwrap().public().clone()),
				encrypted_point: Some(Random.generate().unwrap().public().clone()),
			},
				versions: vec![DocumentKeyShareVersion {
					hash: Default::default(),
					id_numbers: nodes,
					secret_share: Random.generate().unwrap().secret().clone(),
				}],
			}),
			acl_storage: Arc::new(DummyAclStorage::default()),
			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
			nonce: 0,
@ -809,12 +997,9 @@ mod tests {
	}

	#[test]
	fn fails_to_construct_if_not_a_part_of_cluster() {
		let mut nodes = BTreeMap::new();
	fn fails_to_initialize_if_does_not_have_a_share() {
		let self_node_id = Random.generate().unwrap().public().clone();
		nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
		nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
		match SessionImpl::new(SessionParams {
		let session = SessionImpl::new(SessionParams {
			meta: SessionMeta {
				id: SessionId::default(),
				self_node_id: self_node_id.clone(),
@ -822,31 +1007,21 @@ mod tests {
				threshold: 0,
			},
			access_key: Random.generate().unwrap().secret().clone(),
			key_share: DocumentKeyShare {
				author: Public::default(),
				threshold: 0,
				id_numbers: nodes,
				secret_share: Random.generate().unwrap().secret().clone(),
				polynom1: Vec::new(),
				common_point: Some(Random.generate().unwrap().public().clone()),
				encrypted_point: Some(Random.generate().unwrap().public().clone()),
			},
			key_share: None,
			acl_storage: Arc::new(DummyAclStorage::default()),
			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
			nonce: 0,
		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) {
			Err(Error::InvalidNodesConfiguration) => (),
			_ => panic!("unexpected"),
		}
		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())).unwrap();
		assert_eq!(session.initialize(Default::default(), Default::default()), Err(Error::InvalidMessage));
	}

	#[test]
	fn fails_to_construct_if_threshold_is_wrong() {
	fn fails_to_initialize_if_threshold_is_wrong() {
		let mut nodes = BTreeMap::new();
		let self_node_id = Random.generate().unwrap().public().clone();
		nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone());
		nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
		match SessionImpl::new(SessionParams {
		let session = SessionImpl::new(SessionParams {
			meta: SessionMeta {
				id: SessionId::default(),
				self_node_id: self_node_id.clone(),
@ -854,35 +1029,35 @@ mod tests {
				threshold: 2,
			},
			access_key: Random.generate().unwrap().secret().clone(),
			key_share: DocumentKeyShare {
			key_share: Some(DocumentKeyShare {
				author: Public::default(),
				threshold: 2,
				id_numbers: nodes,
				secret_share: Random.generate().unwrap().secret().clone(),
				polynom1: Vec::new(),
				common_point: Some(Random.generate().unwrap().public().clone()),
				encrypted_point: Some(Random.generate().unwrap().public().clone()),
			},
				versions: vec![DocumentKeyShareVersion {
					hash: Default::default(),
					id_numbers: nodes,
					secret_share: Random.generate().unwrap().secret().clone(),
				}],
			}),
			acl_storage: Arc::new(DummyAclStorage::default()),
			cluster: Arc::new(DummyCluster::new(self_node_id.clone())),
			nonce: 0,
		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) {
			Err(Error::InvalidThreshold) => (),
			_ => panic!("unexpected"),
		}
		}, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())).unwrap();
		assert_eq!(session.initialize(Default::default(), Default::default()), Err(Error::ConsensusUnreachable));
	}

	#[test]
	fn fails_to_initialize_when_already_initialized() {
		let (_, sl) = prepare_signing_sessions(1, 3);
		assert_eq!(sl.master().initialize(777.into()), Ok(()));
		assert_eq!(sl.master().initialize(777.into()), Err(Error::InvalidStateForRequest));
		assert_eq!(sl.master().initialize(sl.version.clone(), 777.into()), Ok(()));
		assert_eq!(sl.master().initialize(sl.version.clone(), 777.into()), Err(Error::InvalidStateForRequest));
	}

	#[test]
	fn does_not_fail_when_consensus_message_received_after_consensus_established() {
		let (_, mut sl) = prepare_signing_sessions(1, 3);
		sl.master().initialize(777.into()).unwrap();
		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
		// consensus is established
		sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap();
		// but 3rd node continues to send its messages
@ -929,7 +1104,7 @@ mod tests {
	#[test]
	fn fails_when_generation_sesson_is_initialized_by_slave_node() {
		let (_, mut sl) = prepare_signing_sessions(1, 3);
		sl.master().initialize(777.into()).unwrap();
		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();
		sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap();

		let slave2_id = sl.nodes.keys().nth(2).unwrap().clone();
@ -980,7 +1155,7 @@ mod tests {
	#[test]
	fn failed_signing_session() {
		let (_, mut sl) = prepare_signing_sessions(1, 3);
		sl.master().initialize(777.into()).unwrap();
		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();

		// we need at least 2-of-3 nodes to agree to reach consensus
		// let's say 2 of 3 nodes disagree
@ -994,7 +1169,7 @@ mod tests {
	#[test]
	fn complete_signing_session_with_single_node_failing() {
		let (_, mut sl) = prepare_signing_sessions(1, 3);
		sl.master().initialize(777.into()).unwrap();
		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();

		// we need at least 2-of-3 nodes to agree to reach consensus
		// let's say 1 of 3 nodes disagree
@ -1015,7 +1190,7 @@ mod tests {
	#[test]
	fn complete_signing_session_with_acl_check_failed_on_master() {
		let (_, mut sl) = prepare_signing_sessions(1, 3);
		sl.master().initialize(777.into()).unwrap();
		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();

		// we need at least 2-of-3 nodes to agree to reach consensus
		// let's say 1 of 3 nodes disagree
@ -1047,4 +1222,55 @@ mod tests {
			}),
		})), Err(Error::ReplayProtection));
	}

	#[test]
	fn signing_works_when_delegated_to_other_node() {
		let (_, mut sl) = prepare_signing_sessions(1, 3);

		// let's say node1 doesn't have a share && delegates the signing request to node0
		// initially session is created on node1 => node1 is master for itself, but for other nodes node0 is still master
		let actual_master = sl.nodes.keys().nth(0).cloned().unwrap();
		let requested_node = sl.nodes.keys().skip(1).nth(0).cloned().unwrap();
		let version = sl.nodes[&actual_master].key_storage.get(&Default::default()).unwrap().unwrap().last_version().unwrap().hash.clone();
		sl.nodes[&requested_node].key_storage.remove(&Default::default()).unwrap();
		sl.nodes.get_mut(&requested_node).unwrap().session.core.key_share = None;
		sl.nodes.get_mut(&requested_node).unwrap().session.core.meta.master_node_id = sl.nodes[&requested_node].session.core.meta.self_node_id.clone();
		sl.nodes[&requested_node].session.data.lock().consensus_session.consensus_job_mut().executor_mut().set_requester_signature(
			sl.nodes[&actual_master].session.data.lock().consensus_session.consensus_job().executor().requester_signature().unwrap().clone()
		);

		// now let's try to sign
		sl.nodes[&requested_node].session.delegate(actual_master, version, Default::default()).unwrap();

		// then consensus is reachable, but a single node will disagree
		while let Some((from, to, message)) = sl.take_message() {
			sl.process_message((from, to, message)).unwrap();
		}
	}

	#[test]
	fn signing_works_when_share_owners_are_isolated() {
		let (_, mut sl) = prepare_signing_sessions(1, 3);

		// we need 2 out of 3 nodes to agree to do a signing
		// let's say that 1 of these nodes (master) is isolated
		let isolated_node_id = sl.nodes.keys().skip(2).nth(0).cloned().unwrap();
		for node in sl.nodes.values() {
			node.cluster.remove_node(&isolated_node_id);
		}

		// now let's try to do a signing
		sl.master().initialize(sl.version.clone(), 777.into()).unwrap();

		// then consensus is reachable, but a single node will disagree
		while let Some((from, to, message)) = sl.take_message() {
			sl.process_message((from, to, message)).unwrap();
		}

		let data = sl.master().data.lock();
		match data.result {
			Some(Ok(_)) => (),
			_ => unreachable!(),
		}
	}
}

File diff suppressed because it is too large
@ -16,31 +16,28 @@

use std::time;
use std::sync::{Arc, Weak};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::collections::{VecDeque, BTreeSet, BTreeMap};
use std::sync::atomic::AtomicBool;
use std::collections::{VecDeque, BTreeMap};
use parking_lot::{Mutex, RwLock};
use bigint::hash::H256;
use ethkey::{Public, Secret, Signature};
use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentKeyShare, EncryptedDocumentKeyShadow, SessionMeta};
use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration};
use key_server_cluster::message::{self, Message, GenerationMessage, EncryptionMessage, DecryptionMessage, SigningMessage,
	ShareAddMessage, ShareMoveMessage, ShareRemoveMessage, ServersSetChangeMessage};
use key_server_cluster::{Error, NodeId, SessionId, EncryptedDocumentKeyShadow};
use key_server_cluster::cluster::{Cluster, ClusterData, ClusterConfiguration, ClusterView};
use key_server_cluster::message::{self, Message};
use key_server_cluster::generation_session::{Session as GenerationSession, SessionImpl as GenerationSessionImpl,
	SessionParams as GenerationSessionParams, SessionState as GenerationSessionState};
use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl,
	DecryptionSessionId, SessionParams as DecryptionSessionParams};
	SessionState as GenerationSessionState};
use key_server_cluster::decryption_session::{Session as DecryptionSession, SessionImpl as DecryptionSessionImpl};
use key_server_cluster::encryption_session::{Session as EncryptionSession, SessionImpl as EncryptionSessionImpl,
	SessionParams as EncryptionSessionParams, SessionState as EncryptionSessionState};
use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl,
	SigningSessionId, SessionParams as SigningSessionParams};
	SessionState as EncryptionSessionState};
use key_server_cluster::signing_session::{Session as SigningSession, SessionImpl as SigningSessionImpl};
use key_server_cluster::share_add_session::{Session as ShareAddSession, SessionImpl as ShareAddSessionImpl,
	SessionParams as ShareAddSessionParams, IsolatedSessionTransport as ShareAddTransport};
use key_server_cluster::share_move_session::{Session as ShareMoveSession, SessionImpl as ShareMoveSessionImpl,
	SessionParams as ShareMoveSessionParams, IsolatedSessionTransport as ShareMoveTransport};
use key_server_cluster::share_remove_session::{Session as ShareRemoveSession, SessionImpl as ShareRemoveSessionImpl,
	SessionParams as ShareRemoveSessionParams, IsolatedSessionTransport as ShareRemoveTransport};
use key_server_cluster::servers_set_change_session::{Session as ServersSetChangeSession, SessionImpl as ServersSetChangeSessionImpl,
	SessionParams as ServersSetChangeSessionParams};
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;
	IsolatedSessionTransport as ShareAddTransport};
use key_server_cluster::servers_set_change_session::{Session as ServersSetChangeSession, SessionImpl as ServersSetChangeSessionImpl};
use key_server_cluster::key_version_negotiation_session::{Session as KeyVersionNegotiationSession, SessionImpl as KeyVersionNegotiationSessionImpl,
	IsolatedSessionTransport as VersionNegotiationTransport, ContinueAction};

use key_server_cluster::cluster_sessions_creator::{GenerationSessionCreator, EncryptionSessionCreator, DecryptionSessionCreator, SigningSessionCreator,
	KeyVersionNegotiationSessionCreator, AdminSessionCreator, SessionCreatorCore, ClusterSessionCreator};

/// When there are no session-related messages for SESSION_TIMEOUT_INTERVAL seconds,
/// we must treat this session as stalled && finish it with an error.
@ -52,81 +49,91 @@ const SESSION_KEEP_ALIVE_INTERVAL: u64 = 30;

lazy_static! {
	/// Servers set change session id (there could be at most 1 session => hardcoded id).
	static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c"
	pub static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c"
		.parse()
		.expect("hardcoded id should parse without errors; qed");
}
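
To make the stall rule above concrete, here is a minimal self-contained sketch; the timeout value is an assumption for illustration only, since the real SESSION_TIMEOUT_INTERVAL constant is declared alongside SESSION_KEEP_ALIVE_INTERVAL, outside this hunk.

use std::time::{Duration, Instant};

// assumed value for illustration only
const SESSION_TIMEOUT_INTERVAL: u64 = 60;

struct TrackedSession {
	last_message_time: Instant,
}

// a session that has received no session-related messages for
// SESSION_TIMEOUT_INTERVAL seconds is treated as stalled and must be
// finished with an error by the container sweep
fn is_stalled(session: &TrackedSession, now: Instant) -> bool {
	now.duration_since(session.last_message_time) > Duration::from_secs(SESSION_TIMEOUT_INTERVAL)
}

fn main() {
	let session = TrackedSession { last_message_time: Instant::now() };
	assert!(!is_stalled(&session, Instant::now()));
}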

/// Session id with sub session.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SessionIdWithSubSession {
	/// Key id.
	pub id: SessionId,
	/// Sub session id.
	pub access_key: Secret,
}

/// Generic cluster session.
pub trait ClusterSession {
	/// Session identifier type.
	type Id: Ord + Clone;

	/// Session type name.
	fn type_name() -> &'static str;
	/// Get session id.
	fn id(&self) -> Self::Id;
	/// If session is finished (either with success or not).
	fn is_finished(&self) -> bool;
	/// When it takes too much time to complete session.
	fn on_session_timeout(&self);
	/// When it takes too much time to receive response from the node.
	fn on_node_timeout(&self, node_id: &NodeId);
	/// Process error that has occurred during session + propagate this error to required nodes.
	fn on_session_error(&self, sender: &NodeId, error: Error);
	/// Process session message.
	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>;
}
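
The trait is easiest to read next to a toy implementation. The following NoopSession is a hypothetical example (it exists nowhere in the codebase) showing the minimum each method must provide: identity, a finished flag, and error/timeout hooks that drive the session to a terminal state.

// Hypothetical example implementation, for illustration only.
use std::sync::atomic::{AtomicBool, Ordering};

struct NoopSession {
	id: SessionId,
	finished: AtomicBool,
}

impl ClusterSession for NoopSession {
	type Id = SessionId;

	fn type_name() -> &'static str { "noop" }

	fn id(&self) -> SessionId { self.id.clone() }

	fn is_finished(&self) -> bool { self.finished.load(Ordering::Relaxed) }

	// timeouts and errors all drive this trivial session straight to the
	// finished state; real sessions record an error result as well
	fn on_session_timeout(&self) { self.finished.store(true, Ordering::Relaxed) }

	fn on_node_timeout(&self, _node_id: &NodeId) { self.finished.store(true, Ordering::Relaxed) }

	fn on_session_error(&self, _sender: &NodeId, _error: Error) { self.finished.store(true, Ordering::Relaxed) }

	fn on_message(&self, _sender: &NodeId, _message: &Message) -> Result<(), Error> { Ok(()) }
}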

/// Administrative session.
pub enum AdminSession {
	/// Share add session.
	ShareAdd(ShareAddSessionImpl<ShareAddTransport>),
	/// Share move session.
	ShareMove(ShareMoveSessionImpl<ShareMoveTransport>),
	/// Share remove session.
	ShareRemove(ShareRemoveSessionImpl<ShareRemoveTransport>),
	/// Servers set change session.
	ServersSetChange(ServersSetChangeSessionImpl),
}

/// Administrative session creation data.
pub enum AdminSessionCreationData {
	/// Share add session.
	ShareAdd(H256),
	/// Servers set change session.
	ServersSetChange,
}

/// Active sessions on this cluster.
pub struct ClusterSessions {
	/// Key generation sessions.
	pub generation_sessions: ClusterSessionsContainer<SessionId, GenerationSessionImpl, GenerationMessage>,
	pub generation_sessions: ClusterSessionsContainer<GenerationSessionImpl, GenerationSessionCreator, ()>,
	/// Encryption sessions.
	pub encryption_sessions: ClusterSessionsContainer<SessionId, EncryptionSessionImpl, EncryptionMessage>,
	pub encryption_sessions: ClusterSessionsContainer<EncryptionSessionImpl, EncryptionSessionCreator, ()>,
	/// Decryption sessions.
	pub decryption_sessions: ClusterSessionsContainer<DecryptionSessionId, DecryptionSessionImpl, DecryptionMessage>,
	pub decryption_sessions: ClusterSessionsContainer<DecryptionSessionImpl, DecryptionSessionCreator, Signature>,
	/// Signing sessions.
	pub signing_sessions: ClusterSessionsContainer<SigningSessionId, SigningSessionImpl, SigningMessage>,
	pub signing_sessions: ClusterSessionsContainer<SigningSessionImpl, SigningSessionCreator, Signature>,
	/// Key version negotiation sessions.
	pub negotiation_sessions: ClusterSessionsContainer<KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>, KeyVersionNegotiationSessionCreator, ()>,
	/// Administrative sessions.
	pub admin_sessions: ClusterSessionsContainer<SessionId, AdminSession, Message>,
	pub admin_sessions: ClusterSessionsContainer<AdminSession, AdminSessionCreator, AdminSessionCreationData>,
	/// Self node id.
	self_node_id: NodeId,
	/// All nodes ids.
	nodes: BTreeSet<NodeId>,
	/// Reference to key storage
	key_storage: Arc<KeyStorage>,
	/// Reference to ACL storage
	acl_storage: Arc<AclStorage>,
	/// Administrator public.
	admin_public: Option<Public>,
	/// Make faulty generation sessions.
	make_faulty_generation_sessions: AtomicBool,
	/// Always-increasing sessions counter. Is used as session nonce to prevent replay attacks:
	/// 1) during handshake, KeyServers generate new random key to encrypt messages
	///    => there's no way to use messages from previous connections for replay attacks
	/// 2) when session (of any type) is started, master node increases its own session counter and broadcasts it
	/// 3) when slave KeyServer receives session initialization message, it checks that the new nonce is larger than the previous one (from the same master)
	///    => there's no way to use messages from previous sessions for replay attacks
	/// 4) KeyServer checks that each session message contains the same nonce as the initialization message
	/// Given that: (A) handshake is secure and (B) session itself is initially replay-protected
	/// => this guarantees that sessions are replay-protected.
	session_counter: AtomicUsize,
	/// Maximal session nonce, received from given connection.
	max_nonce: RwLock<BTreeMap<NodeId, u64>>,
	/// Creator core.
	creator_core: Arc<SessionCreatorCore>,
}
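
The numbered comment above reduces to a single invariant: per master node, session nonces must be strictly increasing. A self-contained sketch of that check follows (stand-in types; in this change the real bookkeeping moves into SessionCreatorCore):

use std::collections::BTreeMap;

type MasterId = u64; // stand-in for the real NodeId type

// accept a session-initialization nonce only if it is strictly greater than
// the largest nonce previously seen from the same master; anything else is
// rejected as a possible replay
fn check_session_nonce(max_nonce: &mut BTreeMap<MasterId, u64>, master: MasterId, nonce: u64) -> Result<(), &'static str> {
	let last_nonce = max_nonce.entry(master).or_insert(0);
	if nonce > *last_nonce {
		*last_nonce = nonce;
		Ok(())
	} else {
		Err("replay protection")
	}
}

fn main() {
	let mut max_nonce = BTreeMap::new();
	assert!(check_session_nonce(&mut max_nonce, 1, 5).is_ok());
	assert!(check_session_nonce(&mut max_nonce, 1, 5).is_err()); // replayed nonce
	assert!(check_session_nonce(&mut max_nonce, 1, 6).is_ok());
}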

/// Active sessions container.
pub struct ClusterSessionsContainer<K, V, M> {
pub struct ClusterSessionsContainer<S: ClusterSession, SC: ClusterSessionCreator<S, D>, D> {
	/// Sessions creator.
	pub creator: SC,
	/// Active sessions.
	pub sessions: RwLock<BTreeMap<K, QueuedSession<V, M>>>,
	sessions: RwLock<BTreeMap<S::Id, QueuedSession<S>>>,
	/// Sessions container state.
	container_state: Arc<Mutex<ClusterSessionsContainerState>>
	container_state: Arc<Mutex<ClusterSessionsContainerState>>,
	/// Phantom data.
	_pd: ::std::marker::PhantomData<D>,
}

/// Session and its message queue.
pub struct QueuedSession<V, M> {
pub struct QueuedSession<S> {
	/// Session master.
	pub master: NodeId,
	/// Cluster view.
@ -136,9 +143,9 @@ pub struct QueuedSession<V, M> {
	/// Last received message time.
	pub last_message_time: time::Instant,
	/// Generation session.
	pub session: Arc<V>,
	pub session: Arc<S>,
	/// Messages queue.
	pub queue: VecDeque<(NodeId, M)>,
	pub queue: VecDeque<(NodeId, Message)>,
}

/// Cluster sessions container state.
@ -177,7 +184,7 @@ pub struct DecryptionSessionWrapper {
	/// Wrapped session.
	session: Arc<DecryptionSession>,
	/// Session Id.
	session_id: DecryptionSessionId,
	session_id: SessionIdWithSubSession,
	/// Cluster data reference.
	cluster: Weak<ClusterData>,
}
@ -187,7 +194,7 @@ pub struct SigningSessionWrapper {
	/// Wrapped session.
	session: Arc<SigningSession>,
	/// Session Id.
	session_id: SigningSessionId,
	session_id: SessionIdWithSubSession,
	/// Cluster data reference.
	cluster: Weak<ClusterData>,
}
@ -202,30 +209,50 @@ pub struct AdminSessionWrapper {
	cluster: Weak<ClusterData>,
}

/// Key server version negotiation session implementation, which removes session from cluster on drop.
pub struct KeyNegotiationSessionWrapper {
	/// Wrapped session.
	session: Arc<KeyVersionNegotiationSession>,
	/// Session Id.
	session_id: SessionIdWithSubSession,
	/// Cluster data reference.
	cluster: Weak<ClusterData>,
}
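
All of these wrapper structs share one idea: the wrapper owns a strong reference to the session and a weak reference to the cluster, and unregisters the session when the wrapper is dropped, so finished sessions cannot leak. A schematic sketch of the pattern with simplified, hypothetical types:

use std::sync::{Arc, Weak};

// simplified stand-ins for the real cluster/session types
struct Sessions;
impl Sessions {
	fn remove_session(&self, _id: u64) { /* remove from the container */ }
}

struct InnerSession;

struct SessionWrapper {
	session: Arc<InnerSession>,
	session_id: u64,
	cluster: Weak<Sessions>,
}

impl Drop for SessionWrapper {
	fn drop(&mut self) {
		// if the cluster is still alive, unregister the session; a cluster
		// that was already dropped needs no cleanup
		if let Some(cluster) = self.cluster.upgrade() {
			cluster.remove_session(self.session_id);
		}
	}
}

fn main() {
	let cluster = Arc::new(Sessions);
	let _wrapper = SessionWrapper {
		session: Arc::new(InnerSession),
		session_id: 1,
		cluster: Arc::downgrade(&cluster),
	}; // dropped at end of scope => remove_session(1)
}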

impl ClusterSessions {
	/// Create new cluster sessions container.
	pub fn new(config: &ClusterConfiguration) -> Self {
		let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle));
		let creator_core = Arc::new(SessionCreatorCore::new(config));
		ClusterSessions {
			self_node_id: config.self_key_pair.public().clone(),
			nodes: config.key_server_set.get().keys().cloned().collect(),
			acl_storage: config.acl_storage.clone(),
			key_storage: config.key_storage.clone(),
			admin_public: config.admin_public.clone(),
			generation_sessions: ClusterSessionsContainer::new(container_state.clone()),
			encryption_sessions: ClusterSessionsContainer::new(container_state.clone()),
			decryption_sessions: ClusterSessionsContainer::new(container_state.clone()),
			signing_sessions: ClusterSessionsContainer::new(container_state.clone()),
			admin_sessions: ClusterSessionsContainer::new(container_state),
			generation_sessions: ClusterSessionsContainer::new(GenerationSessionCreator {
				core: creator_core.clone(),
				make_faulty_generation_sessions: AtomicBool::new(false),
			session_counter: AtomicUsize::new(0),
			max_nonce: RwLock::new(BTreeMap::new()),
			}, container_state.clone()),
			encryption_sessions: ClusterSessionsContainer::new(EncryptionSessionCreator {
				core: creator_core.clone(),
			}, container_state.clone()),
			decryption_sessions: ClusterSessionsContainer::new(DecryptionSessionCreator {
				core: creator_core.clone(),
			}, container_state.clone()),
			signing_sessions: ClusterSessionsContainer::new(SigningSessionCreator {
				core: creator_core.clone(),
			}, container_state.clone()),
			negotiation_sessions: ClusterSessionsContainer::new(KeyVersionNegotiationSessionCreator {
				core: creator_core.clone(),
			}, container_state.clone()),
			admin_sessions: ClusterSessionsContainer::new(AdminSessionCreator {
				core: creator_core.clone(),
				admin_public: config.admin_public.clone(),
			}, container_state),
			creator_core: creator_core,
		}
	}

	#[cfg(test)]
	pub fn make_faulty_generation_sessions(&self) {
		self.make_faulty_generation_sessions.store(true, Ordering::Relaxed);
		self.generation_sessions.creator.make_faulty_generation_sessions();
	}

	/// Send session-level keep-alive messages.
@ -240,296 +267,13 @@ impl ClusterSessions {
		}
	}

	/// Create new generation session.
	pub fn new_generation_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>) -> Result<Arc<GenerationSessionImpl>, Error> {
		// check that there's no finished encryption session with the same id
		if self.key_storage.contains(&session_id) {
			return Err(Error::DuplicateSessionId);
		}

		// communicating to all other nodes is crucial for encryption session
		// => check that we have connections to all cluster nodes
		if self.nodes.iter().any(|n| !cluster.is_connected(n)) {
			return Err(Error::NodeDisconnected);
		}

		// check that there's no active encryption session with the same id
		let nonce = self.check_session_nonce(&master, nonce)?;
		self.generation_sessions.insert(master, session_id, cluster.clone(), false, move ||
			Ok(GenerationSessionImpl::new(GenerationSessionParams {
				id: session_id.clone(),
				self_node_id: self.self_node_id.clone(),
				key_storage: Some(self.key_storage.clone()),
				cluster: cluster,
				nonce: Some(nonce),
			})))
			.map(|session| {
				if self.make_faulty_generation_sessions.load(Ordering::Relaxed) {
					session.simulate_faulty_behaviour();
				}
				session
			})
	}

	/// Send generation session error.
	pub fn respond_with_generation_error(&self, session_id: &SessionId, error: message::SessionError) {
		self.generation_sessions.sessions.read().get(session_id)
			.map(|s| {
				// error in generation session is considered fatal
				// => broadcast error

				// do not bother processing the send error, as we are already processing an error
				let _ = s.cluster_view.broadcast(Message::Generation(GenerationMessage::SessionError(error)));
			});
	}

	/// Create new encryption session.
	pub fn new_encryption_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>) -> Result<Arc<EncryptionSessionImpl>, Error> {
		let encrypted_data = self.read_key_share(&session_id, &cluster)?;
		let nonce = self.check_session_nonce(&master, nonce)?;

		self.encryption_sessions.insert(master, session_id, cluster.clone(), false, move || EncryptionSessionImpl::new(EncryptionSessionParams {
			id: session_id.clone(),
			self_node_id: self.self_node_id.clone(),
			encrypted_data: encrypted_data,
			key_storage: self.key_storage.clone(),
			cluster: cluster,
			nonce: nonce,
		}))
	}

	/// Send encryption session error.
	pub fn respond_with_encryption_error(&self, session_id: &SessionId, error: message::EncryptionSessionError) {
		self.encryption_sessions.sessions.read().get(session_id)
			.map(|s| {
				// error in encryption session is considered fatal
				// => broadcast error

				// do not bother processing the send error, as we are already processing an error
				let _ = s.cluster_view.broadcast(Message::Encryption(EncryptionMessage::EncryptionSessionError(error)));
			});
	}

	/// Create new decryption session.
	pub fn new_decryption_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option<u64>, cluster: Arc<Cluster>, requester_signature: Option<Signature>) -> Result<Arc<DecryptionSessionImpl>, Error> {
		let session_id = DecryptionSessionId::new(session_id, sub_session_id);
		let encrypted_data = self.read_key_share(&session_id.id, &cluster)?;
		let nonce = self.check_session_nonce(&master, nonce)?;

		self.decryption_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || DecryptionSessionImpl::new(DecryptionSessionParams {
			meta: SessionMeta {
				id: session_id.id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.threshold,
			},
			access_key: session_id.access_key,
			key_share: encrypted_data,
			acl_storage: self.acl_storage.clone(),
			cluster: cluster,
			nonce: nonce,
		}, requester_signature))
	}

	/// Send decryption session error.
	pub fn respond_with_decryption_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::DecryptionSessionError) {
		let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
		self.decryption_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in decryption session is non-fatal if it occurs on a slave node
				// => either respond with error
				// => or broadcast error

				// do not bother processing the send error, as we are already processing an error
				if s.master == self.self_node_id {
					let _ = s.cluster_view.broadcast(Message::Decryption(DecryptionMessage::DecryptionSessionError(error)));
				} else {
					let _ = s.cluster_view.send(to, Message::Decryption(DecryptionMessage::DecryptionSessionError(error)));
				}
			});
	}

	/// Create new signing session.
	pub fn new_signing_session(&self, master: NodeId, session_id: SessionId, sub_session_id: Secret, nonce: Option<u64>, cluster: Arc<Cluster>, requester_signature: Option<Signature>) -> Result<Arc<SigningSessionImpl>, Error> {
		let session_id = SigningSessionId::new(session_id, sub_session_id);
		let encrypted_data = self.read_key_share(&session_id.id, &cluster)?;
		let nonce = self.check_session_nonce(&master, nonce)?;

		self.signing_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || SigningSessionImpl::new(SigningSessionParams {
			meta: SessionMeta {
				id: session_id.id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.threshold,
			},
			access_key: session_id.access_key,
			key_share: encrypted_data,
			acl_storage: self.acl_storage.clone(),
			cluster: cluster,
			nonce: nonce,
		}, requester_signature))
	}

	/// Send signing session error.
	pub fn respond_with_signing_error(&self, session_id: &SessionId, sub_session_id: &Secret, to: &NodeId, error: message::SigningSessionError) {
		let session_id = SigningSessionId::new(session_id.clone(), sub_session_id.clone());
		self.signing_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in signing session is non-fatal if it occurs on a slave node
				// => either respond with error
				// => or broadcast error

				// do not bother processing the send error, as we are already processing an error
				if s.master == self.self_node_id {
					let _ = s.cluster_view.broadcast(Message::Signing(SigningMessage::SigningSessionError(error)));
				} else {
					let _ = s.cluster_view.send(to, Message::Signing(SigningMessage::SigningSessionError(error)));
				}
			});
	}

	/// Create new share add session.
	pub fn new_share_add_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>) -> Result<Arc<AdminSession>, Error> {
		let nonce = self.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;

		self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || ShareAddSessionImpl::new(ShareAddSessionParams {
			meta: ShareChangeSessionMeta {
				id: session_id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
			},
			transport: ShareAddTransport::new(session_id.clone(), nonce, cluster),
			key_storage: self.key_storage.clone(),
			admin_public: Some(admin_public),
			nonce: nonce,
		}).map(AdminSession::ShareAdd))
	}

	/// Send share add session error.
	pub fn respond_with_share_add_error(&self, session_id: &SessionId, error: message::ShareAddError) {
		self.admin_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in any share change session is considered fatal
				// => broadcast error

				// do not bother processing the send error, as we are already processing an error
				let _ = s.cluster_view.broadcast(Message::ShareAdd(ShareAddMessage::ShareAddError(error)));
			});
	}

	/// Create new share move session.
	pub fn new_share_move_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>) -> Result<Arc<AdminSession>, Error> {
		let nonce = self.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;

		self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || ShareMoveSessionImpl::new(ShareMoveSessionParams {
			meta: ShareChangeSessionMeta {
				id: session_id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
			},
			transport: ShareMoveTransport::new(session_id.clone(), nonce, cluster),
			key_storage: self.key_storage.clone(),
			admin_public: Some(admin_public),
			nonce: nonce,
		}).map(AdminSession::ShareMove))
	}

	/// Send share move session error.
	pub fn respond_with_share_move_error(&self, session_id: &SessionId, error: message::ShareMoveError) {
		self.admin_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in any share change session is considered fatal
				// => broadcast error

				// do not bother processing the send error, as we are already processing an error
				let _ = s.cluster_view.broadcast(Message::ShareMove(ShareMoveMessage::ShareMoveError(error)));
			});
	}

	/// Create new share remove session.
	pub fn new_share_remove_session(&self, master: NodeId, session_id: SessionId, nonce: Option<u64>, cluster: Arc<Cluster>, all_nodes_set: BTreeSet<NodeId>) -> Result<Arc<AdminSession>, Error> {
		let nonce = self.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;

		self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), false, move || ShareRemoveSessionImpl::new(ShareRemoveSessionParams {
			meta: ShareChangeSessionMeta {
				id: session_id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
			},
			cluster_nodes_set: all_nodes_set,
			transport: ShareRemoveTransport::new(session_id.clone(), nonce, cluster),
			key_storage: self.key_storage.clone(),
			admin_public: Some(admin_public),
			nonce: nonce,
		}).map(AdminSession::ShareRemove))
	}

	/// Send share remove session error.
	pub fn respond_with_share_remove_error(&self, session_id: &SessionId, error: message::ShareRemoveError) {
		self.admin_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in any share change session is considered fatal
				// => broadcast error

				// do not bother processing the send error, as we are already processing an error
				let _ = s.cluster_view.broadcast(Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(error)));
			});
	}

	/// Create new servers set change session.
	pub fn new_servers_set_change_session(&self, master: NodeId, session_id: Option<SessionId>, nonce: Option<u64>, cluster: Arc<Cluster>, all_nodes_set: BTreeSet<NodeId>) -> Result<Arc<AdminSession>, Error> {
		// communicating to all other nodes is crucial for ServersSetChange session
		// => check that we have connections to all cluster nodes
		if self.nodes.iter().any(|n| !cluster.is_connected(n)) {
			return Err(Error::NodeDisconnected);
		}

		let session_id = match session_id {
			Some(session_id) => if session_id == *SERVERS_SET_CHANGE_SESSION_ID {
				session_id
			} else {
				return Err(Error::InvalidMessage)
			},
			None => (*SERVERS_SET_CHANGE_SESSION_ID).clone(),
		};
		let nonce = self.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;

		self.admin_sessions.insert(master, session_id.clone(), cluster.clone(), true, move || ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams {
			meta: ShareChangeSessionMeta {
				id: session_id,
				self_node_id: self.self_node_id.clone(),
				master_node_id: master,
			},
			cluster: cluster,
			key_storage: self.key_storage.clone(),
			admin_public: admin_public,
			nonce: nonce,
			all_nodes_set: all_nodes_set,
		}).map(AdminSession::ServersSetChange))
	}

	/// Send servers set change session error.
	pub fn respond_with_servers_set_change_error(&self, session_id: &SessionId, error: message::ServersSetChangeError) {
		self.admin_sessions.sessions.read().get(&session_id)
			.map(|s| {
				// error in any share change session is considered fatal
				// => broadcast error

				// do not bother processing the send error, as we are already processing an error
				let _ = s.cluster_view.broadcast(Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(error)));
			});
	}

	/// Stop sessions that are stalling.
	pub fn stop_stalled_sessions(&self) {
		self.generation_sessions.stop_stalled_sessions();
		self.encryption_sessions.stop_stalled_sessions();
		self.decryption_sessions.stop_stalled_sessions();
		self.signing_sessions.stop_stalled_sessions();
		self.negotiation_sessions.stop_stalled_sessions();
		self.admin_sessions.stop_stalled_sessions();
	}

@ -539,44 +283,19 @@ impl ClusterSessions {
		self.encryption_sessions.on_connection_timeout(node_id);
		self.decryption_sessions.on_connection_timeout(node_id);
		self.signing_sessions.on_connection_timeout(node_id);
		self.negotiation_sessions.on_connection_timeout(node_id);
		self.admin_sessions.on_connection_timeout(node_id);
		self.max_nonce.write().remove(node_id);
	}

	/// Read key share && remove disconnected nodes.
	fn read_key_share(&self, key_id: &SessionId, cluster: &Arc<Cluster>) -> Result<DocumentKeyShare, Error> {
		let mut encrypted_data = self.key_storage.get(key_id).map_err(|e| Error::KeyStorage(e.into()))?;

		// some of the nodes which were encrypting the secret may be down
		// => do not use these in session
		let disconnected_nodes: BTreeSet<_> = encrypted_data.id_numbers.keys().cloned().collect();
		for disconnected_node in disconnected_nodes.difference(&cluster.nodes()) {
			encrypted_data.id_numbers.remove(&disconnected_node);
		}
		Ok(encrypted_data)
	}

	/// Check or generate new session nonce.
	fn check_session_nonce(&self, master: &NodeId, nonce: Option<u64>) -> Result<u64, Error> {
		// if we're the master node of the session, the nonce should be generated here
		// if we're a slave node of the session, the nonce should be passed from outside
		debug_assert!((master == &self.self_node_id) == nonce.is_none());

		match nonce {
			Some(nonce) => match nonce > *self.max_nonce.write().entry(master.clone()).or_insert(0) {
				true => Ok(nonce),
				false => Err(Error::ReplayProtection),
			},
			None => Ok(self.session_counter.fetch_add(1, Ordering::Relaxed) as u64 + 1),
		}
		self.creator_core.on_connection_timeout(node_id);
	}
}

impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: ClusterSession {
	pub fn new(container_state: Arc<Mutex<ClusterSessionsContainerState>>) -> Self {
impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D> {
	pub fn new(creator: SC, container_state: Arc<Mutex<ClusterSessionsContainerState>>) -> Self {
		ClusterSessionsContainer {
			creator: creator,
			sessions: RwLock::new(BTreeMap::new()),
			container_state: container_state,
			_pd: Default::default(),
		}
	}

@ -584,7 +303,7 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
		self.sessions.read().is_empty()
	}

	pub fn get(&self, session_id: &K, update_last_message_time: bool) -> Option<Arc<V>> {
	pub fn get(&self, session_id: &S::Id, update_last_message_time: bool) -> Option<Arc<S>> {
		let mut sessions = self.sessions.write();
		sessions.get_mut(session_id)
			.map(|s| {
@ -595,14 +314,21 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
			})
	}

	pub fn insert<F: FnOnce() -> Result<V, Error>>(&self, master: NodeId, session_id: K, cluster: Arc<Cluster>, is_exclusive_session: bool, session: F) -> Result<Arc<V>, Error> {
	#[cfg(test)]
	pub fn first(&self) -> Option<Arc<S>> {
		self.sessions.read().values().nth(0).map(|s| s.session.clone())
	}

	pub fn insert(&self, cluster: Arc<Cluster>, master: NodeId, session_id: S::Id, session_nonce: Option<u64>, is_exclusive_session: bool, creation_data: Option<D>) -> Result<Arc<S>, Error> {
		let mut sessions = self.sessions.write();
		if sessions.contains_key(&session_id) {
			return Err(Error::DuplicateSessionId);
		}

		// create cluster
		// let cluster = create_cluster_view(data, requires_all_connections)?;
		// create session
		let session = Arc::new(session()?);
		let session = self.creator.create(cluster.clone(), master.clone(), session_nonce, session_id.clone(), creation_data)?;
		// check if session can be started
		self.container_state.lock().on_session_starting(is_exclusive_session)?;

@ -619,19 +345,19 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
		Ok(session)
	}

	pub fn remove(&self, session_id: &K) {
	pub fn remove(&self, session_id: &S::Id) {
		if self.sessions.write().remove(session_id).is_some() {
			self.container_state.lock().on_session_completed();
		}
	}

	pub fn enqueue_message(&self, session_id: &K, sender: NodeId, message: M, is_queued_message: bool) {
	pub fn enqueue_message(&self, session_id: &S::Id, sender: NodeId, message: Message, is_queued_message: bool) {
		self.sessions.write().get_mut(session_id)
			.map(|session| if is_queued_message { session.queue.push_front((sender, message)) }
				else { session.queue.push_back((sender, message)) });
	}

	pub fn dequeue_message(&self, session_id: &K) -> Option<(NodeId, M)> {
	pub fn dequeue_message(&self, session_id: &S::Id) -> Option<(NodeId, Message)> {
		self.sessions.write().get_mut(session_id)
			.and_then(|session| session.queue.pop_front())
	}
|
||||
@ -670,8 +396,8 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
	}
}

impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: ClusterSession, SessionId: From<K> {
	pub fn send_keep_alive(&self, session_id: &K, self_node_id: &NodeId) {
impl<S, SC, D> ClusterSessionsContainer<S, SC, D> where S: ClusterSession, SC: ClusterSessionCreator<S, D>, SessionId: From<S::Id> {
	pub fn send_keep_alive(&self, session_id: &S::Id, self_node_id: &NodeId) {
		if let Some(session) = self.sessions.write().get_mut(session_id) {
			let now = time::Instant::now();
			if self_node_id == &session.master && now - session.last_keep_alive_time > time::Duration::from_secs(SESSION_KEEP_ALIVE_INTERVAL) {
@ -686,7 +412,7 @@ impl<K, V, M> ClusterSessionsContainer<K, V, M> where K: Clone + Ord, V: Cluster
		}
	}

	pub fn on_keep_alive(&self, session_id: &K, sender: &NodeId) {
	pub fn on_keep_alive(&self, session_id: &S::Id, sender: &NodeId) {
		if let Some(session) = self.sessions.write().get_mut(session_id) {
			let now = time::Instant::now();
			// we only accept keep alive from master node of ServersSetChange session
@ -736,28 +462,32 @@ impl ClusterSessionsContainerState {
	}
}

impl SessionIdWithSubSession {
	/// Create new decryption session Id.
	pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self {
		SessionIdWithSubSession {
			id: session_id,
			access_key: sub_session_id,
		}
	}
}

impl PartialOrd for SessionIdWithSubSession {
	fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> {
		Some(self.cmp(other))
	}
}

impl Ord for SessionIdWithSubSession {
	fn cmp(&self, other: &Self) -> ::std::cmp::Ordering {
		match self.id.cmp(&other.id) {
			::std::cmp::Ordering::Equal => self.access_key.cmp(&other.access_key),
			r @ _ => r,
		}
	}
}

impl AdminSession {
	pub fn as_share_add(&self) -> Option<&ShareAddSessionImpl<ShareAddTransport>> {
		match *self {
			AdminSession::ShareAdd(ref session) => Some(session),
			_ => None
		}
	}

	pub fn as_share_move(&self) -> Option<&ShareMoveSessionImpl<ShareMoveTransport>> {
		match *self {
			AdminSession::ShareMove(ref session) => Some(session),
			_ => None
		}
	}

	pub fn as_share_remove(&self) -> Option<&ShareRemoveSessionImpl<ShareRemoveTransport>> {
		match *self {
			AdminSession::ShareRemove(ref session) => Some(session),
			_ => None
		}
	}

	pub fn as_servers_set_change(&self) -> Option<&ServersSetChangeSessionImpl> {
		match *self {
			AdminSession::ServersSetChange(ref session) => Some(session),
@ -767,11 +497,22 @@ impl AdminSession {
}

impl ClusterSession for AdminSession {
	type Id = SessionId;

	fn type_name() -> &'static str {
		"admin"
	}

	fn id(&self) -> SessionId {
		match *self {
			AdminSession::ShareAdd(ref session) => session.id().clone(),
			AdminSession::ServersSetChange(ref session) => session.id().clone(),
		}
	}

	fn is_finished(&self) -> bool {
		match *self {
			AdminSession::ShareAdd(ref session) => session.is_finished(),
			AdminSession::ShareMove(ref session) => session.is_finished(),
			AdminSession::ShareRemove(ref session) => session.is_finished(),
			AdminSession::ServersSetChange(ref session) => session.is_finished(),
		}
	}
@ -779,8 +520,6 @@ impl ClusterSession for AdminSession {
	fn on_session_timeout(&self) {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_session_timeout(),
			AdminSession::ShareMove(ref session) => session.on_session_timeout(),
			AdminSession::ShareRemove(ref session) => session.on_session_timeout(),
			AdminSession::ServersSetChange(ref session) => session.on_session_timeout(),
		}
	}
@ -788,11 +527,23 @@ impl ClusterSession for AdminSession {
	fn on_node_timeout(&self, node_id: &NodeId) {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_node_timeout(node_id),
			AdminSession::ShareMove(ref session) => session.on_node_timeout(node_id),
			AdminSession::ShareRemove(ref session) => session.on_node_timeout(node_id),
			AdminSession::ServersSetChange(ref session) => session.on_node_timeout(node_id),
		}
	}

	fn on_session_error(&self, node: &NodeId, error: Error) {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_session_error(node, error),
			AdminSession::ServersSetChange(ref session) => session.on_session_error(node, error),
		}
	}

	fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> {
		match *self {
			AdminSession::ShareAdd(ref session) => session.on_message(sender, message),
			AdminSession::ServersSetChange(ref session) => session.on_message(sender, message),
		}
	}
}

impl GenerationSessionWrapper {
@ -856,7 +607,7 @@ impl Drop for EncryptionSessionWrapper {
}

impl DecryptionSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: DecryptionSessionId, session: Arc<DecryptionSession>) -> Arc<Self> {
	pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<DecryptionSession>) -> Arc<Self> {
		Arc::new(DecryptionSessionWrapper {
			session: session,
			session_id: session_id,
@ -880,7 +631,7 @@ impl Drop for DecryptionSessionWrapper {
}

impl SigningSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: SigningSessionId, session: Arc<SigningSession>) -> Arc<Self> {
	pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<SigningSession>) -> Arc<Self> {
		Arc::new(SigningSessionWrapper {
			session: session,
			session_id: session_id,
@ -922,24 +673,6 @@ impl ShareAddSession for AdminSessionWrapper {
	}
}

impl ShareMoveSession for AdminSessionWrapper {
	fn wait(&self) -> Result<(), Error> {
		match *self.session {
			AdminSession::ShareMove(ref session) => session.wait(),
			_ => Err(Error::InvalidMessage),
		}
	}
}

impl ShareRemoveSession for AdminSessionWrapper {
	fn wait(&self) -> Result<(), Error> {
		match *self.session {
			AdminSession::ShareRemove(ref session) => session.wait(),
			_ => Err(Error::InvalidMessage),
		}
	}
}

impl ServersSetChangeSession for AdminSessionWrapper {
	fn wait(&self) -> Result<(), Error> {
		match *self.session {
@ -957,15 +690,60 @@ impl Drop for AdminSessionWrapper {
	}
}

pub fn create_cluster_view(data: &Arc<ClusterData>, requires_all_connections: bool) -> Result<Arc<Cluster>, Error> {
	if requires_all_connections {
		if !data.connections.disconnected_nodes().is_empty() {
			return Err(Error::NodeDisconnected);
		}
	}

	let mut connected_nodes = data.connections.connected_nodes();
	connected_nodes.insert(data.self_key_pair.public().clone());

	Ok(Arc::new(ClusterView::new(data.clone(), connected_nodes)))
}

impl KeyNegotiationSessionWrapper {
	pub fn new(cluster: Weak<ClusterData>, session_id: SessionIdWithSubSession, session: Arc<KeyVersionNegotiationSession>) -> Arc<Self> {
		Arc::new(KeyNegotiationSessionWrapper {
			session: session,
			session_id: session_id,
			cluster: cluster,
		})
	}
}

impl KeyVersionNegotiationSession for KeyNegotiationSessionWrapper {
	fn set_continue_action(&self, action: ContinueAction) {
		self.session.set_continue_action(action)
	}

	fn continue_action(&self) -> Option<ContinueAction> {
		self.session.continue_action()
	}

	fn wait(&self) -> Result<(H256, NodeId), Error> {
		self.session.wait()
	}
}

impl Drop for KeyNegotiationSessionWrapper {
	fn drop(&mut self) {
		if let Some(cluster) = self.cluster.upgrade() {
			cluster.sessions().negotiation_sessions.remove(&self.session_id);
		}
	}
}


#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use std::collections::BTreeSet;
	use ethkey::{Random, Generator};
	use key_server_cluster::{Error, DummyAclStorage, DummyKeyStorage, MapKeyServerSet, PlainNodeKeyPair};
	use key_server_cluster::cluster::ClusterConfiguration;
	use key_server_cluster::cluster::tests::DummyCluster;
	use super::ClusterSessions;
	use super::{ClusterSessions, AdminSessionCreationData};

	pub fn make_cluster_sessions() -> ClusterSessions {
		let key_pair = Random.generate().unwrap();
@ -985,9 +763,8 @@ mod tests {
	#[test]
	fn cluster_session_cannot_be_started_if_exclusive_session_is_active() {
		let sessions = make_cluster_sessions();

		sessions.new_generation_session(Default::default(), Default::default(), Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone()))).unwrap();
		match sessions.new_servers_set_change_session(Default::default(), None, Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone())), BTreeSet::new()) {
		sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None).unwrap();
		match sessions.admin_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, true, Some(AdminSessionCreationData::ShareAdd(Default::default()))) {
			Err(Error::HasActiveSessions) => (),
			Err(e) => unreachable!(format!("{}", e)),
			Ok(_) => unreachable!("OK"),
@ -998,8 +775,8 @@ mod tests {
	fn exclusive_session_cannot_be_started_if_other_session_is_active() {
		let sessions = make_cluster_sessions();

		sessions.new_servers_set_change_session(Default::default(), None, Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone())), BTreeSet::new()).unwrap();
		match sessions.new_generation_session(Default::default(), Default::default(), Some(1), Arc::new(DummyCluster::new(sessions.self_node_id.clone()))) {
		sessions.admin_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, true, Some(AdminSessionCreationData::ShareAdd(Default::default()))).unwrap();
		match sessions.generation_sessions.insert(Arc::new(DummyCluster::new(Default::default())), Default::default(), Default::default(), None, false, None) {
			Err(Error::ExclusiveSessionActive) => (),
			Err(e) => unreachable!(format!("{}", e)),
			Ok(_) => unreachable!("OK"),
423
secret_store/src/key_server_cluster/cluster_sessions_creator.rs
Normal file
@ -0,0 +1,423 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::collections::BTreeMap;
use parking_lot::RwLock;
use ethkey::{Public, Signature};
use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, DocumentKeyShare, SessionMeta};
use key_server_cluster::cluster::{Cluster, ClusterConfiguration};
use key_server_cluster::cluster_sessions::{ClusterSession, SessionIdWithSubSession, AdminSession, AdminSessionCreationData};
use key_server_cluster::message::{self, Message, DecryptionMessage, SigningMessage, ConsensusMessageOfShareAdd,
	ShareAddMessage, ServersSetChangeMessage, ConsensusMessage, ConsensusMessageWithServersSet};
use key_server_cluster::generation_session::{SessionImpl as GenerationSessionImpl, SessionParams as GenerationSessionParams};
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl,
	SessionParams as DecryptionSessionParams};
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionParams as EncryptionSessionParams};
use key_server_cluster::signing_session::{SessionImpl as SigningSessionImpl,
	SessionParams as SigningSessionParams};
use key_server_cluster::share_add_session::{SessionImpl as ShareAddSessionImpl,
	SessionParams as ShareAddSessionParams, IsolatedSessionTransport as ShareAddTransport};
use key_server_cluster::servers_set_change_session::{SessionImpl as ServersSetChangeSessionImpl,
	SessionParams as ServersSetChangeSessionParams};
use key_server_cluster::key_version_negotiation_session::{SessionImpl as KeyVersionNegotiationSessionImpl,
	SessionParams as KeyVersionNegotiationSessionParams, IsolatedSessionTransport as VersionNegotiationTransport,
	FastestResultComputer as FastestResultKeyVersionsResultComputer};
use key_server_cluster::admin_sessions::ShareChangeSessionMeta;

/// Generic cluster session creator.
pub trait ClusterSessionCreator<S: ClusterSession, D> {
	/// Get creation data from message.
	fn creation_data_from_message(_message: &Message) -> Result<Option<D>, Error> {
		Ok(None)
	}

	/// Prepare error message.
	fn make_error_message(sid: S::Id, nonce: u64, err: Error) -> Message;

	/// Create cluster session.
	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: S::Id, creation_data: Option<D>) -> Result<Arc<S>, Error>;
}

/// Message with session id.
pub trait IntoSessionId<K> {
	/// Get session id.
	fn into_session_id(&self) -> Result<K, Error>;
}

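These two traits are the heart of the refactoring: the container resolves a session id from an incoming message via `IntoSessionId` and delegates lazy construction to a `ClusterSessionCreator`. A self-contained sketch of the pattern with toy types (none of the names below are the crate's real API):

```rust
use std::collections::BTreeMap;
use std::marker::PhantomData;
use std::sync::Arc;

// Toy stand-ins for ClusterSession / ClusterSessionCreator.
trait Session { type Id: Ord + Clone; }

trait SessionCreator<S: Session, D> {
	// Build a session for `id`, optionally using creation data `data`.
	fn create(&self, id: S::Id, data: Option<D>) -> Result<Arc<S>, String>;
}

struct Container<S: Session, SC, D> {
	creator: SC,
	sessions: BTreeMap<S::Id, Arc<S>>,
	_pd: PhantomData<D>,
}

impl<S: Session, SC: SessionCreator<S, D>, D> Container<S, SC, D> {
	// Mirrors the shape of ClusterSessionsContainer::insert: refuse
	// duplicate ids, then ask the creator to build the session.
	fn insert(&mut self, id: S::Id, data: Option<D>) -> Result<Arc<S>, String> {
		if self.sessions.contains_key(&id) {
			return Err("duplicate session id".into());
		}
		let session = self.creator.create(id.clone(), data)?;
		self.sessions.insert(id, session.clone());
		Ok(session)
	}
}
```

The design choice this buys is visible in the diff above: one generic container replaces a per-session-type zoo of `new_*_session` constructors.
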
pub struct SessionCreatorCore {
	/// Self node id.
	self_node_id: NodeId,
	/// Reference to key storage
	key_storage: Arc<KeyStorage>,
	/// Reference to ACL storage
	acl_storage: Arc<AclStorage>,
	/// Always-increasing sessions counter. Is used as session nonce to prevent replay attacks:
	/// 1) during handshake, KeyServers generate new random key to encrypt messages
	/// => there's no way to use messages from previous connections for replay attacks
	/// 2) when session (of any type) is started, master node increases its own session counter and broadcasts it
	/// 3) when slave KeyServer receives session initialization message, it checks that the new nonce is larger than the previous one (from the same master)
	/// => there's no way to use messages from previous sessions for replay attacks
	/// 4) KeyServer checks that each session message contains the same nonce as the initialization message
	/// Given that: (A) handshake is secure and (B) session itself is initially replay-protected
	/// => this guarantees that sessions are replay-protected.
	session_counter: AtomicUsize,
	/// Maximal session nonce, received from given connection.
	max_nonce: RwLock<BTreeMap<NodeId, u64>>,
}

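The counter/map pair above implements the monotonic-nonce rule from the comment. A toy, self-contained illustration of the slave-side check (node ids are plain integers here, and this sketch also records the accepted nonce as the new maximum, which is an assumption about bookkeeping, not a copy of the crate code below):

```rust
use std::collections::BTreeMap;

// Accept a session nonce from `master` only if it is strictly greater
// than the highest nonce seen from that master so far.
fn accept_nonce(max_seen: &mut BTreeMap<u64, u64>, master: u64, nonce: u64) -> bool {
	let max = max_seen.entry(master).or_insert(0);
	if nonce > *max {
		*max = nonce;
		true
	} else {
		false
	}
}

fn main() {
	let mut seen = BTreeMap::new();
	assert!(accept_nonce(&mut seen, 1, 5));  // first session from this master
	assert!(!accept_nonce(&mut seen, 1, 5)); // replayed nonce is rejected
	assert!(accept_nonce(&mut seen, 1, 6));  // strictly increasing is fine
}
```
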
impl SessionCreatorCore {
	/// Create new session creator core.
	pub fn new(config: &ClusterConfiguration) -> Self {
		SessionCreatorCore {
			self_node_id: config.self_key_pair.public().clone(),
			acl_storage: config.acl_storage.clone(),
			key_storage: config.key_storage.clone(),
			session_counter: AtomicUsize::new(0),
			max_nonce: RwLock::new(BTreeMap::new()),
		}
	}

	/// When the connection to a node has timed out.
	pub fn on_connection_timeout(&self, node_id: &NodeId) {
		self.max_nonce.write().remove(node_id);
	}

	/// Check or generate new session nonce.
	fn check_session_nonce(&self, master: &NodeId, nonce: Option<u64>) -> Result<u64, Error> {
		// if we're master node of the session, then nonce should be generated
		// if we're slave node of the session, then nonce should be passed from outside
		match nonce {
			Some(nonce) => match nonce > *self.max_nonce.write().entry(master.clone()).or_insert(0) {
				true => Ok(nonce),
				false => Err(Error::ReplayProtection),
			},
			None => Ok(self.session_counter.fetch_add(1, Ordering::Relaxed) as u64 + 1),
		}
	}

	/// Read key share && remove disconnected nodes.
	fn read_key_share(&self, key_id: &SessionId) -> Result<Option<DocumentKeyShare>, Error> {
		self.key_storage.get(key_id).map_err(|e| Error::KeyStorage(e.into()))
	}
}

/// Generation session creator.
pub struct GenerationSessionCreator {
	/// True if generation sessions must fail.
	pub make_faulty_generation_sessions: AtomicBool,
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl GenerationSessionCreator {
	#[cfg(test)]
	pub fn make_faulty_generation_sessions(&self) {
		self.make_faulty_generation_sessions.store(true, Ordering::Relaxed);
	}
}

impl ClusterSessionCreator<GenerationSessionImpl, ()> for GenerationSessionCreator {
	fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message {
		message::Message::Generation(message::GenerationMessage::SessionError(message::SessionError {
			session: sid.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionId, _creation_data: Option<()>) -> Result<Arc<GenerationSessionImpl>, Error> {
		// check that there's no finished encryption session with the same id
		if self.core.key_storage.contains(&id) {
			return Err(Error::DuplicateSessionId);
		}

		let nonce = self.core.check_session_nonce(&master, nonce)?;
		Ok(GenerationSessionImpl::new(GenerationSessionParams {
			id: id.clone(),
			self_node_id: self.core.self_node_id.clone(),
			key_storage: Some(self.core.key_storage.clone()),
			cluster: cluster,
			nonce: Some(nonce),
		}))
		.map(|session| {
			if self.make_faulty_generation_sessions.load(Ordering::Relaxed) {
				session.simulate_faulty_behaviour();
			}
			session
		})
		.map(Arc::new)
	}
}

/// Encryption session creator.
pub struct EncryptionSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<EncryptionSessionImpl, ()> for EncryptionSessionCreator {
	fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message {
		message::Message::Encryption(message::EncryptionMessage::EncryptionSessionError(message::EncryptionSessionError {
			session: sid.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionId, _creation_data: Option<()>) -> Result<Arc<EncryptionSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		Ok(Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams {
			id: id,
			self_node_id: self.core.self_node_id.clone(),
			encrypted_data: encrypted_data,
			key_storage: self.core.key_storage.clone(),
			cluster: cluster,
			nonce: nonce,
		})?))
	}
}

/// Decryption session creator.
pub struct DecryptionSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<DecryptionSessionImpl, Signature> for DecryptionSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<Signature>, Error> {
		match *message {
			Message::Decryption(DecryptionMessage::DecryptionConsensusMessage(ref message)) => match &message.message {
				&ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requestor_signature.clone().into())),
				_ => Err(Error::InvalidMessage),
			},
			Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(ref message)) => Ok(Some(message.requestor_signature.clone().into())),
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::Decryption(message::DecryptionMessage::DecryptionSessionError(message::DecryptionSessionError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionIdWithSubSession, requester_signature: Option<Signature>) -> Result<Arc<DecryptionSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		Ok(Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams {
			meta: SessionMeta {
				id: id.id,
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(),
			},
			access_key: id.access_key,
			key_share: encrypted_data,
			acl_storage: self.core.acl_storage.clone(),
			cluster: cluster,
			nonce: nonce,
		}, requester_signature)?))
	}
}

/// Signing session creator.
pub struct SigningSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<SigningSessionImpl, Signature> for SigningSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<Signature>, Error> {
		match *message {
			Message::Signing(SigningMessage::SigningConsensusMessage(ref message)) => match &message.message {
				&ConsensusMessage::InitializeConsensusSession(ref message) => Ok(Some(message.requestor_signature.clone().into())),
				_ => Err(Error::InvalidMessage),
			},
			Message::Signing(SigningMessage::SigningSessionDelegation(ref message)) => Ok(Some(message.requestor_signature.clone().into())),
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::Signing(message::SigningMessage::SigningSessionError(message::SigningSessionError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionIdWithSubSession, requester_signature: Option<Signature>) -> Result<Arc<SigningSessionImpl>, Error> {
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		Ok(Arc::new(SigningSessionImpl::new(SigningSessionParams {
			meta: SessionMeta {
				id: id.id,
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
				threshold: encrypted_data.as_ref().map(|ks| ks.threshold).unwrap_or_default(),
			},
			access_key: id.access_key,
			key_share: encrypted_data,
			acl_storage: self.core.acl_storage.clone(),
			cluster: cluster,
			nonce: nonce,
		}, requester_signature)?))
	}
}

/// Key version negotiation session creator.
pub struct KeyVersionNegotiationSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
}

impl ClusterSessionCreator<KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>, ()> for KeyVersionNegotiationSessionCreator {
	fn make_error_message(sid: SessionIdWithSubSession, nonce: u64, err: Error) -> Message {
		message::Message::KeyVersionNegotiation(message::KeyVersionNegotiationMessage::KeyVersionsError(message::KeyVersionsError {
			session: sid.id.into(),
			sub_session: sid.access_key.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionIdWithSubSession, _creation_data: Option<()>) -> Result<Arc<KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>>, Error> {
		let encrypted_data = self.core.read_key_share(&id.id)?;
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let computer = Arc::new(FastestResultKeyVersionsResultComputer::new(self.core.self_node_id.clone(), encrypted_data.as_ref()));
		Ok(Arc::new(KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams {
			meta: ShareChangeSessionMeta {
				id: id.id.clone(),
				self_node_id: self.core.self_node_id.clone(),
				master_node_id: master,
			},
			sub_session: id.access_key.clone(),
			key_share: encrypted_data,
			result_computer: computer,
			transport: VersionNegotiationTransport {
				cluster: cluster,
				key_id: id.id,
				sub_session: id.access_key.clone(),
				nonce: nonce,
			},
			nonce: nonce,
		})))
	}
}

/// Administrative session creator.
pub struct AdminSessionCreator {
	/// Creator core.
	pub core: Arc<SessionCreatorCore>,
	/// Administrator public.
	pub admin_public: Option<Public>,
}

impl ClusterSessionCreator<AdminSession, AdminSessionCreationData> for AdminSessionCreator {
	fn creation_data_from_message(message: &Message) -> Result<Option<AdminSessionCreationData>, Error> {
		match *message {
			Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message)) => match &message.message {
				&ConsensusMessageWithServersSet::InitializeConsensusSession(_) => Ok(Some(AdminSessionCreationData::ServersSetChange)),
				_ => Err(Error::InvalidMessage),
			},
			Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ref message)) => match &message.message {
				&ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) => Ok(Some(AdminSessionCreationData::ShareAdd(message.version.clone().into()))),
				_ => Err(Error::InvalidMessage),
			},
			_ => Err(Error::InvalidMessage),
		}
	}

	fn make_error_message(sid: SessionId, nonce: u64, err: Error) -> Message {
		message::Message::ServersSetChange(message::ServersSetChangeMessage::ServersSetChangeError(message::ServersSetChangeError {
			session: sid.into(),
			session_nonce: nonce,
			error: err.into(),
		}))
	}

	fn create(&self, cluster: Arc<Cluster>, master: NodeId, nonce: Option<u64>, id: SessionId, creation_data: Option<AdminSessionCreationData>) -> Result<Arc<AdminSession>, Error> {
		let nonce = self.core.check_session_nonce(&master, nonce)?;
		let admin_public = self.admin_public.clone().ok_or(Error::AccessDenied)?;
		Ok(Arc::new(match creation_data {
			Some(AdminSessionCreationData::ShareAdd(version)) => {
				AdminSession::ShareAdd(ShareAddSessionImpl::new(ShareAddSessionParams {
					meta: ShareChangeSessionMeta {
						id: id.clone(),
						self_node_id: self.core.self_node_id.clone(),
						master_node_id: master,
					},
					transport: ShareAddTransport::new(id.clone(), Some(version), nonce, cluster),
					key_storage: self.core.key_storage.clone(),
					nonce: nonce,
					admin_public: Some(admin_public),
				})?)
			},
			Some(AdminSessionCreationData::ServersSetChange) => {
				AdminSession::ServersSetChange(ServersSetChangeSessionImpl::new(ServersSetChangeSessionParams {
					meta: ShareChangeSessionMeta {
						id: id.clone(),
						self_node_id: self.core.self_node_id.clone(),
						master_node_id: master,
					},
					cluster: cluster.clone(),
					key_storage: self.core.key_storage.clone(),
					nonce: nonce,
					all_nodes_set: cluster.nodes(),
					admin_public: admin_public,
				})?)
			},
			None => unreachable!("expected to call with non-empty creation data; qed"),
		}))
	}
}

impl IntoSessionId<SessionId> for Message {
	fn into_session_id(&self) -> Result<SessionId, Error> {
		match *self {
			Message::Generation(ref message) => Ok(message.session_id().clone()),
			Message::Encryption(ref message) => Ok(message.session_id().clone()),
			Message::Decryption(_) => Err(Error::InvalidMessage),
			Message::Signing(_) => Err(Error::InvalidMessage),
			Message::ServersSetChange(ref message) => Ok(message.session_id().clone()),
			Message::ShareAdd(ref message) => Ok(message.session_id().clone()),
			Message::KeyVersionNegotiation(_) => Err(Error::InvalidMessage),
			Message::Cluster(_) => Err(Error::InvalidMessage),
		}
	}
}

impl IntoSessionId<SessionIdWithSubSession> for Message {
	fn into_session_id(&self) -> Result<SessionIdWithSubSession, Error> {
		match *self {
			Message::Generation(_) => Err(Error::InvalidMessage),
			Message::Encryption(_) => Err(Error::InvalidMessage),
			Message::Decryption(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::Signing(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::ServersSetChange(_) => Err(Error::InvalidMessage),
			Message::ShareAdd(_) => Err(Error::InvalidMessage),
			Message::KeyVersionNegotiation(ref message) => Ok(SessionIdWithSubSession::new(message.session_id().clone(), message.sub_session_id().clone())),
			Message::Cluster(_) => Err(Error::InvalidMessage),
		}
	}
}
@ -26,8 +26,7 @@ use bigint::prelude::U256;
use bigint::hash::H256;
use key_server_cluster::Error;
use key_server_cluster::message::{Message, ClusterMessage, GenerationMessage, EncryptionMessage,
	DecryptionMessage, SigningMessage, ServersSetChangeMessage, ShareAddMessage, ShareMoveMessage,
	ShareRemoveMessage};
	DecryptionMessage, SigningMessage, ServersSetChangeMessage, ShareAddMessage, KeyVersionNegotiationMessage};

/// Size of serialized header.
pub const MESSAGE_HEADER_SIZE: usize = 18;
@ -88,6 +87,9 @@ pub fn serialize_message(message: Message) -> Result<SerializedMessage, Error> {
		Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (152, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (153, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(payload)) => (154, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(payload)) => (155, serde_json::to_vec(&payload)),
		Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(payload))
			=> (156, serde_json::to_vec(&payload)),

		Message::Signing(SigningMessage::SigningConsensusMessage(payload)) => (200, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningGenerationMessage(payload)) => (201, serde_json::to_vec(&payload)),
@ -95,45 +97,40 @@ pub fn serialize_message(message: Message) -> Result<SerializedMessage, Error> {
		Message::Signing(SigningMessage::PartialSignature(payload)) => (203, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningSessionError(payload)) => (204, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningSessionCompleted(payload)) => (205, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningSessionDelegation(payload)) => (206, serde_json::to_vec(&payload)),
		Message::Signing(SigningMessage::SigningSessionDelegationCompleted(payload)) => (207, serde_json::to_vec(&payload)),

		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(payload))
			=> (250, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(payload)) => (251, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(payload)) => (252, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(payload))
		Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(payload))
			=> (253, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(payload))
		Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(payload))
			=> (254, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(payload))
		Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(payload))
			=> (255, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(payload))
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(payload))
			=> (256, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(payload))
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(payload))
			=> (257, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(payload))
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(payload))
			=> (258, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(payload))
			=> (259, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(payload)) => (260, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(payload)) => (261, serde_json::to_vec(&payload)),
		Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(payload))
			=> (261, serde_json::to_vec(&payload)),
			=> (262, serde_json::to_vec(&payload)),

		Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(payload)) => (300, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::KeyShareCommon(payload)) => (301, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::NewAbsoluteTermShare(payload)) => (302, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::NewKeysDissemination(payload)) => (303, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::ShareAddError(payload)) => (304, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::NewKeysDissemination(payload)) => (302, serde_json::to_vec(&payload)),
		Message::ShareAdd(ShareAddMessage::ShareAddError(payload)) => (303, serde_json::to_vec(&payload)),

		Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(payload)) => (350, serde_json::to_vec(&payload)),
		Message::ShareMove(ShareMoveMessage::ShareMoveRequest(payload)) => (351, serde_json::to_vec(&payload)),
		Message::ShareMove(ShareMoveMessage::ShareMove(payload)) => (352, serde_json::to_vec(&payload)),
		Message::ShareMove(ShareMoveMessage::ShareMoveConfirm(payload)) => (353, serde_json::to_vec(&payload)),
		Message::ShareMove(ShareMoveMessage::ShareMoveError(payload)) => (354, serde_json::to_vec(&payload)),

		Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(payload)) => (400, serde_json::to_vec(&payload)),
		Message::ShareRemove(ShareRemoveMessage::ShareRemoveRequest(payload)) => (401, serde_json::to_vec(&payload)),
		Message::ShareRemove(ShareRemoveMessage::ShareRemoveConfirm(payload)) => (402, serde_json::to_vec(&payload)),
		Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(payload)) => (403, serde_json::to_vec(&payload)),
		Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(payload))
			=> (450, serde_json::to_vec(&payload)),
		Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions(payload))
			=> (451, serde_json::to_vec(&payload)),
		Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(payload))
			=> (452, serde_json::to_vec(&payload)),
	};

	let payload = payload.map_err(|err| Error::Serde(err.to_string()))?;
@ -169,6 +166,8 @@ pub fn deserialize_message(header: &MessageHeader, payload: Vec<u8>) -> Result<M
		152 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		153 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		154 => Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		155 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		156 => Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		200 => Message::Signing(SigningMessage::SigningConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		201 => Message::Signing(SigningMessage::SigningGenerationMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
@ -176,36 +175,29 @@ pub fn deserialize_message(header: &MessageHeader, payload: Vec<u8>) -> Result<M
		203 => Message::Signing(SigningMessage::PartialSignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		204 => Message::Signing(SigningMessage::SigningSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		205 => Message::Signing(SigningMessage::SigningSessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		206 => Message::Signing(SigningMessage::SigningSessionDelegation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		207 => Message::Signing(SigningMessage::SigningSessionDelegationCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		250 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		251 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		252 => Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		253 => Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		254 => Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		255 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		256 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		257 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		258 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareMoveMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		259 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareRemoveMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		260 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		261 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		253 => Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		254 => Message::ServersSetChange(ServersSetChangeMessage::InitializeShareChangeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		255 => Message::ServersSetChange(ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		256 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		257 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		258 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		261 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		262 => Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		300 => Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		301 => Message::ShareAdd(ShareAddMessage::KeyShareCommon(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		302 => Message::ShareAdd(ShareAddMessage::NewAbsoluteTermShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		303 => Message::ShareAdd(ShareAddMessage::NewKeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		304 => Message::ShareAdd(ShareAddMessage::ShareAddError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		302 => Message::ShareAdd(ShareAddMessage::NewKeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		303 => Message::ShareAdd(ShareAddMessage::ShareAddError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		350 => Message::ShareMove(ShareMoveMessage::ShareMoveConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		351 => Message::ShareMove(ShareMoveMessage::ShareMoveRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		352 => Message::ShareMove(ShareMoveMessage::ShareMove(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		353 => Message::ShareMove(ShareMoveMessage::ShareMoveConfirm(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		354 => Message::ShareMove(ShareMoveMessage::ShareMoveError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		400 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveConsensusMessage(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		401 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveRequest(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		402 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveConfirm(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		403 => Message::ShareRemove(ShareRemoveMessage::ShareRemoveError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		450 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::RequestKeyVersions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		451 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersions(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
		452 => Message::KeyVersionNegotiation(KeyVersionNegotiationMessage::KeyVersionsError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),

		_ => return Err(Error::Serde(format!("unknown message type {}", header.kind))),
	})
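The renumbering above shifts the kind codes of several ServersSetChange and ShareAdd messages and introduces the 45x range for key-version negotiation; the framing itself is unchanged: a numeric kind in the header selects the variant and the payload is JSON. A minimal, self-contained sketch of that scheme, with an illustrative type and kind value rather than the crate's real message set:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct SessionError { session_nonce: u64, error: String }

// Tag the JSON payload with a numeric message kind, like serialize_message.
fn serialize(kind: u64, payload: &SessionError) -> (u64, Vec<u8>) {
	(kind, serde_json::to_vec(payload).expect("struct is serializable"))
}

// Dispatch on the kind to pick the variant, like deserialize_message.
fn deserialize(kind: u64, payload: &[u8]) -> Result<SessionError, String> {
	match kind {
		153 => serde_json::from_slice(payload).map_err(|e| e.to_string()),
		_ => Err(format!("unknown message type {}", kind)),
	}
}

fn main() {
	let err = SessionError { session_nonce: 1, error: "oops".into() };
	let (kind, bytes) = serialize(153, &err);
	assert_eq!(deserialize(kind, &bytes), Ok(err));
}
```

Because the kind byte is the only dispatch mechanism, renumbering like the one in this diff is a wire-protocol break between old and new nodes, which is why old and new codes must move together in one release.
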
@ -44,7 +44,6 @@ pub fn write_encrypted_message<A>(a: A, key: &KeyPair, message: Message) -> Writ
		Err(error) => (Some(error), write_all(a, Vec::new())),
	};


	WriteMessage {
		error: error,
		future: future,
@ -98,6 +98,11 @@ impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTran
		&self.consensus_job
	}

	/// Get mutable consensus job reference.
	pub fn consensus_job_mut(&mut self) -> &mut JobSession<ConsensusExecutor, ConsensusTransport> {
		&mut self.consensus_job
	}

	/// Get all nodes which have not rejected the consensus request.
	pub fn consensus_non_rejected_nodes(&self) -> BTreeSet<NodeId> {
		self.consensus_job.responses().iter()
@ -231,8 +236,9 @@ impl<ConsensusExecutor, ConsensusTransport, ComputationExecutor, ComputationTran
		let (is_restart_needed, timeout_result) = match self.state {
			ConsensusSessionState::WaitingForInitialization if is_self_master => {
				// it is strange to receive error before session is initialized && slave doesn't know access_key
				// => ignore this error for now
				(false, Ok(()))
				// => fatal error
				self.state = ConsensusSessionState::Failed;
				(false, Err(Error::ConsensusUnreachable))
			}
			ConsensusSessionState::WaitingForInitialization if is_node_master => {
				// can not establish consensus
@ -496,6 +502,7 @@ mod tests {
		assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization);
		session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
			version: Default::default(),
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		assert_eq!(session.on_job_request(&NodeId::from(1), 20, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap_err(), Error::InvalidMessage);
@ -508,6 +515,7 @@ mod tests {
		assert_eq!(session.state(), ConsensusSessionState::WaitingForInitialization);
		session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
			version: Default::default(),
		})).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::ConsensusEstablished);
		session.on_job_request(&NodeId::from(1), 2, SquaredSumJobExecutor, DummyJobTransport::default()).unwrap();
@ -537,15 +545,17 @@ mod tests {
		let mut session = make_slave_consensus_session(0, None);
		session.on_consensus_message(&NodeId::from(1), &ConsensusMessage::InitializeConsensusSession(InitializeConsensusSession {
			requestor_signature: sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
			version: Default::default(),
		})).unwrap();
		session.on_session_completed(&NodeId::from(1)).unwrap();
		assert_eq!(session.state(), ConsensusSessionState::Finished);
	}

	#[test]
	fn consensus_session_continues_if_node_error_received_by_uninitialized_master() {
	fn consensus_session_fails_if_node_error_received_by_uninitialized_master() {
		let mut session = make_master_consensus_session(0, None, None);
		assert_eq!(session.on_node_error(&NodeId::from(2)), Ok(false));
		assert_eq!(session.on_node_error(&NodeId::from(2)), Err(Error::ConsensusUnreachable));
		assert_eq!(session.state(), ConsensusSessionState::Failed);
	}

	#[test]
@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{BTreeSet, BTreeMap};
use bigint::hash::H256;
use ethkey::{Public, Secret};
use ethcrypto::ecies::encrypt;
use ethcrypto::DEFAULT_MAC;
@ -32,6 +33,8 @@ pub struct DecryptionJob {
	requester: Public,
	/// Key share.
	key_share: DocumentKeyShare,
	/// Key version.
	key_version: H256,
	/// Request id.
	request_id: Option<Secret>,
	/// Is shadow decryption requested.
@ -59,25 +62,27 @@ pub struct PartialDecryptionResponse {
}

impl DecryptionJob {
	pub fn new_on_slave(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare) -> Result<Self, Error> {
	pub fn new_on_slave(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, key_version: H256) -> Result<Self, Error> {
		debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some());
		Ok(DecryptionJob {
			self_node_id: self_node_id,
			access_key: access_key,
			requester: requester,
			key_share: key_share,
			key_version: key_version,
			request_id: None,
			is_shadow_decryption: None,
		})
	}

	pub fn new_on_master(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, is_shadow_decryption: bool) -> Result<Self, Error> {
	pub fn new_on_master(self_node_id: NodeId, access_key: Secret, requester: Public, key_share: DocumentKeyShare, key_version: H256, is_shadow_decryption: bool) -> Result<Self, Error> {
		debug_assert!(key_share.common_point.is_some() && key_share.encrypted_point.is_some());
		Ok(DecryptionJob {
			self_node_id: self_node_id,
			access_key: access_key,
			requester: requester,
			key_share: key_share,
			key_version: key_version,
			request_id: Some(math::generate_random_scalar()?),
			is_shadow_decryption: Some(is_shadow_decryption),
		})
@ -107,15 +112,16 @@ impl JobExecutor for DecryptionJob {
	}

	fn process_partial_request(&mut self, partial_request: PartialDecryptionRequest) -> Result<JobPartialRequestAction<PartialDecryptionResponse>, Error> {
		let key_version = self.key_share.version(&self.key_version).map_err(|e| Error::KeyStorage(e.into()))?;
		if partial_request.other_nodes_ids.len() != self.key_share.threshold
			|| partial_request.other_nodes_ids.contains(&self.self_node_id)
			|| partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) {
			|| partial_request.other_nodes_ids.iter().any(|n| !key_version.id_numbers.contains_key(n)) {
			return Err(Error::InvalidMessage);
		}

		let self_id_number = &self.key_share.id_numbers[&self.self_node_id];
		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &self.key_share.id_numbers[n]);
		let node_shadow = math::compute_node_shadow(&self.key_share.secret_share, &self_id_number, other_id_numbers)?;
		let self_id_number = &key_version.id_numbers[&self.self_node_id];
		let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &key_version.id_numbers[n]);
		let node_shadow = math::compute_node_shadow(&key_version.secret_share, &self_id_number, other_id_numbers)?;
		let decrypt_shadow = if partial_request.is_shadow_decryption { Some(math::generate_random_scalar()?) } else { None };
		let common_point = self.key_share.common_point.as_ref().expect("DecryptionJob is only created when common_point is known; qed");
		let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point(&self.access_key, &common_point, &node_shadow, decrypt_shadow)?;
@ -129,7 +135,7 @@ impl JobExecutor for DecryptionJob {
		}))
	}

	fn check_partial_response(&self, partial_response: &PartialDecryptionResponse) -> Result<JobPartialResponseAction, Error> {
	fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &PartialDecryptionResponse) -> Result<JobPartialResponseAction, Error> {
		if Some(&partial_response.request_id) != self.request_id.as_ref() {
			return Ok(JobPartialResponseAction::Ignore);
		}

@ -34,7 +34,7 @@ impl JobExecutor for DummyJob {
		unreachable!("dummy job methods are never called")
	}

	fn check_partial_response(&self, _r: &()) -> Result<JobPartialResponseAction, Error> {
	fn check_partial_response(&mut self, _s: &NodeId, _r: &()) -> Result<JobPartialResponseAction, Error> {
		unreachable!("dummy job methods are never called")
	}

@@ -48,7 +48,7 @@ pub trait JobExecutor {
    /// Process partial request.
    fn process_partial_request(&mut self, partial_request: Self::PartialJobRequest) -> Result<JobPartialRequestAction<Self::PartialJobResponse>, Error>;
    /// Check partial response of given node.
    fn check_partial_response(&self, partial_response: &Self::PartialJobResponse) -> Result<JobPartialResponseAction, Error>;
    fn check_partial_response(&mut self, sender: &NodeId, partial_response: &Self::PartialJobResponse) -> Result<JobPartialResponseAction, Error>;
    /// Compute final job response.
    fn compute_response(&self, partial_responses: &BTreeMap<NodeId, Self::PartialJobResponse>) -> Result<Self::JobResponse, Error>;
}
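The trait change is the thread running through this whole diff: `check_partial_response` now takes `&mut self` and the responding node's id, so executors can keep per-sender state while validating. A minimal sketch of an executor under the new shape (all types here are simplified stand-ins for the Parity originals, and `CountingExecutor` is hypothetical):

use std::collections::BTreeMap;

type NodeId = u64; // stand-in

#[derive(Debug)]
enum Error { InvalidMessage }

enum JobPartialRequestAction<R> { Respond(R) }
enum JobPartialResponseAction { Accept, Reject }

// Reduced version of the trait after this diff.
trait JobExecutor {
    type PartialJobRequest;
    type PartialJobResponse;
    type JobResponse;

    fn process_partial_request(&mut self, req: Self::PartialJobRequest)
        -> Result<JobPartialRequestAction<Self::PartialJobResponse>, Error>;
    fn check_partial_response(&mut self, sender: &NodeId, resp: &Self::PartialJobResponse)
        -> Result<JobPartialResponseAction, Error>;
    fn compute_response(&self, resps: &BTreeMap<NodeId, Self::PartialJobResponse>)
        -> Result<Self::JobResponse, Error>;
}

// An executor can now record which node sent each response.
struct CountingExecutor { checked: Vec<NodeId> }

impl JobExecutor for CountingExecutor {
    type PartialJobRequest = u32;
    type PartialJobResponse = u32;
    type JobResponse = u32;

    fn process_partial_request(&mut self, r: u32) -> Result<JobPartialRequestAction<u32>, Error> {
        Ok(JobPartialRequestAction::Respond(r * r))
    }
    fn check_partial_response(&mut self, sender: &NodeId, r: &u32) -> Result<JobPartialResponseAction, Error> {
        self.checked.push(*sender); // only possible with &mut self + sender
        Ok(if r % 2 == 0 { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject })
    }
    fn compute_response(&self, r: &BTreeMap<NodeId, u32>) -> Result<u32, Error> {
        Ok(r.values().sum())
    }
}

fn main() {
    let mut ex = CountingExecutor { checked: vec![] };
    let _ = ex.check_partial_response(&3, &4);
    assert_eq!(ex.checked, vec![3]);
}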
@@ -127,11 +127,21 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
        &self.transport
    }

    /// Get mutable transport reference.
    pub fn transport_mut(&mut self) -> &mut Transport {
        &mut self.transport
    }

    /// Get executor reference.
    pub fn executor(&self) -> &Executor {
        &self.executor
    }

    /// Get mutable executor reference.
    pub fn executor_mut(&mut self) -> &mut Executor {
        &mut self.executor
    }

    /// Get job state.
    pub fn state(&self) -> JobSessionState {
        self.data.state
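With the executor now mutated during response checking, the session grows `_mut` accessors next to the existing shared ones. A small sketch of the paired-accessor pattern, assuming a session that owns both its transport and its executor (`Session` here is illustrative, not the real `JobSession`):

struct Session<E, T> { executor: E, transport: T }

impl<E, T> Session<E, T> {
    fn executor(&self) -> &E { &self.executor }
    fn executor_mut(&mut self) -> &mut E { &mut self.executor }
    fn transport(&self) -> &T { &self.transport }
    fn transport_mut(&mut self) -> &mut T { &mut self.transport }
}

fn main() {
    let mut s = Session { executor: vec![1u32], transport: String::new() };
    s.executor_mut().push(2);          // mutate the owned executor in place
    s.transport_mut().push_str("ok");  // same for the transport
    assert_eq!(s.executor().len(), 2);
    assert_eq!(s.transport(), "ok");
}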
@@ -181,7 +191,10 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
    /// Initialize.
    pub fn initialize(&mut self, nodes: BTreeSet<NodeId>) -> Result<(), Error> {
        debug_assert!(self.meta.self_node_id == self.meta.master_node_id);
        debug_assert!(nodes.len() >= self.meta.threshold + 1);

        if nodes.len() < self.meta.threshold + 1 {
            return Err(Error::ConsensusUnreachable);
        }

        if self.data.state != JobSessionState::Inactive {
            return Err(Error::InvalidStateForRequest);
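The added lines back the `debug_assert!` with a runtime guard, so a release build (where `debug_assert!` compiles to nothing) fails cleanly with `ConsensusUnreachable` instead of proceeding with too few nodes. A minimal sketch of that guard in isolation (`check_consensus_possible` is a hypothetical name; node ids are stand-in integers):

use std::collections::BTreeSet;

#[derive(Debug, PartialEq)]
enum Error { ConsensusUnreachable }

// A threshold-t scheme needs at least t + 1 participants; this must hold
// even when debug assertions are compiled out.
fn check_consensus_possible(nodes: &BTreeSet<u64>, threshold: usize) -> Result<(), Error> {
    if nodes.len() < threshold + 1 {
        return Err(Error::ConsensusUnreachable);
    }
    Ok(())
}

fn main() {
    let nodes: BTreeSet<u64> = [1, 2].into_iter().collect();
    assert_eq!(check_consensus_possible(&nodes, 1), Ok(()));
    assert_eq!(check_consensus_possible(&nodes, 2), Err(Error::ConsensusUnreachable));
}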
@@ -266,7 +279,7 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
            return Err(Error::InvalidNodeForRequest);
        }

        match self.executor.check_partial_response(&response)? {
        match self.executor.check_partial_response(node, &response)? {
            JobPartialResponseAction::Ignore => Ok(()),
            JobPartialResponseAction::Reject => {
                active_data.rejects.insert(node.clone());
@@ -279,7 +292,6 @@ impl<Executor, Transport> JobSession<Executor, Transport> where Executor: JobExe
            },
            JobPartialResponseAction::Accept => {
                active_data.responses.insert(node.clone(), response);

                if active_data.responses.len() < self.meta.threshold + 1 {
                    return Ok(());
                }
@@ -351,7 +363,7 @@ pub mod tests {

        fn prepare_partial_request(&self, _n: &NodeId, _nodes: &BTreeSet<NodeId>) -> Result<u32, Error> { Ok(2) }
        fn process_partial_request(&mut self, r: u32) -> Result<JobPartialRequestAction<u32>, Error> { if r <= 10 { Ok(JobPartialRequestAction::Respond(r * r)) } else { Err(Error::InvalidMessage) } }
        fn check_partial_response(&self, r: &u32) -> Result<JobPartialResponseAction, Error> { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } }
        fn check_partial_response(&mut self, _s: &NodeId, r: &u32) -> Result<JobPartialResponseAction, Error> { if r % 2 == 0 { Ok(JobPartialResponseAction::Accept) } else { Ok(JobPartialResponseAction::Reject) } }
        fn compute_response(&self, r: &BTreeMap<NodeId, u32>) -> Result<u32, Error> { Ok(r.values().fold(0, |v1, v2| v1 + v2)) }
    }
@@ -24,6 +24,8 @@ use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartial
pub struct KeyAccessJob {
    /// Key id.
    id: SessionId,
    /// Has key share?
    has_key_share: bool,
    /// ACL storage.
    acl_storage: Arc<AclStorage>,
    /// Requester signature.
@@ -34,6 +36,7 @@ impl KeyAccessJob {
    pub fn new_on_slave(id: SessionId, acl_storage: Arc<AclStorage>) -> Self {
        KeyAccessJob {
            id: id,
            has_key_share: true,
            acl_storage: acl_storage,
            signature: None,
        }
@@ -42,11 +45,24 @@ impl KeyAccessJob {
    pub fn new_on_master(id: SessionId, acl_storage: Arc<AclStorage>, signature: Signature) -> Self {
        KeyAccessJob {
            id: id,
            has_key_share: true,
            acl_storage: acl_storage,
            signature: Some(signature),
        }
    }

    pub fn set_has_key_share(&mut self, has_key_share: bool) {
        self.has_key_share = has_key_share;
    }

    pub fn set_requester_signature(&mut self, signature: Signature) {
        self.signature = Some(signature);
    }

    pub fn requester_signature(&self) -> Option<&Signature> {
        self.signature.as_ref()
    }

    pub fn requester(&self) -> Result<Option<Public>, Error> {
        match self.signature.as_ref() {
            Some(signature) => Ok(Some(recover(signature, &self.id)?)),
@@ -65,13 +81,17 @@ impl JobExecutor for KeyAccessJob {
    }

    fn process_partial_request(&mut self, partial_request: Signature) -> Result<JobPartialRequestAction<bool>, Error> {
        if !self.has_key_share {
            return Ok(JobPartialRequestAction::Reject(false));
        }

        self.signature = Some(partial_request.clone());
        self.acl_storage.check(&recover(&partial_request, &self.id)?, &self.id)
            .map_err(|_| Error::AccessDenied)
            .map(|is_confirmed| if is_confirmed { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
    }

    fn check_partial_response(&self, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
    fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
        Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject })
    }
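The access check above recovers the requester's public key from the signature over the key id, consults ACL storage, and maps any storage failure to `AccessDenied`. A sketch of that flow with stand-in types; signature recovery is mocked here (the real code calls `ethkey::recover`), and `process` is a hypothetical free-function version of `process_partial_request`:

use std::collections::BTreeSet;

type Public = u64;     // stand-in for a recovered public key
type SessionId = u64;  // stand-in for the key id

#[derive(Debug)]
enum Error { AccessDenied }
enum JobPartialRequestAction { Respond(bool), Reject(bool) }

struct AclStorage { allowed: BTreeSet<(Public, SessionId)> }

impl AclStorage {
    // Stand-in for the real ACL check, which may fail with a storage error.
    fn check(&self, requester: &Public, id: &SessionId) -> Result<bool, ()> {
        Ok(self.allowed.contains(&(*requester, *id)))
    }
}

// Mocked recovery: a "signature" here is just the signer id.
fn recover(signature: u64, _id: &SessionId) -> Public { signature }

// Mirrors the flow in the diff: recover requester, check ACL, map storage
// errors to AccessDenied, translate the boolean into Respond/Reject.
fn process(acl: &AclStorage, id: &SessionId, signature: u64) -> Result<JobPartialRequestAction, Error> {
    let requester = recover(signature, id);
    acl.check(&requester, id)
        .map_err(|_| Error::AccessDenied)
        .map(|ok| if ok { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
}

fn main() {
    let acl = AclStorage { allowed: [(7, 1)].into_iter().collect() };
    assert!(matches!(process(&acl, &1, 7), Ok(JobPartialRequestAction::Respond(true))));
    assert!(matches!(process(&acl, &1, 8), Ok(JobPartialRequestAction::Reject(false))));
}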
@@ -18,16 +18,13 @@ use std::collections::{BTreeSet, BTreeMap};
use ethkey::{Public, Signature, recover};
use tiny_keccak::Keccak;
use key_server_cluster::{Error, NodeId, SessionId};
use key_server_cluster::message::{InitializeConsensusSessionWithServersSet, InitializeConsensusSessionWithServersMap,
    InitializeConsensusSessionWithServersSecretMap};
use key_server_cluster::message::{InitializeConsensusSessionWithServersSet, InitializeConsensusSessionOfShareAdd};
use key_server_cluster::jobs::job_session::{JobPartialResponseAction, JobPartialRequestAction, JobExecutor};

/// Purpose of this job is to check if the requester is an administrator of SecretStore (i.e. has access to change the key servers set).
pub struct ServersSetChangeAccessJob {
    /// Servers set administrator public key (this could be changed to ACL-based check later).
    administrator: Public,
    /// Current servers set (in session/cluster).
    current_servers_set: BTreeSet<NodeId>,
    /// Old servers set.
    old_servers_set: Option<BTreeSet<NodeId>>,
    /// New servers set.
@@ -61,22 +58,11 @@ impl<'a> From<&'a InitializeConsensusSessionWithServersSet> for ServersSetChange
    }
}

impl<'a> From<&'a InitializeConsensusSessionWithServersMap> for ServersSetChangeAccessRequest {
    fn from(message: &InitializeConsensusSessionWithServersMap) -> Self {
impl<'a> From<&'a InitializeConsensusSessionOfShareAdd> for ServersSetChangeAccessRequest {
    fn from(message: &InitializeConsensusSessionOfShareAdd) -> Self {
        ServersSetChangeAccessRequest {
            old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
            new_servers_set: message.new_nodes_set.keys().cloned().map(Into::into).collect(),
            old_set_signature: message.old_set_signature.clone().into(),
            new_set_signature: message.new_set_signature.clone().into(),
        }
    }
}

impl<'a> From<&'a InitializeConsensusSessionWithServersSecretMap> for ServersSetChangeAccessRequest {
    fn from(message: &InitializeConsensusSessionWithServersSecretMap) -> Self {
        ServersSetChangeAccessRequest {
            old_servers_set: message.old_nodes_set.iter().cloned().map(Into::into).collect(),
            new_servers_set: message.new_nodes_set.keys().cloned().map(Into::into).collect(),
            new_servers_set: message.new_nodes_map.keys().cloned().map(Into::into).collect(),
            old_set_signature: message.old_set_signature.clone().into(),
            new_set_signature: message.new_set_signature.clone().into(),
        }
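These `From` impls funnel differently shaped initialization messages into the single `ServersSetChangeAccessRequest`. A sketch of the pattern with two simplified message shapes (both structs here are assumptions carrying far fewer fields than the real messages): one names the new servers as a set, the other as a map keyed by node id, so only the keys are taken.

use std::collections::{BTreeMap, BTreeSet};

type NodeId = u64; // stand-in

struct InitWithSet { old_nodes_set: BTreeSet<NodeId>, new_nodes_set: BTreeSet<NodeId> }
struct InitWithMap { old_nodes_set: BTreeSet<NodeId>, new_nodes_map: BTreeMap<NodeId, u64> }

struct AccessRequest { old_servers_set: BTreeSet<NodeId>, new_servers_set: BTreeSet<NodeId> }

impl<'a> From<&'a InitWithSet> for AccessRequest {
    fn from(m: &InitWithSet) -> Self {
        AccessRequest {
            old_servers_set: m.old_nodes_set.iter().cloned().collect(),
            new_servers_set: m.new_nodes_set.iter().cloned().collect(),
        }
    }
}

impl<'a> From<&'a InitWithMap> for AccessRequest {
    fn from(m: &InitWithMap) -> Self {
        AccessRequest {
            old_servers_set: m.old_nodes_set.iter().cloned().collect(),
            // for map-shaped messages only the keys name the new servers
            new_servers_set: m.new_nodes_map.keys().cloned().collect(),
        }
    }
}

fn main() {
    let msg = InitWithMap {
        old_nodes_set: [1, 2].into_iter().collect(),
        new_nodes_map: [(2, 0), (3, 0)].into_iter().collect(),
    };
    let req = AccessRequest::from(&msg);
    let expected: BTreeSet<NodeId> = [2, 3].into_iter().collect();
    assert_eq!(req.new_servers_set, expected);
}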
@@ -84,10 +70,9 @@ impl<'a> From<&'a InitializeConsensusSessionWithServersSecretMap> for ServersSet
    }
}

impl ServersSetChangeAccessJob {
    pub fn new_on_slave(administrator: Public, current_servers_set: BTreeSet<NodeId>) -> Self {
    pub fn new_on_slave(administrator: Public) -> Self {
        ServersSetChangeAccessJob {
            administrator: administrator,
            current_servers_set: current_servers_set,
            old_servers_set: None,
            new_servers_set: None,
            old_set_signature: None,
@@ -95,10 +80,9 @@ impl ServersSetChangeAccessJob {
        }
    }

    pub fn new_on_master(administrator: Public, current_servers_set: BTreeSet<NodeId>, old_servers_set: BTreeSet<NodeId>, new_servers_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Self {
    pub fn new_on_master(administrator: Public, old_servers_set: BTreeSet<NodeId>, new_servers_set: BTreeSet<NodeId>, old_set_signature: Signature, new_set_signature: Signature) -> Self {
        ServersSetChangeAccessJob {
            administrator: administrator,
            current_servers_set: current_servers_set,
            old_servers_set: Some(old_servers_set),
            new_servers_set: Some(new_servers_set),
            old_set_signature: Some(old_set_signature),
@@ -134,11 +118,6 @@ impl JobExecutor for ServersSetChangeAccessJob {
            new_set_signature,
        } = partial_request;

        // check that current set is exactly the same set as old set
        if self.current_servers_set.symmetric_difference(&old_servers_set).next().is_some() {
            return Ok(JobPartialRequestAction::Reject(false));
        }

        // check old servers set signature
        let old_actual_public = recover(&old_set_signature, &ordered_nodes_hash(&old_servers_set).into())?;
        let new_actual_public = recover(&new_set_signature, &ordered_nodes_hash(&new_servers_set).into())?;
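The administrator check recovers a signer public key from each signature over an order-independent hash of the node set, then compares both against the configured administrator. A toy sketch of that shape; both the hash and the recovery are mocked stand-ins here (the real code keccak-hashes via `ordered_nodes_hash` and recovers via `ethkey::recover`):

use std::collections::BTreeSet;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

type NodeId = u64;
type Public = u64;

// Stand-in for `ordered_nodes_hash`: BTreeSet iteration is already ordered,
// so hashing nodes in iteration order yields a canonical digest.
fn ordered_nodes_hash(nodes: &BTreeSet<NodeId>) -> u64 {
    let mut h = DefaultHasher::new();
    for n in nodes { n.hash(&mut h); }
    h.finish()
}

// Mocked recovery: a "signature" here is just (signer, signed_hash).
fn recover(signature: &(Public, u64), hash: u64) -> Option<Public> {
    if signature.1 == hash { Some(signature.0) } else { None }
}

// Both the old-set and new-set signatures must recover to the administrator.
fn is_administrator(admin: Public,
                    old_set: &BTreeSet<NodeId>, old_sig: &(Public, u64),
                    new_set: &BTreeSet<NodeId>, new_sig: &(Public, u64)) -> bool {
    recover(old_sig, ordered_nodes_hash(old_set)) == Some(admin)
        && recover(new_sig, ordered_nodes_hash(new_set)) == Some(admin)
}

fn main() {
    let admin = 99;
    let old_set: BTreeSet<NodeId> = [1, 2].into_iter().collect();
    let new_set: BTreeSet<NodeId> = [1, 2, 3].into_iter().collect();
    let old_sig = (admin, ordered_nodes_hash(&old_set));
    let new_sig = (admin, ordered_nodes_hash(&new_set));
    assert!(is_administrator(admin, &old_set, &old_sig, &new_set, &new_sig));
}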
@@ -148,7 +127,7 @@ impl JobExecutor for ServersSetChangeAccessJob {
        Ok(if is_administrator { JobPartialRequestAction::Respond(true) } else { JobPartialRequestAction::Reject(false) })
    }

    fn check_partial_response(&self, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
    fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &bool) -> Result<JobPartialResponseAction, Error> {
        Ok(if *partial_response { JobPartialResponseAction::Accept } else { JobPartialResponseAction::Reject })
    }
@@ -27,6 +27,8 @@ pub struct SigningJob {
    self_node_id: NodeId,
    /// Key share.
    key_share: DocumentKeyShare,
    /// Key version.
    key_version: H256,
    /// Session public key.
    session_public: Public,
    /// Session secret coefficient.
@@ -56,10 +58,11 @@ pub struct PartialSigningResponse {
}

impl SigningJob {
    pub fn new_on_slave(self_node_id: NodeId, key_share: DocumentKeyShare, session_public: Public, session_secret_coeff: Secret) -> Result<Self, Error> {
    pub fn new_on_slave(self_node_id: NodeId, key_share: DocumentKeyShare, key_version: H256, session_public: Public, session_secret_coeff: Secret) -> Result<Self, Error> {
        Ok(SigningJob {
            self_node_id: self_node_id,
            key_share: key_share,
            key_version: key_version,
            session_public: session_public,
            session_secret_coeff: session_secret_coeff,
            request_id: None,
@@ -67,10 +70,11 @@ impl SigningJob {
        })
    }

    pub fn new_on_master(self_node_id: NodeId, key_share: DocumentKeyShare, session_public: Public, session_secret_coeff: Secret, message_hash: H256) -> Result<Self, Error> {
    pub fn new_on_master(self_node_id: NodeId, key_share: DocumentKeyShare, key_version: H256, session_public: Public, session_secret_coeff: Secret, message_hash: H256) -> Result<Self, Error> {
        Ok(SigningJob {
            self_node_id: self_node_id,
            key_share: key_share,
            key_version: key_version,
            session_public: session_public,
            session_secret_coeff: session_secret_coeff,
            request_id: Some(math::generate_random_scalar()?),
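As with `DecryptionJob`, only the master constructor draws a random request id; slaves start with `None` and later match against the id the master distributes. A reduced sketch of the split (all names are stand-ins, and `random_scalar` mocks `math::generate_random_scalar()`):

struct SigningJob { request_id: Option<u64>, message_hash: Option<u64> }

fn random_scalar() -> u64 { 4 } // stand-in for a cryptographic RNG

impl SigningJob {
    // Slave: no request id yet, no message hash of its own.
    fn new_on_slave() -> Self {
        SigningJob { request_id: None, message_hash: None }
    }
    // Master: generates the request id the whole session will agree on.
    fn new_on_master(message_hash: u64) -> Self {
        SigningJob { request_id: Some(random_scalar()), message_hash: Some(message_hash) }
    }
}

fn main() {
    assert!(SigningJob::new_on_slave().request_id.is_none());
    assert!(SigningJob::new_on_master(0xabc).request_id.is_some());
}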
@@ -102,14 +106,15 @@ impl JobExecutor for SigningJob {
    }

    fn process_partial_request(&mut self, partial_request: PartialSigningRequest) -> Result<JobPartialRequestAction<PartialSigningResponse>, Error> {
        let key_version = self.key_share.version(&self.key_version).map_err(|e| Error::KeyStorage(e.into()))?;
        if partial_request.other_nodes_ids.len() != self.key_share.threshold
            || partial_request.other_nodes_ids.contains(&self.self_node_id)
            || partial_request.other_nodes_ids.iter().any(|n| !self.key_share.id_numbers.contains_key(n)) {
            || partial_request.other_nodes_ids.iter().any(|n| !key_version.id_numbers.contains_key(n)) {
            return Err(Error::InvalidMessage);
        }

        let self_id_number = &self.key_share.id_numbers[&self.self_node_id];
        let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &self.key_share.id_numbers[n]);
        let self_id_number = &key_version.id_numbers[&self.self_node_id];
        let other_id_numbers = partial_request.other_nodes_ids.iter().map(|n| &key_version.id_numbers[n]);
        let combined_hash = math::combine_message_hash_with_public(&partial_request.message_hash, &self.session_public)?;
        Ok(JobPartialRequestAction::Respond(PartialSigningResponse {
            request_id: partial_request.id,
@@ -117,14 +122,14 @@ impl JobExecutor for SigningJob {
            self.key_share.threshold,
            &combined_hash,
            &self.session_secret_coeff,
            &self.key_share.secret_share,
            &key_version.secret_share,
            self_id_number,
            other_id_numbers
        )?,
        }))
    }

    fn check_partial_response(&self, partial_response: &PartialSigningResponse) -> Result<JobPartialResponseAction, Error> {
    fn check_partial_response(&mut self, _sender: &NodeId, partial_response: &PartialSigningResponse) -> Result<JobPartialResponseAction, Error> {
        if Some(&partial_response.request_id) != self.request_id.as_ref() {
            return Ok(JobPartialResponseAction::Ignore);
        }
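Note the guard at the top of `check_partial_response`: a response carrying a stale or foreign request id is ignored rather than rejected, so late replies from a previous attempt neither help nor hurt the current consensus round. A minimal sketch of that pattern (`Job` and `check` are illustrative names):

#[derive(Debug, PartialEq)]
enum JobPartialResponseAction { Accept, Ignore }

struct Job { request_id: Option<u64> }

// Mirrors the guard above: a mismatched request id yields Ignore, not Reject.
fn check(job: &Job, response_request_id: u64) -> JobPartialResponseAction {
    if Some(&response_request_id) != job.request_id.as_ref() {
        return JobPartialResponseAction::Ignore;
    }
    JobPartialResponseAction::Accept // the real code validates the payload here
}

fn main() {
    let job = Job { request_id: Some(7) };
    assert_eq!(check(&job, 7), JobPartialResponseAction::Accept);
    assert_eq!(check(&job, 8), JobPartialResponseAction::Ignore);
}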
@@ -54,12 +54,12 @@ impl JobExecutor for UnknownSessionsJob {

    fn process_partial_request(&mut self, partial_request: NodeId) -> Result<JobPartialRequestAction<BTreeSet<SessionId>>, Error> {
        Ok(JobPartialRequestAction::Respond(self.key_storage.iter()
            .filter(|&(_, ref key_share)| !key_share.id_numbers.contains_key(&partial_request))
            .filter(|&(_, ref key_share)| !key_share.versions.last().map(|v| v.id_numbers.contains_key(&partial_request)).unwrap_or(true))
            .map(|(id, _)| id.clone())
            .collect()))
    }

    fn check_partial_response(&self, _partial_response: &BTreeSet<SessionId>) -> Result<JobPartialResponseAction, Error> {
    fn check_partial_response(&mut self, _sender: &NodeId, _partial_response: &BTreeSet<SessionId>) -> Result<JobPartialResponseAction, Error> {
        Ok(JobPartialResponseAction::Accept)
    }
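With versioned shares, a session counts as unknown to the requesting node only when the *latest* key version exists but does not list that node; shares with no versions drop out because `unwrap_or(true)` makes the negated predicate false. A self-contained sketch of that filter (types are simplified stand-ins for the Parity key-storage structures):

use std::collections::{BTreeMap, BTreeSet};

type NodeId = u64;
type SessionId = u64;

struct KeyVersion { id_numbers: BTreeMap<NodeId, u64> }
struct KeyShare { versions: Vec<KeyVersion> }

// Mirrors the filter above: report sessions whose latest version lacks `node`.
fn unknown_sessions(storage: &BTreeMap<SessionId, KeyShare>, node: NodeId) -> BTreeSet<SessionId> {
    storage.iter()
        .filter(|&(_, key_share)| !key_share.versions.last()
            .map(|v| v.id_numbers.contains_key(&node))
            .unwrap_or(true))
        .map(|(id, _)| *id)
        .collect()
}

fn main() {
    let version = |nodes: &[NodeId]| KeyVersion {
        id_numbers: nodes.iter().map(|n| (*n, 0)).collect(),
    };
    let mut storage = BTreeMap::new();
    storage.insert(1, KeyShare { versions: vec![version(&[10, 20])] });
    storage.insert(2, KeyShare { versions: vec![version(&[20, 30])] });
    storage.insert(3, KeyShare { versions: vec![] }); // no versions: skipped
    let expected: BTreeSet<SessionId> = [2].into_iter().collect();
    assert_eq!(unknown_sessions(&storage, 10), expected);
}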