From 2f757babb9d29607682fcb5a8c1f6af18033281c Mon Sep 17 00:00:00 2001 From: Vadim Sloun Date: Thu, 30 Mar 2017 23:16:54 +0300 Subject: [PATCH 1/6] fix for Ubuntu Dockerfile --- docker/ubuntu/Dockerfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile index 475555be9..0ee84e1c5 100644 --- a/docker/ubuntu/Dockerfile +++ b/docker/ubuntu/Dockerfile @@ -8,7 +8,10 @@ RUN apt-get update && \ curl \ git \ file \ - binutils + binutils \ + libssl-dev \ + pkg-config \ + libudev-dev # install rustup RUN curl https://sh.rustup.rs -sSf | sh -s -- -y From c9c8f920d2b7b8cd636e490a3b10aea17a464249 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Apr 2017 09:40:18 +0200 Subject: [PATCH 2/6] Futures-based native wrappers for contract ABIs (#5341) * initial native contract generator * get generated code compiling * unit tests for type codegen * autogenerate registry contract * native_contracts entry for registry * service_transaction_checker * fixed indentation --- Cargo.lock | 46 ++- ethcore/Cargo.toml | 74 ++-- ethcore/native_contracts/Cargo.toml | 15 + ethcore/native_contracts/build.rs | 40 ++ ethcore/native_contracts/generator/Cargo.toml | 9 + ethcore/native_contracts/generator/src/lib.rs | 346 ++++++++++++++++++ ethcore/native_contracts/src/lib.rs | 30 ++ ethcore/native_contracts/src/registry.rs | 22 ++ .../src/service_transaction.rs | 22 ++ ethcore/src/client/client.rs | 77 ++-- ethcore/src/client/mod.rs | 1 - ethcore/src/client/registry.rs | 338 ----------------- ethcore/src/lib.rs | 54 +-- .../src/miner/service_transaction_checker.rs | 173 +-------- 14 files changed, 642 insertions(+), 605 deletions(-) create mode 100644 ethcore/native_contracts/Cargo.toml create mode 100644 ethcore/native_contracts/build.rs create mode 100644 ethcore/native_contracts/generator/Cargo.toml create mode 100644 ethcore/native_contracts/generator/src/lib.rs create mode 100644 ethcore/native_contracts/src/lib.rs create mode 100644 ethcore/native_contracts/src/registry.rs create mode 100644 ethcore/native_contracts/src/service_transaction.rs delete mode 100644 ethcore/src/client/registry.rs diff --git a/Cargo.lock b/Cargo.lock index c0c904fe3..8a70f35a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -361,7 +361,7 @@ dependencies = [ [[package]] name = "ethabi" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ -392,7 +392,7 @@ dependencies = [ "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.7.0", "ethcore-bloom-journal 0.1.0", "ethcore-devtools 1.7.0", @@ -407,6 +407,7 @@ dependencies = [ "ethkey 0.2.0", "ethstore 0.1.0", "evmjit 1.7.0", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "hardware-wallet 1.7.0", "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", @@ -414,6 +415,7 @@ dependencies = [ "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", 
"lru-cache 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "native-contracts 0.1.0", "num 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -944,6 +946,14 @@ dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "heck" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "hidapi" version = "0.3.1" @@ -1394,6 +1404,25 @@ dependencies = [ "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "native-contract-generator" +version = "0.1.0" +dependencies = [ + "ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "heck 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "native-contracts" +version = "0.1.0" +dependencies = [ + "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-util 1.7.0", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "native-contract-generator 0.1.0", +] + [[package]] name = "native-tls" version = "0.1.0" @@ -1593,7 +1622,7 @@ dependencies = [ name = "parity-hash-fetch" version = "1.7.0" dependencies = [ - "ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-util 1.7.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1703,7 +1732,7 @@ dependencies = [ name = "parity-updater" version = "1.7.0" dependencies = [ - "ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.7.0", "ethcore-ipc 1.7.0", "ethcore-ipc-codegen 1.7.0", @@ -2503,6 +2532,11 @@ name = "unicode-normalization" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "unicode-segmentation" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "unicode-xid" version = "0.0.4" @@ -2678,7 +2712,7 @@ dependencies = [ "checksum elastic-array 0.6.0 (git+https://github.com/paritytech/elastic-array)" = "" "checksum env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e3856f1697098606fc6cb97a93de88ca3f3bc35bb878c725920e6e82ecf05e83" "checksum eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)" = "" -"checksum ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d8f6cc4c1acd005f48e1d17b06a461adac8fb6eeeb331fbf19a0e656fba91cd" +"checksum ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63df67d0af5e3cb906b667ca1a6e00baffbed87d0d8f5f78468a1f5eb3a66345" "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa" "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" "checksum futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = 
"8e51e7f9c150ba7fd4cee9df8bf6ea3dea5b63b68955ddad19ccd35b71dcfb4d" @@ -2688,6 +2722,7 @@ dependencies = [ "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" "checksum hamming 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65043da274378d68241eb9a8f8f8aa54e349136f7b8e12f63e3ef44043cc30e1" "checksum heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "abb306abb8d398e053cfb1b3e7b72c2f580be048b85745c52652954f8ad1439c" +"checksum heck 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f807d2f64cc044a6bcf250ff23e59be0deec7a16612c014f962a06fa7e020f9" "checksum hidapi 0.3.1 (git+https://github.com/paritytech/hidapi-rs)" = "" "checksum httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46534074dbb80b070d60a5cb8ecadd8963a00a438ae1a95268850a7ef73b67ae" "checksum hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)" = "" @@ -2848,6 +2883,7 @@ dependencies = [ "checksum unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "13a5906ca2b98c799f4b1ab4557b76367ebd6ae5ef14930ec841c74aed5f3764" "checksum unicode-bidi 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c1f7ceb96afdfeedee42bade65a0d585a6a0106f681b6749c8ff4daa8df30b3f" "checksum unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "26643a2f83bac55f1976fb716c10234485f9202dcd65cfbdf9da49867b271172" +"checksum unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18127285758f0e2c6cf325bb3f3d138a12fee27de4f23e146cd6a179f26c2cf3" "checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" "checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91" "checksum untrusted 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "193df64312e3515fd983ded55ad5bcaa7647a035804828ed757e832ce6029ef3" diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 86d623e93..697ac2e1c 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -11,44 +11,46 @@ build = "build.rs" "ethcore-ipc-codegen" = { path = "../ipc/codegen" } [dependencies] -log = "0.3" -env_logger = "0.4" -rustc-serialize = "0.3" -rust-crypto = "0.2.34" -num_cpus = "1.2" -crossbeam = "0.2.9" -lazy_static = "0.2" -bloomchain = "0.1" -semver = "0.6" bit-set = "0.4" -time = "0.1" -rand = "0.3" -byteorder = "1.0" -transient-hashmap = "0.4" -linked-hash-map = "0.3.0" -lru-cache = "0.1.0" -itertools = "0.5" -ethabi = "1.0.0" -evmjit = { path = "../evmjit", optional = true } -clippy = { version = "0.0.103", optional = true} -ethash = { path = "../ethash" } -ethcore-util = { path = "../util" } -ethcore-io = { path = "../util/io" } -ethcore-devtools = { path = "../devtools" } -ethjson = { path = "../json" } -ethcore-ipc = { path = "../ipc/rpc" } -ethstore = { path = "../ethstore" } -ethkey = { path = "../ethkey" } -ethcore-ipc-nano = { path = "../ipc/nano" } -rlp = { path = "../util/rlp" } -ethcore-stratum = { path = "../stratum" } -ethcore-bloom-journal = { path = "../util/bloom" } -hardware-wallet = { path = "../hw" } -ethcore-logger = { path = "../logger" } -stats = { path = "../util/stats" } -hyper = { git = "https://github.com/paritytech/hyper", default-features = false } -num = "0.1" +bloomchain = "0.1" bn = { git = 
"https://github.com/paritytech/bn" } +byteorder = "1.0" +clippy = { version = "0.0.103", optional = true} +crossbeam = "0.2.9" +env_logger = "0.4" +ethabi = "1.0.0" +ethash = { path = "../ethash" } +ethcore-bloom-journal = { path = "../util/bloom" } +ethcore-devtools = { path = "../devtools" } +ethcore-io = { path = "../util/io" } +ethcore-ipc = { path = "../ipc/rpc" } +ethcore-ipc-nano = { path = "../ipc/nano" } +ethcore-logger = { path = "../logger" } +ethcore-stratum = { path = "../stratum" } +ethcore-util = { path = "../util" } +ethjson = { path = "../json" } +ethkey = { path = "../ethkey" } +ethstore = { path = "../ethstore" } +evmjit = { path = "../evmjit", optional = true } +futures = "0.1" +hardware-wallet = { path = "../hw" } +hyper = { git = "https://github.com/paritytech/hyper", default-features = false } +itertools = "0.5" +lazy_static = "0.2" +linked-hash-map = "0.3.0" +log = "0.3" +lru-cache = "0.1.0" +native-contracts = { path = "native_contracts" } +num = "0.1" +num_cpus = "1.2" +rand = "0.3" +rlp = { path = "../util/rlp" } +rust-crypto = "0.2.34" +rustc-serialize = "0.3" +semver = "0.6" +stats = { path = "../util/stats" } +time = "0.1" +transient-hashmap = "0.4" [features] jit = ["evmjit"] diff --git a/ethcore/native_contracts/Cargo.toml b/ethcore/native_contracts/Cargo.toml new file mode 100644 index 000000000..085908509 --- /dev/null +++ b/ethcore/native_contracts/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "native-contracts" +description = "Generated Rust code for Ethereum contract ABIs" +version = "0.1.0" +authors = ["Parity Technologies "] +build = "build.rs" + +[dependencies] +ethabi = "1.0" +futures = "0.1" +byteorder = "1.0" +ethcore-util = { path = "../../util" } + +[build-dependencies] +native-contract-generator = { path = "generator" } diff --git a/ethcore/native_contracts/build.rs b/ethcore/native_contracts/build.rs new file mode 100644 index 000000000..a8488617a --- /dev/null +++ b/ethcore/native_contracts/build.rs @@ -0,0 +1,40 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate native_contract_generator; + +use std::path::Path; +use std::fs::File; +use std::io::Write; + +// TODO: `include!` these from files where they're pretty-printed? 
+const REGISTRY_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"canReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"bytes32"}],"name":"setData","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"}],"name":"confirmReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[{"name":"success","type":"bool"}],"payable":true,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"drop","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_amount","type":"uint256"}],"name":"setFee","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_to","type":"address"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getData","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserved","outputs":[{"name":"reserved","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"proposeReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"hasReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getOwner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getReverse","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"reverse","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","t
ype":"string"},{"name":"_who","type":"address"}],"name":"confirmReverseAs","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddress","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}]"#; +const SERVICE_TRANSACTION_ABI: &'static str = r#"[{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}]"#; + +fn build_file(name: &str, abi: &str, filename: &str) { + let code = ::native_contract_generator::generate_module(name, abi).unwrap(); + + let out_dir = ::std::env::var("OUT_DIR").unwrap(); + let dest_path = Path::new(&out_dir).join(filename); + let mut f = File::create(&dest_path).unwrap(); + + f.write_all(code.as_bytes()).unwrap(); +} + +fn main() { + build_file("Registry", REGISTRY_ABI, "registry.rs"); + build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs"); +} diff --git a/ethcore/native_contracts/generator/Cargo.toml b/ethcore/native_contracts/generator/Cargo.toml new file mode 100644 index 000000000..26e9a6611 --- /dev/null +++ b/ethcore/native_contracts/generator/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "native-contract-generator" +description = "Generates Rust code for ethereum contract ABIs" +version = "0.1.0" +authors = ["Parity Technologies "] + +[dependencies] +ethabi = "1.0.2" +heck = "0.2" diff --git a/ethcore/native_contracts/generator/src/lib.rs b/ethcore/native_contracts/generator/src/lib.rs new file mode 100644 index 000000000..f49caf227 --- /dev/null +++ b/ethcore/native_contracts/generator/src/lib.rs @@ -0,0 +1,346 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Rust code contract generator. +//! The code generated will require a dependence on the `ethcore-util`, +//! `ethabi`, `byteorder`, and `futures` crates. +//! This currently isn't hygienic, so compilation of generated code may fail +//! due to missing crates or name collisions. This will change when +//! it can be ported to a procedural macro. + +use ethabi::Contract; +use ethabi::spec::{Interface, ParamType, Error as AbiError}; +use heck::SnakeCase; + +extern crate ethabi; +extern crate heck; + +/// Errors in generation. +#[derive(Debug)] +pub enum Error { + /// Bad ABI. + Abi(AbiError), + /// Unsupported parameter type in given function. + UnsupportedType(String, ParamType), +} + +/// Given an ABI string, generate code for a a Rust module containing +/// a struct which can be used to call it. +// TODO: make this a proc macro when that's possible. +pub fn generate_module(struct_name: &str, abi: &str) -> Result { + let contract = Contract::new(Interface::load(abi.as_bytes()).map_err(Error::Abi)?); + let functions = generate_functions(&contract)?; + + Ok(format!(r##" +use byteorder::{{BigEndian, ByteOrder}}; +use futures::{{future, Future, BoxFuture}}; +use ethabi::{{Contract, Interface, Token}}; +use util::{{self, Uint}}; + +pub struct {name} {{ + contract: Contract, + /// Address to make calls to. + pub address: util::Address, +}} + +const ABI: &'static str = r#"{abi_str}"#; + +impl {name} {{ + /// Create a new instance of `{name}` with an address. + /// Calls can be made, given a callback for dispatching calls asynchronously. + pub fn new(address: util::Address) -> Self {{ + let contract = Contract::new(Interface::load(ABI.as_bytes()) + .expect("ABI checked at generation-time; qed")); + {name} {{ + contract: contract, + address: address, + }} + }} + + {functions} +}} +"##, + name = struct_name, + abi_str = abi, + functions = functions, + )) +} + +// generate function bodies from the ABI. +fn generate_functions(contract: &Contract) -> Result { + let mut functions = String::new(); + for function in contract.functions() { + let name = function.name(); + let snake_name = name.to_snake_case(); + let inputs = function.input_params(); + let outputs = function.output_params(); + + let (input_params, to_tokens) = input_params_codegen(&inputs) + .map_err(|bad_type| Error::UnsupportedType(name.into(), bad_type))?; + + let (output_type, decode_outputs) = output_params_codegen(&outputs) + .map_err(|bad_type| Error::UnsupportedType(name.into(), bad_type))?; + + functions.push_str(&format!(r##" +/// Call the function "{abi_name}" on the contract. 
+/// Inputs: {abi_inputs:?} +/// Outputs: {abi_outputs:?} +pub fn {snake_name}(&self, call: F, {params}) -> BoxFuture<{output_type}, String> + where F: Fn(util::Address, Vec) -> U, U: Future, Error=String> + Send + 'static +{{ + let function = self.contract.function(r#"{abi_name}"#.to_string()) + .expect("function existence checked at compile-time; qed"); + let call_addr = self.address; + + let call_future = match function.encode_call({to_tokens}) {{ + Ok(call_data) => (call)(call_addr, call_data), + Err(e) => return future::err(format!("Error encoding call: {{:?}}", e)).boxed(), + }}; + + call_future + .and_then(move |out| function.decode_output(out).map_err(|e| format!("{{:?}}", e))) + .map(::std::collections::VecDeque::from) + .and_then(|mut outputs| {decode_outputs}) + .boxed() +}} + "##, + abi_name = name, + abi_inputs = inputs, + abi_outputs = outputs, + snake_name = snake_name, + params = input_params, + output_type = output_type, + to_tokens = to_tokens, + decode_outputs = decode_outputs, + )) + } + + Ok(functions) +} + +// generate code for params in function signature and turning them into tokens. +// +// two pieces of code are generated: the first gives input types for the function signature, +// and the second gives code to tokenize those inputs. +// +// params of form `param_0: type_0, param_1: type_1, ...` +// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }` +// +// returns any unsupported param type encountered. +fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamType> { + let mut params = String::new(); + let mut to_tokens = "{ let mut tokens = Vec::new();".to_string(); + + for (index, param_type) in inputs.iter().enumerate() { + let param_name = format!("param_{}", index); + let rust_type = rust_type(param_type.clone())?; + let (needs_mut, tokenize_code) = tokenize(¶m_name, param_type.clone()); + + params.push_str(&format!("{}{}: {}, ", + if needs_mut { "mut " } else { "" }, param_name, rust_type)); + + to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code)); + } + + to_tokens.push_str(" tokens }"); + Ok((params, to_tokens)) +} + +// generate code for outputs of the function and detokenizing them. +// +// two pieces of code are generated: the first gives an output type for the function signature +// as a tuple, and the second gives code to get that tuple from a deque of tokens. +// +// produce output type of the form (type_1, type_2, ...) without trailing comma. +// produce code for getting this output type from `outputs: VecDeque`, where +// an `Err(String)` can be returned. +// +// returns any unsupported param type encountered. +fn output_params_codegen(outputs: &[ParamType]) -> Result<(String, String), ParamType> { + let mut output_type = "(".to_string(); + let mut decode_outputs = "Ok((".to_string(); + + for (index, output) in outputs.iter().cloned().enumerate() { + let rust_type = rust_type(output.clone())?; + + output_type.push_str(&rust_type); + + decode_outputs.push_str(&format!( + r#" + outputs + .pop_front() + .and_then(|output| {{ {} }}) + .ok_or_else(|| "Wrong output type".to_string())? 
+ "#, + detokenize("output", output) + )); + + // don't append trailing commas for the last element + // so we can reuse the same code for single-output contracts, + // since T == (T) != (T,) + if index < outputs.len() - 1 { + output_type.push_str(", "); + decode_outputs.push_str(", "); + } + } + + output_type.push_str(")"); + decode_outputs.push_str("))"); + Ok((output_type, decode_outputs)) +} + +// create code for an argument type from param type. +fn rust_type(input: ParamType) -> Result { + Ok(match input { + ParamType::Address => "util::Address".into(), + ParamType::FixedBytes(len) if len <= 32 => format!("util::H{}", len * 8), + ParamType::Bytes | ParamType::FixedBytes(_) => "Vec".into(), + ParamType::Int(width) => match width { + 8 | 16 | 32 | 64 => format!("i{}", width), + _ => return Err(ParamType::Int(width)), + }, + ParamType::Uint(width) => match width { + 8 | 16 | 32 | 64 => format!("u{}", width), + 128 | 160 | 256 => format!("util::U{}", width), + _ => return Err(ParamType::Uint(width)), + }, + ParamType::Bool => "bool".into(), + ParamType::String => "String".into(), + ParamType::Array(kind) => format!("Vec<{}>", rust_type(*kind)?), + other => return Err(other), + }) +} + +// create code for tokenizing this parameter. +// returns (needs_mut, code), where needs_mut indicates mutability required. +// panics on unsupported types. +fn tokenize(name: &str, input: ParamType) -> (bool, String) { + let mut needs_mut = false; + let code = match input { + ParamType::Address => format!("Token::Address({}.0)", name), + ParamType::Bytes => format!("Token::Bytes({})", name), + ParamType::FixedBytes(len) if len <= 32 => + format!("Token::FixedBytes({}.0.to_vec())", name), + ParamType::FixedBytes(len) => { + needs_mut = true; + format!("{}.resize({}, 0); Token::FixedBytes({})", name, len, name) + } + ParamType::Int(width) => match width { + 8 => format!("let mut r = [0xff; 32]; r[31] = {}; Token::Int(r)", name), + 16 | 32 | 64 => + format!("let mut r = [0xff; 32]; BigEndian::write_i{}(&mut r[{}..], {}); Token::Int(r))", + width, 32 - (width / 8), name), + _ => panic!("Signed int with more than 64 bits not supported."), + }, + ParamType::Uint(width) => format!( + "let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)", + if width <= 64 { format!("util::U256::from({} as u64)", name) } + else { format!("util::U256::from({})", name) } + ), + ParamType::Bool => format!("Token::Bool({})", name), + ParamType::String => format!("Token::String({})", name), + ParamType::Array(kind) => { + let (needs_mut, code) = tokenize("i", *kind); + format!("Token::Array({}.into_iter().map(|{}i| {{ {} }}).collect())", + name, if needs_mut { "mut " } else { "" }, code) + } + ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported."), + }; + + (needs_mut, code) +} + +// create code for detokenizing this parameter. +// takes an output type and the identifier of a token. +// expands to code that evaluates to a Option +// panics on unsupported types. +fn detokenize(name: &str, output_type: ParamType) -> String { + match output_type { + ParamType::Address => format!("{}.to_address().map(util::H160)", name), + ParamType::Bytes => format!("{}.to_bytes()", name), + ParamType::FixedBytes(len) if len <= 32 => { + // ensure no panic on slice too small. 
+ let read_hash = format!("b.resize({}, 0); util::H{}::from_slice(&b[..{}])", + len, len * 8, len); + + format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})", + name, read_hash) + } + ParamType::FixedBytes(_) => format!("{}.to_fixed_bytes()", name), + ParamType::Int(width) => { + let read_int = match width { + 8 => "i[31] as i8".into(), + 16 | 32 | 64 => format!("BigEndian::read_i{}(&i[{}..])", width, 32 - (width / 8)), + _ => panic!("Signed integers over 64 bytes not allowed."), + }; + format!("{}.to_int().map(|i| {})", name, read_int) + } + ParamType::Uint(width) => { + let read_uint = match width { + 8 | 16 | 32 | 64 => format!("util::U256(u).low_u64() as u{}", width), + _ => format!("util::U{}::from(&u[..])", width), + }; + + format!("{}.to_uint().map(|u| {})", name, read_uint) + } + ParamType::Bool => format!("{}.to_bool()", name), + ParamType::String => format!("{}.to_string()", name), + ParamType::Array(kind) => { + let read_array = format!("x.into_iter().map(|a| {{ {} }}).collect::>()", + detokenize("a", *kind)); + + format!("{}.to_array().and_then(|x| {})", + name, read_array) + } + ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported.") + } +} + +#[cfg(test)] +mod tests { + use ethabi::spec::ParamType; + + #[test] + fn input_types() { + assert_eq!(::input_params_codegen(&[]).unwrap().0, ""); + assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: util::Address, "); + assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0, + "param_0: util::Address, param_1: Vec, "); + } + + #[test] + fn output_types() { + assert_eq!(::output_params_codegen(&[]).unwrap().0, "()"); + assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(util::Address)"); + assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0, + "(util::Address, Vec>)"); + } + + #[test] + fn rust_type() { + assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "util::H256"); + assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(), + "Vec"); + + assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64"); + assert!(::rust_type(ParamType::Uint(63)).is_err()); + + assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32"); + assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "util::U256"); + } + + // codegen tests will need bootstrapping of some kind. +} diff --git a/ethcore/native_contracts/src/lib.rs b/ethcore/native_contracts/src/lib.rs new file mode 100644 index 000000000..55c6446b7 --- /dev/null +++ b/ethcore/native_contracts/src/lib.rs @@ -0,0 +1,30 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Native contracts useful for Parity. These are type-safe wrappers +//! autogenerated at compile-time from Ethereum ABIs, and can be instantiated +//! 
given any closure which can dispatch calls to them asynchronously. + +extern crate futures; +extern crate byteorder; +extern crate ethabi; +extern crate ethcore_util as util; + +mod registry; +mod service_transaction; + +pub use self::registry::Registry; +pub use self::service_transaction::ServiceTransactionChecker; diff --git a/ethcore/native_contracts/src/registry.rs b/ethcore/native_contracts/src/registry.rs new file mode 100644 index 000000000..3b3a6414d --- /dev/null +++ b/ethcore/native_contracts/src/registry.rs @@ -0,0 +1,22 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#![allow(unused_mut, unused_variables, unused_imports)] + +//! Registrar contract: maps names to addresses and data. +// TODO: testing. + +include!(concat!(env!("OUT_DIR"), "/registry.rs")); diff --git a/ethcore/native_contracts/src/service_transaction.rs b/ethcore/native_contracts/src/service_transaction.rs new file mode 100644 index 000000000..ee3b17552 --- /dev/null +++ b/ethcore/native_contracts/src/service_transaction.rs @@ -0,0 +1,22 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#![allow(unused_mut, unused_variables, unused_imports)] + +//! Service transaction contract. +// TODO: testing. 
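+// A minimal usage sketch for the generated wrappers (it mirrors the client.rs change
+// later in this patch). The caller supplies a closure turning (contract address,
+// encoded call data) into a future resolving to the raw output bytes; `client` and
+// its `call_contract` here are illustrative assumptions, not part of this crate:
+//
+//   let checker = ServiceTransactionChecker::new(contract_addr);
+//   let dispatch = move |addr, data| future::done(client.call_contract(addr, data));
+//   let certified: bool = checker.certified(dispatch, sender).wait()?;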
+ +include!(concat!(env!("OUT_DIR"), "/service_transaction.rs")); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 9da044573..3accc777f 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -30,47 +30,48 @@ use util::trie::TrieSpec; use util::kvdb::*; // other -use io::*; -use views::BlockView; -use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError}; -use header::BlockNumber; -use state::{self, State, CleanupMode}; -use spec::Spec; use basic_types::Seal; -use engines::Engine; -use service::ClientIoMessage; -use env_info::LastHashes; -use verification; -use verification::{PreverifiedBlock, Verifier}; use block::*; -use transaction::{LocalizedTransaction, UnverifiedTransaction, SignedTransaction, Transaction, PendingTransaction, Action}; -use blockchain::extras::TransactionAddress; -use types::filter::Filter; -use types::mode::Mode as IpcMode; -use log_entry::LocalizedLogEntry; -use verification::queue::BlockQueue; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; +use blockchain::extras::TransactionAddress; +use client::Error as ClientError; use client::{ BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient, MiningBlockChainClient, EngineClient, TraceFilter, CallAnalytics, BlockImportError, Mode, ChainNotify, PruningInfo, }; -use client::Error as ClientError; -use env_info::EnvInfo; -use executive::{Executive, Executed, TransactOptions, contract_address}; -use receipt::{Receipt, LocalizedReceipt}; -use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; -use trace; -use trace::FlatTransactionTraces; -use evm::{Factory as EvmFactory, Schedule}; -use miner::{Miner, MinerService, TransactionImportResult}; -use snapshot::{self, io as snapshot_io}; -use factory::Factories; -use rlp::UntrustedRlp; -use state_db::StateDB; -use rand::OsRng; -use client::registry::Registry; use encoded; +use engines::Engine; +use env_info::EnvInfo; +use env_info::LastHashes; +use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError}; +use evm::{Factory as EvmFactory, Schedule}; +use executive::{Executive, Executed, TransactOptions, contract_address}; +use factory::Factories; +use futures::{future, Future}; +use header::BlockNumber; +use io::*; +use log_entry::LocalizedLogEntry; +use miner::{Miner, MinerService, TransactionImportResult}; +use native_contracts::Registry; +use rand::OsRng; +use receipt::{Receipt, LocalizedReceipt}; +use rlp::UntrustedRlp; +use service::ClientIoMessage; +use snapshot::{self, io as snapshot_io}; +use spec::Spec; +use state_db::StateDB; +use state::{self, State, CleanupMode}; +use trace; +use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; +use trace::FlatTransactionTraces; +use transaction::{LocalizedTransaction, UnverifiedTransaction, SignedTransaction, Transaction, PendingTransaction, Action}; +use types::filter::Filter; +use types::mode::Mode as IpcMode; +use verification; +use verification::{PreverifiedBlock, Verifier}; +use verification::queue::BlockQueue; +use views::BlockView; // re-export pub use types::blockchain_info::BlockChainInfo; @@ -254,8 +255,7 @@ impl Client { if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) { trace!(target: "client", "Found registrar at {}", reg_addr); - let weak = Arc::downgrade(&client); - let registrar = 
Registry::new(reg_addr, move |a, d| weak.upgrade().ok_or("No client!".into()).and_then(|c| c.call_contract(BlockId::Latest, a, d))); + let registrar = Registry::new(reg_addr); *client.registrar.lock() = Some(registrar); } Ok(client) @@ -1488,12 +1488,17 @@ impl BlockChainClient for Client { } fn registrar_address(&self) -> Option<Address>
{ - self.registrar.lock().as_ref().map(|r| r.address.clone()) + self.registrar.lock().as_ref().map(|r| r.address) } fn registry_address(&self, name: String) -> Option<Address>
{ self.registrar.lock().as_ref() - .and_then(|r| r.get_address(&(name.as_bytes().sha3()), "A").ok()) + .and_then(|r| { + let dispatch = move |reg_addr, data| { + future::done(self.call_contract(BlockId::Latest, reg_addr, data)) + }; + r.get_address(dispatch, name.as_bytes().sha3(), "A".to_string()).wait().ok() + }) .and_then(|a| if a.is_zero() { None } else { Some(a) }) } } diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index f2479b17e..6c1280de7 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -16,7 +16,6 @@ //! Blockchain database client. -mod registry; mod config; mod error; mod test_client; diff --git a/ethcore/src/client/registry.rs b/ethcore/src/client/registry.rs deleted file mode 100644 index fb74ec36b..000000000 --- a/ethcore/src/client/registry.rs +++ /dev/null @@ -1,338 +0,0 @@ -// Autogenerated from JSON contract definition using Rust contract convertor. -// Command line: --name=Registry --jsonabi=/Users/gav/registry.abi -#![allow(unused_imports)] -use std::string::String; -use std::result::Result; -use std::fmt; -use {util, ethabi}; -use util::{Uint}; - -pub struct Registry { - contract: ethabi::Contract, - pub address: util::Address, - do_call: Box) -> Result, String> + Send + Sync + 'static>, -} -impl Registry { - pub fn new(address: util::Address, do_call: F) -> Self - where F: Fn(util::Address, Vec) -> Result, String> + Send + Sync + 'static { - Registry { - contract: ethabi::Contract::new(ethabi::Interface::load(b"[{\"constant\":true,\"inputs\":[{\"name\":\"_data\",\"type\":\"address\"}],\"name\":\"canReverse\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setOwner\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"},{\"name\":\"_value\",\"type\":\"bytes32\"}],\"name\":\"setData\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"string\"}],\"name\":\"confirmReverse\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"reserve\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":true,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"drop\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"}],\"name\":\"getAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"setFee\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_to\",\"type\":\"address\"}],\"name\":\"transfer\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes3
2\"},{\"name\":\"_key\",\"type\":\"string\"}],\"name\":\"getData\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"reserved\",\"outputs\":[{\"name\":\"reserved\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[],\"name\":\"drain\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"string\"},{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"proposeReverse\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"hasReverse\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"}],\"name\":\"getUint\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"fee\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"getOwner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"getReverse\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_data\",\"type\":\"address\"}],\"name\":\"reverse\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"},{\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"setUint\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"string\"},{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"confirmReverseAs\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[],\"name\":\"removeReverse\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"},{\"name\":\"_value\",\"type\":\"address\"}],\"name\":\"setAddress\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"}]").expect("JSON is autogenerated; qed")), - address: address, - do_call: Box::new(do_call), - } - } - fn as_string(e: T) -> String { format!("{:?}", e) } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"canReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn can_reverse(&self, _data: &util::Address) -> Result - { - let call = self.contract.function("canReverse".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_data.clone().0)] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - 
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn set_owner(&self, _new: &util::Address) -> Result<(), String> - { - let call = self.contract.function("setOwner".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_new.clone().0)] - ).map_err(Self::as_string)?; - call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - - Ok(()) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"bytes32"}],"name":"setData","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn set_data(&self, _name: &util::H256, _key: &str, _value: &util::H256) -> Result - { - let call = self.contract.function("setData".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned()), ethabi::Token::FixedBytes(_value.as_ref().to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"string"}],"name":"confirmReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn confirm_reverse(&self, _name: &str) -> Result - { - let call = self.contract.function("confirmReverse".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::String(_name.to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[{"name":"success","type":"bool"}],"payable":true,"type":"function"}` - #[allow(dead_code)] - pub fn reserve(&self, _name: &util::H256) -> Result - { - let call = self.contract.function("reserve".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"drop","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn drop(&self, _name: &util::H256) -> Result - { - let call = self.contract.function("drop".into()).map_err(Self::as_string)?; - let data = call.encode_call( - 
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn get_address(&self, _name: &util::H256, _key: &str) -> Result - { - let call = self.contract.function("getAddress".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_amount","type":"uint256"}],"name":"setFee","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn set_fee(&self, _amount: util::U256) -> Result - { - let call = self.contract.function("setFee".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Uint({ let mut r = [0u8; 32]; _amount.to_big_endian(&mut r); r })] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_to","type":"address"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn transfer(&self, _name: &util::H256, _to: &util::Address) -> Result - { - let call = self.contract.function("transfer".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::Address(_to.clone().0)] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn owner(&self) -> Result - { - let call = self.contract.function("owner".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; 
util::Address::from(r) })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getData","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn get_data(&self, _name: &util::H256, _key: &str) -> Result - { - let call = self.contract.function("getData".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserved","outputs":[{"name":"reserved","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn reserved(&self, _name: &util::H256) -> Result - { - let call = self.contract.function("reserved".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[],"name":"drain","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn drain(&self) -> Result - { - let call = self.contract.function("drain".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"proposeReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn propose_reverse(&self, _name: &str, _who: &util::Address) -> Result - { - let call = self.contract.function("proposeReverse".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::String(_name.to_owned()), ethabi::Token::Address(_who.clone().0)] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"hasReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn has_reverse(&self, _name: &util::H256) -> Result - { - let call = self.contract.function("hasReverse".into()).map_err(Self::as_string)?; - let data = 
call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn get_uint(&self, _name: &util::H256, _key: &str) -> Result - { - let call = self.contract.function("getUint".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn fee(&self) -> Result - { - let call = self.contract.function("fee".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getOwner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn get_owner(&self, _name: &util::H256) -> Result - { - let call = self.contract.function("getOwner".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getReverse","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn get_reverse(&self, _name: &util::H256) -> Result - { - let call = self.contract.function("getReverse".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) })) - } - - /// Auto-generated from: 
`{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"reverse","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn reverse(&self, _data: &util::Address) -> Result - { - let call = self.contract.function("reverse".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_data.clone().0)] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_string().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn set_uint(&self, _name: &util::H256, _key: &str, _value: util::U256) -> Result - { - let call = self.contract.function("setUint".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned()), ethabi::Token::Uint({ let mut r = [0u8; 32]; _value.to_big_endian(&mut r); r })] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"confirmReverseAs","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn confirm_reverse_as(&self, _name: &str, _who: &util::Address) -> Result - { - let call = self.contract.function("confirmReverseAs".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::String(_name.to_owned()), ethabi::Token::Address(_who.clone().0)] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn remove_reverse(&self) -> Result<(), String> - { - let call = self.contract.function("removeReverse".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![] - ).map_err(Self::as_string)?; - call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - - Ok(()) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddress","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn set_address(&self, _name: &util::H256, _key: &str, _value: &util::Address) -> Result - { - let call = self.contract.function("setAddress".into()).map_err(Self::as_string)?; - let data = call.encode_call( - 
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned()), ethabi::Token::Address(_value.clone().0)] - ).map_err(Self::as_string)?; - let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } -} - diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index c4790f4d2..8f9ed2e5f 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -79,37 +79,39 @@ //! cargo build --release //! ``` -extern crate ethcore_io as io; -extern crate rustc_serialize; -extern crate crypto; -extern crate time; -extern crate env_logger; -extern crate num_cpus; -extern crate crossbeam; -extern crate ethjson; -extern crate bloomchain; -extern crate hyper; -extern crate ethash; -extern crate ethkey; -extern crate semver; -extern crate ethcore_ipc_nano as nanoipc; -extern crate ethcore_devtools as devtools; -extern crate rand; extern crate bit_set; -extern crate rlp; -extern crate ethcore_bloom_journal as bloom_journal; +extern crate bloomchain; +extern crate bn; extern crate byteorder; -extern crate transient_hashmap; +extern crate crossbeam; +extern crate crypto; +extern crate env_logger; +extern crate ethabi; +extern crate ethash; +extern crate ethcore_bloom_journal as bloom_journal; +extern crate ethcore_devtools as devtools; +extern crate ethcore_io as io; +extern crate ethcore_ipc_nano as nanoipc; +extern crate ethcore_logger; +extern crate ethcore_stratum; +extern crate ethjson; +extern crate ethkey; +extern crate futures; +extern crate hardware_wallet; +extern crate hyper; +extern crate itertools; extern crate linked_hash_map; extern crate lru_cache; -extern crate ethcore_stratum; -extern crate ethabi; -extern crate hardware_wallet; -extern crate stats; -extern crate ethcore_logger; +extern crate native_contracts; +extern crate num_cpus; extern crate num; -extern crate bn; -extern crate itertools; +extern crate rand; +extern crate rlp; +extern crate rustc_serialize; +extern crate semver; +extern crate stats; +extern crate time; +extern crate transient_hashmap; #[macro_use] extern crate log; diff --git a/ethcore/src/miner/service_transaction_checker.rs b/ethcore/src/miner/service_transaction_checker.rs index 8a97ed9dc..a0a75647f 100644 --- a/ethcore/src/miner/service_transaction_checker.rs +++ b/ethcore/src/miner/service_transaction_checker.rs @@ -14,9 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use types::ids::BlockId; use client::MiningBlockChainClient; use transaction::SignedTransaction; +use types::ids::BlockId; + +use futures::{future, Future}; +use native_contracts::ServiceTransactionChecker as Contract; use util::{U256, Uint, Mutex}; const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker"; @@ -24,7 +27,7 @@ const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transa /// Service transactions checker. 
#[derive(Default)] pub struct ServiceTransactionChecker { - contract: Mutex>, + contract: Mutex>, } impl ServiceTransactionChecker { @@ -36,7 +39,7 @@ impl ServiceTransactionChecker { .and_then(|contract_addr| { trace!(target: "txqueue", "Configuring for service transaction checker contract from {}", contract_addr); - Some(provider::Contract::new(contract_addr)) + Some(Contract::new(contract_addr)) }) } } @@ -46,168 +49,12 @@ impl ServiceTransactionChecker { debug_assert_eq!(tx.gas_price, U256::zero()); if let Some(ref contract) = *self.contract.lock() { - let do_call = |a, d| client.call_contract(BlockId::Latest, a, d); - contract.certified(&do_call, &tx.sender()) + contract.certified( + |addr, data| future::done(client.call_contract(BlockId::Latest, addr, data)), + tx.sender() + ).wait() } else { Err("contract is not configured".to_owned()) } } } - -mod provider { - // Autogenerated from JSON contract definition using Rust contract convertor. - // Command line: --jsonabi=SimpleCertifier.abi --explicit-do-call - #![allow(unused_imports)] - use std::string::String; - use std::result::Result; - use std::fmt; - use {util, ethabi}; - use util::{Uint}; - - pub struct Contract { - contract: ethabi::Contract, - address: util::Address, - - } - impl Contract { - pub fn new(address: util::Address) -> Self - { - Contract { - contract: ethabi::Contract::new(ethabi::Interface::load(b"[{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setOwner\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"certify\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"getAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"revoke\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"delegate\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"getUint\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setDelegate\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"certified\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"get\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"type\":\"function\"}]").expect("JSON is autogenerated; qed")), - address: address, - - } - } - fn as_string(e: T) -> String { format!("{:?}", e) } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn set_owner(&self, do_call: &F, _new: &util::Address) -> Result<(), String> - 
where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = self.contract.function("setOwner".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_new.clone().0)] - ).map_err(Self::as_string)?; - call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - - Ok(()) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn certify(&self, do_call: &F, _who: &util::Address) -> Result<(), String> - where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = self.contract.function("certify".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_who.clone().0)] - ).map_err(Self::as_string)?; - call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - - Ok(()) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn get_address(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result - where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = self.contract.function("getAddress".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn revoke(&self, do_call: &F, _who: &util::Address) -> Result<(), String> - where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = self.contract.function("revoke".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_who.clone().0)] - ).map_err(Self::as_string)?; - call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - - Ok(()) - } - - /// Auto-generated from: `{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn owner(&self, do_call: &F) -> Result - where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = self.contract.function("owner".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![] - ).map_err(Self::as_string)?; - let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn delegate(&self, do_call: &F) -> Result - where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = 
self.contract.function("delegate".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![] - ).map_err(Self::as_string)?; - let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn get_uint(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result - where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = self.contract.function("getUint".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) })) - } - - /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn set_delegate(&self, do_call: &F, _new: &util::Address) -> Result<(), String> - where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = self.contract.function("setDelegate".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_new.clone().0)] - ).map_err(Self::as_string)?; - call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - - Ok(()) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn certified(&self, do_call: &F, _who: &util::Address) -> Result - where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = self.contract.function("certified".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_who.clone().0)] - ).map_err(Self::as_string)?; - let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) - } - - /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}` - #[allow(dead_code)] - pub fn get(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result - where F: Fn(util::Address, Vec) -> Result, String> + Send { - let call = self.contract.function("get".into()).map_err(Self::as_string)?; - let data = call.encode_call( - vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())] - ).map_err(Self::as_string)?; - let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; - let mut result = 
output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) })) - } - } -} From 41700a6996b5680b6dd7f636f5e6080761a5abae Mon Sep 17 00:00:00 2001 From: Christopher Franko Date: Mon, 3 Apr 2017 03:59:08 -0400 Subject: [PATCH 3/6] Update expanse json with fork at block 600000 (#5351) * Update expanse json with fork at block 600000 * update exp chainID to 2 --- ethcore/res/ethereum/expanse.json | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/ethcore/res/ethereum/expanse.json b/ethcore/res/ethereum/expanse.json index 17e25c22a..3ec04052b 100644 --- a/ethcore/res/ethereum/expanse.json +++ b/ethcore/res/ethereum/expanse.json @@ -15,11 +15,11 @@ "difficultyHardforkTransition": "0x59d9", "difficultyHardforkBoundDivisor": "0x0200", "bombDefuseTransition": "0x30d40", - "eip150Transition": "0x7fffffffffffffff", - "eip155Transition": "0x7fffffffffffffff", - "eip160Transition": "0x7fffffffffffffff", - "eip161abcTransition": "0x7fffffffffffffff", - "eip161dTransition": "0x7fffffffffffffff" + "eip150Transition": "0x927C0", + "eip155Transition": "0x927C0", + "eip160Transition": "0x927C0", + "eip161abcTransition": "0x927C0", + "eip161dTransition": "0x927C0" } } }, @@ -28,6 +28,7 @@ "maximumExtraDataSize": "0x20", "minGasLimit": "0x1388", "networkID": "0x1", + "chainID": "0x2", "subprotocolName": "exp", "eip98Transition": "0x7fffffffffffff" }, From 6a05967bef1f9abcf548b4c9f9bff1aefb3a1ac2 Mon Sep 17 00:00:00 2001 From: Jaco Greeff Date: Mon, 3 Apr 2017 10:25:21 +0200 Subject: [PATCH 4/6] trigger js build release (#5379) --- js/scripts/test.js | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/js/scripts/test.js b/js/scripts/test.js index f5bfb0835..e426642db 100644 --- a/js/scripts/test.js +++ b/js/scripts/test.js @@ -1,2 +1 @@ -// test script 9 -// trigger rebuild on master 15 Mar 2017, 11:19 +// test script 10 From 2df4532d5078ef5cac8ccc61774d4cb8b3ba5438 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 3 Apr 2017 10:27:37 +0200 Subject: [PATCH 5/6] Dapps and RPC server merge (#5365) * Dapps server as a middleware. * Dapps RPC - Work in Progress * Merging Dapps and RPC server. * Fast HTTP server configuration. * Bump jsonrpc * Fixing test target * Re-implementing commented-out tests. 
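As an aside for readers of this patch (not part of the change itself): the key idea of the merge is that the dapps server no longer runs its own hyper listener; it becomes a request middleware that the RPC HTTP server consults before dispatching JSON-RPC, as the new "impl http::RequestMiddleware for Middleware" hunk further down shows. The following self-contained Rust sketch models that control flow only; MiddlewareAction, DappsMiddleware and RpcServer are illustrative stand-ins, not the actual jsonrpc-http-server or parity-dapps APIs.

// Simplified model of the "dapps as RPC middleware" control flow.
// Every type below is an illustrative stand-in, not the real crate API.

/// What a middleware may decide for an incoming HTTP request.
enum MiddlewareAction {
    /// The middleware produced a full response (e.g. served dapp content).
    Respond(String),
    /// The middleware is not interested; let the JSON-RPC handler run.
    Proceed,
}

/// A request middleware: sees every request before the RPC handler does.
trait RequestMiddleware {
    fn on_request(&self, path: &str) -> MiddlewareAction;
}

/// Stand-in for the dapps router from the patch: serves UI/content paths,
/// passes everything else through to JSON-RPC.
struct DappsMiddleware;

impl RequestMiddleware for DappsMiddleware {
    fn on_request(&self, path: &str) -> MiddlewareAction {
        if path.starts_with("/api") || path.starts_with("/ui") {
            MiddlewareAction::Respond(format!("dapps content for {}", path))
        } else {
            MiddlewareAction::Proceed
        }
    }
}

/// Stand-in for the merged HTTP server: one listener, middleware first,
/// JSON-RPC as the fallback handler.
struct RpcServer<M: RequestMiddleware> {
    middleware: M,
}

impl<M: RequestMiddleware> RpcServer<M> {
    fn handle(&self, path: &str, rpc_body: &str) -> String {
        match self.middleware.on_request(path) {
            MiddlewareAction::Respond(response) => response,
            MiddlewareAction::Proceed => format!("json-rpc response to {}", rpc_body),
        }
    }
}

fn main() {
    let server = RpcServer { middleware: DappsMiddleware };
    // Dapps path: answered by the middleware, never reaches JSON-RPC.
    println!("{}", server.handle("/ui/home", ""));
    // Plain RPC request: middleware proceeds, fallback handler answers.
    println!("{}", server.handle("/", r#"{"method":"eth_blockNumber","params":[],"id":1}"#));
}

In the patch itself the routing decision lives in router::Router, wrapped by the new Middleware type that implements http::RequestMiddleware, which is why the standalone dapps ServerBuilder/Server and its own hyper listener can be removed below.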
--- Cargo.lock | 148 +++++++--- Cargo.toml | 13 +- dapps/Cargo.toml | 7 +- dapps/src/api/api.rs | 40 +-- dapps/src/apps/fetcher/mod.rs | 10 +- dapps/src/endpoint.rs | 3 +- dapps/src/handlers/auth.rs | 44 --- dapps/src/handlers/mod.rs | 2 - dapps/src/lib.rs | 358 +++++-------------------- dapps/src/{router/mod.rs => router.rs} | 130 +++------ dapps/src/router/auth.rs | 106 -------- dapps/src/router/host_validation.rs | 42 --- dapps/src/rpc.rs | 4 +- dapps/src/tests/api.rs | 25 +- dapps/src/tests/authorization.rs | 80 ------ dapps/src/tests/helpers/mod.rs | 212 ++++++++++++--- dapps/src/tests/mod.rs | 1 - dapps/src/tests/rpc.rs | 73 +---- dapps/src/tests/validation.rs | 30 +-- hash-fetch/src/urlhint.rs | 3 +- ipfs/src/lib.rs | 3 +- parity/cli/mod.rs | 51 ++-- parity/cli/usage.txt | 30 +-- parity/configuration.rs | 63 ++--- parity/dapps.rs | 149 +++------- parity/deprecated.rs | 125 +++++---- parity/ipfs.rs | 3 +- parity/main.rs | 7 +- parity/rpc.rs | 133 +++++---- parity/rpc_apis.rs | 2 +- parity/run.rs | 43 ++- rpc/Cargo.toml | 1 + rpc/src/lib.rs | 100 ++++++- rpc/src/metadata.rs | 74 +++++ scripts/targets.sh | 2 +- 35 files changed, 869 insertions(+), 1248 deletions(-) delete mode 100644 dapps/src/handlers/auth.rs rename dapps/src/{router/mod.rs => router.rs} (71%) delete mode 100644 dapps/src/router/auth.rs delete mode 100644 dapps/src/router/host_validation.rs delete mode 100644 dapps/src/tests/authorization.rs create mode 100644 rpc/src/metadata.rs diff --git a/Cargo.lock b/Cargo.lock index 8a70f35a2..83e643900 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,7 +10,6 @@ dependencies = [ "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.7.0", - "ethcore-dapps 1.7.0", "ethcore-devtools 1.7.0", "ethcore-io 1.7.0", "ethcore-ipc 1.7.0", @@ -27,12 +26,12 @@ dependencies = [ "ethsync 1.7.0", "evmbin 0.1.0", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-dapps 1.7.0", "parity-hash-fetch 1.7.0", "parity-ipfs-api 1.7.0", "parity-local-store 0.1.0", @@ -40,6 +39,7 @@ dependencies = [ "parity-rpc-client 1.4.0", "parity-updater 1.7.0", "path 0.1.0", + "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", "rpassword 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -309,6 +309,11 @@ dependencies = [ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "difference" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "docopt" version = "0.7.0" @@ -446,40 +451,6 @@ dependencies = [ "siphasher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "ethcore-dapps" -version = "1.7.0" -dependencies = [ - "base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-devtools 1.7.0", - "ethcore-rpc 1.7.0", - "ethcore-util 1.7.0", - "fetch 0.1.0", - "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-hash-fetch 1.7.0", - "parity-reactor 0.1.0", - "parity-ui 1.7.0", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "ethcore-devtools" version = "1.7.0" @@ -642,6 +613,7 @@ dependencies = [ "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", @@ -1088,7 +1060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1100,7 +1072,7 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1113,7 +1085,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" 
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1125,17 +1097,31 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "jsonrpc-minihttp-server" +version = "7.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" +dependencies = [ + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)", + "tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1145,7 +1131,7 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1156,7 +1142,7 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1604,6 +1590,38 @@ dependencies = [ "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parity-dapps" +version = "1.7.0" +dependencies = [ + "base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-devtools 1.7.0", + "ethcore-util 1.7.0", + "fetch 0.1.0", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-hash-fetch 1.7.0", + "parity-reactor 0.1.0", + "parity-ui 1.7.0", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "parity-dapps-glue" version = "1.7.0" @@ -1826,6 +1844,14 @@ name = "podio" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "pretty_assertions" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "primal" version = "0.2.3" @@ -2431,6 +2457,21 @@ dependencies = [ "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-minihttp" +version = "0.1.0" +source = "git+https://github.com/tomusdrw/tokio-minihttp#8acbafae3e77e7f7eb516b441ec84695580221dd" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-named-pipes" version = "0.1.0" @@ -2441,6 +2482,22 @@ dependencies = [ "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-proto" +version = "0.1.0" +source = "git+https://github.com/tomusdrw/tokio-proto#f6ee08cb594fa2fc1b4178eaaca0855d66e68fd3" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-proto" version = "0.1.0" @@ -2706,6 +2763,7 @@ dependencies = [ "checksum ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)" = "" "checksum daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "271ec51b7e0bee92f0d04601422c73eb76ececf197026711c97ad25038a010cf" "checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf" +"checksum difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3304d19798a8e067e48d8e69b2c37f0b5e9b4e462504ad9e27e9f3fce02bba8" "checksum docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8" "checksum dtoa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f" "checksum either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2b503c86dad62aaf414ecf2b8c527439abedb3f8d812537f0b12bfd6f32a91" @@ -2739,6 +2797,7 @@ dependencies = [ "checksum jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" @@ -2802,6 +2861,7 @@ dependencies = [ "checksum phf_shared 0.7.14 (registry+https://github.com/rust-lang/crates.io-index)" = "fee4d039930e4f45123c9b15976cf93a499847b6483dc09c42ea0ec4940f2aa6" "checksum pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8cee804ecc7eaf201a4a207241472cc870e825206f6c031e3ee2a72fa425f2fa" "checksum podio 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e5422a1ee1bc57cc47ae717b0137314258138f38fd5f3cea083f43a9725383a0" +"checksum pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2412f3332a07c7a2a50168988dcc184f32180a9758ad470390e5f55e089f6b6e" "checksum primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0e31b86efadeaeb1235452171a66689682783149a6249ff334a2c5d8218d00a4" "checksum primal-bit 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "464a91febc06166783d4f5ba3577b5ed8dda8e421012df80bfe48a971ed7be8f" "checksum primal-check 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "647c81b67bb9551a7b88d0bcd785ac35b7d0bf4b2f358683d7c2375d04daec51" @@ -2871,7 +2931,9 @@ dependencies = [ "checksum tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3d1be481b55126f02ef88ff86748086473cb537a949fc4a8f4be403a530ae54b" "checksum tokio-io 
0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a278fde45f1be68e44995227d426aaa4841e0980bb0a21b981092f28c3c8473" "checksum tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)" = "" +"checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "" "checksum tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)" = "" +"checksum tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)" = "" "checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc7b5fc8e19e220b29566d1750949224a518478eab9cebc8df60583242ca30a" diff --git a/Cargo.toml b/Cargo.toml index 737c21b09..b82490e88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,6 @@ serde_json = "0.9" app_dirs = "1.1.1" fdlimit = "0.1" ws2_32-sys = "0.2" -hyper = { default-features = false, git = "https://github.com/paritytech/hyper" } ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethsync = { path = "sync" } @@ -50,8 +49,9 @@ parity-ipfs-api = { path = "ipfs" } parity-updater = { path = "updater" } parity-reactor = { path = "util/reactor" } parity-local-store = { path = "local-store" } -ethcore-dapps = { path = "dapps", optional = true } path = { path = "util/path" } + +parity-dapps = { path = "dapps", optional = true } clippy = { version = "0.0.103", optional = true} ethcore-secretstore = { path = "secret_store", optional = true } @@ -60,6 +60,7 @@ rustc_version = "0.2" [dev-dependencies] ethcore-ipc-tests = { path = "ipc/tests" } +pretty_assertions = "0.1" [target.'cfg(windows)'.dependencies] winapi = "0.2" @@ -71,18 +72,18 @@ daemonize = "0.2" default = ["ui-precompiled"] ui = [ "dapps", - "ethcore-dapps/ui", + "parity-dapps/ui", "ethcore-signer/ui", ] ui-precompiled = [ "dapps", "ethcore-signer/ui-precompiled", - "ethcore-dapps/ui-precompiled", + "parity-dapps/ui-precompiled", ] -dapps = ["ethcore-dapps"] +dapps = ["parity-dapps"] ipc = ["ethcore/ipc", "ethsync/ipc"] jit = ["ethcore/jit"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "parity-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] test-heavy = ["ethcore/test-heavy"] ethkey-cli = ["ethcore/ethkey-cli"] diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 07f136d78..429ed01f5 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "Parity Dapps crate" -name = "ethcore-dapps" +name = "parity-dapps" version = "1.7.0" license = "GPL-3.0" authors = ["Parity Technologies "] @@ -28,11 +28,8 @@ zip = { version = "0.1", default-features = false } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -# TODO [ToDr] Temporary solution, server should be merged with RPC. 
-jsonrpc-server-utils = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-devtools = { path = "../devtools" } -ethcore-rpc = { path = "../rpc" } ethcore-util = { path = "../util" } fetch = { path = "../util/fetch" } parity-hash-fetch = { path = "../hash-fetch" } @@ -42,7 +39,7 @@ parity-ui = { path = "./ui" } clippy = { version = "0.0.103", optional = true} [features] -dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"] +dev = ["clippy", "ethcore-util/dev"] ui = ["parity-ui/no-precompiled-js"] ui-precompiled = ["parity-ui/use-precompiled-js"] diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index e07bd4535..df3386358 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::sync::Arc; use unicase::UniCase; use hyper::{server, net, Decoder, Encoder, Next, Control}; use hyper::header; @@ -26,48 +25,49 @@ use apps::fetcher::Fetcher; use handlers::extract_url; use endpoint::{Endpoint, Endpoints, Handler, EndpointPath}; -use jsonrpc_http_server; -use jsonrpc_server_utils::cors; +use jsonrpc_http_server::{self, AccessControlAllowOrigin}; #[derive(Clone)] -pub struct RestApi { - cors_domains: Option>, - endpoints: Arc, - fetcher: Arc, +pub struct RestApi { + // TODO [ToDr] cors_domains should be handled by the server to avoid duplicated logic. + // RequestMiddleware should be able to tell that cors headers should be included. + cors_domains: Option>, + apps: Vec, + fetcher: F, } -impl RestApi { - pub fn new(cors_domains: Vec, endpoints: Arc, fetcher: Arc) -> Box { +impl RestApi { + pub fn new(cors_domains: Vec, endpoints: &Endpoints, fetcher: F) -> Box { Box::new(RestApi { cors_domains: Some(cors_domains), - endpoints: endpoints, + apps: Self::list_apps(endpoints), fetcher: fetcher, }) } - fn list_apps(&self) -> Vec { - self.endpoints.iter().filter_map(|(ref k, ref e)| { + fn list_apps(endpoints: &Endpoints) -> Vec { + endpoints.iter().filter_map(|(ref k, ref e)| { e.info().map(|ref info| App::from_info(k, info)) }).collect() } } -impl Endpoint for RestApi { +impl Endpoint for RestApi { fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box { - Box::new(RestApiRouter::new(self.clone(), path, control)) + Box::new(RestApiRouter::new((*self).clone(), path, control)) } } -struct RestApiRouter { - api: RestApi, +struct RestApiRouter { + api: RestApi, cors_header: Option, path: Option, control: Option, handler: Box, } -impl RestApiRouter { - fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { +impl RestApiRouter { + fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { RestApiRouter { path: Some(path), cors_header: None, @@ -114,7 +114,7 @@ impl RestApiRouter { } } -impl server::Handler for RestApiRouter { +impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains).into(); @@ -142,7 +142,7 @@ impl server::Handler for RestApiRouter { if let Some(ref hash) = hash { path.app_id = hash.clone().to_owned() } let handler = endpoint.and_then(|v| match v { - "apps" => Some(response::as_json(&self.api.list_apps())), + "apps" => Some(response::as_json(&self.api.apps)), "ping" => Some(response::ping()), "content" => self.resolve_content(hash, path, control), _ => None diff --git a/dapps/src/apps/fetcher/mod.rs b/dapps/src/apps/fetcher/mod.rs index 
c2607fe43..a824134cb 100644 --- a/dapps/src/apps/fetcher/mod.rs +++ b/dapps/src/apps/fetcher/mod.rs @@ -47,7 +47,8 @@ pub trait Fetcher: Send + Sync + 'static { fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box; } -pub struct ContentFetcher { +#[derive(Clone)] +pub struct ContentFetcher { dapps_path: PathBuf, resolver: R, cache: Arc>, @@ -57,14 +58,14 @@ pub struct ContentFetcher Drop for ContentFetcher { +impl Drop for ContentFetcher { fn drop(&mut self) { // Clear cache path let _ = fs::remove_dir_all(&self.dapps_path); } } -impl ContentFetcher { +impl ContentFetcher { pub fn new(resolver: R, sync_status: Arc, embeddable_on: Option<(String, u16)>, remote: Remote, fetch: F) -> Self { let mut dapps_path = env::temp_dir(); @@ -97,7 +98,7 @@ impl ContentFetcher { } } -impl Fetcher for ContentFetcher { +impl Fetcher for ContentFetcher { fn contains(&self, content_id: &str) -> bool { { let mut cache = self.cache.lock(); @@ -233,6 +234,7 @@ mod tests { use page::LocalPageEndpoint; use super::{ContentFetcher, Fetcher}; + #[derive(Clone)] struct FakeResolver; impl URLHint for FakeResolver { fn resolve(&self, _id: Bytes) -> Option { diff --git a/dapps/src/endpoint.rs b/dapps/src/endpoint.rs index 648d82ff8..ea5825b74 100644 --- a/dapps/src/endpoint.rs +++ b/dapps/src/endpoint.rs @@ -16,9 +16,10 @@ //! URL Endpoint traits -use hyper::{self, server, net}; use std::collections::BTreeMap; +use hyper::{self, server, net}; + #[derive(Debug, PartialEq, Default, Clone)] pub struct EndpointPath { pub app_id: String, diff --git a/dapps/src/handlers/auth.rs b/dapps/src/handlers/auth.rs deleted file mode 100644 index db6018e0d..000000000 --- a/dapps/src/handlers/auth.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Authorization Handlers - -use hyper::{server, Decoder, Encoder, Next}; -use hyper::net::HttpStream; -use hyper::status::StatusCode; - -pub struct AuthRequiredHandler; - -impl server::Handler for AuthRequiredHandler { - fn on_request(&mut self, _request: server::Request) -> Next { - Next::write() - } - - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - Next::write() - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - res.set_status(StatusCode::Unauthorized); - res.headers_mut().set_raw("WWW-Authenticate", vec![b"Basic realm=\"Parity\"".to_vec()]); - Next::write() - } - - fn on_response_writable(&mut self, _encoder: &mut Encoder) -> Next { - Next::end() - } -} - diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs index cec7be631..3e2daf462 100644 --- a/dapps/src/handlers/mod.rs +++ b/dapps/src/handlers/mod.rs @@ -16,14 +16,12 @@ //! Hyper handlers implementations. 
-mod auth; mod content; mod echo; mod fetch; mod redirect; mod streaming; -pub use self::auth::AuthRequiredHandler; pub use self::content::ContentHandler; pub use self::echo::EchoHandler; pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl, ValidatorResponse}; diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 252e1c3bb..60aba30a4 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -34,9 +34,7 @@ extern crate zip; extern crate jsonrpc_core; extern crate jsonrpc_http_server; -extern crate jsonrpc_server_utils; -extern crate ethcore_rpc; extern crate ethcore_util as util; extern crate fetch; extern crate parity_dapps_glue as parity_dapps; @@ -61,7 +59,6 @@ mod apps; mod page; mod router; mod handlers; -mod rpc; mod api; mod proxypac; mod url; @@ -69,23 +66,16 @@ mod web; #[cfg(test)] mod tests; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::sync::Arc; -use std::net::SocketAddr; use std::collections::HashMap; -use jsonrpc_core::{Middleware, MetaIoHandler}; -use jsonrpc_http_server::tokio_core::reactor::Remote as TokioRemote; -pub use jsonrpc_http_server::{DomainsValidation, Host, AccessControlAllowOrigin}; -pub use jsonrpc_http_server::hyper; +use jsonrpc_http_server::{self as http, hyper, AccessControlAllowOrigin}; -use ethcore_rpc::Metadata; -use fetch::{Fetch, Client as FetchClient}; -use hash_fetch::urlhint::ContractClient; +use fetch::Fetch; use parity_reactor::Remote; -use router::auth::{Authorization, NoAuth, HttpBasicAuth}; -use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; +pub use hash_fetch::urlhint::ContractClient; /// Indicates sync status pub trait SyncStatus: Send + Sync { @@ -107,296 +97,92 @@ impl WebProxyTokens for F where F: Fn(String) -> bool + Send + Sync { fn is_web_proxy_token_valid(&self, token: &str) -> bool { self(token.to_owned()) } } -/// Webapps HTTP+RPC server build. -pub struct ServerBuilder { - dapps_path: PathBuf, - extra_dapps: Vec, - registrar: Arc, - sync_status: Arc, - web_proxy_tokens: Arc, - signer_address: Option<(String, u16)>, - allowed_hosts: Option>, - extra_cors: Option>, - remote: Remote, - fetch: Option, +/// Dapps server as `jsonrpc-http-server` request middleware. +pub struct Middleware { + router: router::Router>, } -impl ServerBuilder { - /// Construct new dapps server - pub fn new>(dapps_path: P, registrar: Arc, remote: Remote) -> Self { - ServerBuilder { - dapps_path: dapps_path.as_ref().to_owned(), - extra_dapps: vec![], - registrar: registrar, - sync_status: Arc::new(|| false), - web_proxy_tokens: Arc::new(|_| false), - signer_address: None, - allowed_hosts: Some(vec![]), - extra_cors: None, - remote: remote, - fetch: None, - } - } -} - -impl ServerBuilder { - /// Set a fetch client to use. - pub fn fetch(self, fetch: X) -> ServerBuilder { - ServerBuilder { - dapps_path: self.dapps_path, - extra_dapps: vec![], - registrar: self.registrar, - sync_status: self.sync_status, - web_proxy_tokens: self.web_proxy_tokens, - signer_address: self.signer_address, - allowed_hosts: self.allowed_hosts, - extra_cors: self.extra_cors, - remote: self.remote, - fetch: Some(fetch), - } - } - - /// Change default sync status. - pub fn sync_status(mut self, status: Arc) -> Self { - self.sync_status = status; - self - } - - /// Change default web proxy tokens validator. - pub fn web_proxy_tokens(mut self, tokens: Arc) -> Self { - self.web_proxy_tokens = tokens; - self - } - - /// Change default signer port. 
- pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self { - self.signer_address = signer_address; - self - } - - /// Change allowed hosts. - /// `None` - All hosts are allowed - /// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address) - pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation) -> Self { - self.allowed_hosts = allowed_hosts.into(); - self - } - - /// Extra cors headers. - /// `None` - no additional CORS URLs - pub fn extra_cors_headers(mut self, cors: DomainsValidation) -> Self { - self.extra_cors = cors.into(); - self - } - - /// Change extra dapps paths (apart from `dapps_path`) - pub fn extra_dapps>(mut self, extra_dapps: &[P]) -> Self { - self.extra_dapps = extra_dapps.iter().map(|p| p.as_ref().to_owned()).collect(); - self - } - - /// Asynchronously start server with no authentication, - /// returns result with `Server` handle on success or an error. - pub fn start_unsecured_http>(self, addr: &SocketAddr, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { - let fetch = self.fetch_client()?; - Server::start_http( - addr, - self.allowed_hosts, - self.extra_cors, - NoAuth, - handler, - self.dapps_path, - self.extra_dapps, - self.signer_address, - self.registrar, - self.sync_status, - self.web_proxy_tokens, - self.remote, - tokio_remote, - fetch, - ) - } - - /// Asynchronously start server with `HTTP Basic Authentication`, - /// return result with `Server` handle on success or an error. - pub fn start_basic_auth_http>(self, addr: &SocketAddr, username: &str, password: &str, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { - let fetch = self.fetch_client()?; - Server::start_http( - addr, - self.allowed_hosts, - self.extra_cors, - HttpBasicAuth::single_user(username, password), - handler, - self.dapps_path, - self.extra_dapps, - self.signer_address, - self.registrar, - self.sync_status, - self.web_proxy_tokens, - self.remote, - tokio_remote, - fetch, - ) - } - - fn fetch_client(&self) -> Result { - match self.fetch.clone() { - Some(fetch) => Ok(fetch), - None => T::new().map_err(|_| ServerError::FetchInitialization), - } - } -} - -/// Webapps HTTP server. -pub struct Server { - server: Option, -} - -impl Server { - /// Returns a list of allowed hosts or `None` if all hosts are allowed. - fn allowed_hosts(hosts: Option>, bind_address: String) -> Option> { - let mut allowed = Vec::new(); - - match hosts { - Some(hosts) => allowed.extend_from_slice(&hosts), - None => return None, - } - - // Add localhost domain as valid too if listening on loopback interface. - allowed.push(bind_address.replace("127.0.0.1", "localhost").into()); - allowed.push(bind_address.into()); - Some(allowed) - } - - /// Returns a list of CORS domains for API endpoint. - fn cors_domains( +impl Middleware { + /// Creates new Dapps server middleware. 
+ pub fn new( + remote: Remote, signer_address: Option<(String, u16)>, - extra_cors: Option>, - ) -> Vec { - let basic_cors = match signer_address { - Some(signer_address) => [ - format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), - format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), - format!("http://{}", address(&signer_address)), - format!("https://{}{}", HOME_PAGE, DAPPS_DOMAIN), - format!("https://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), - format!("https://{}", address(&signer_address)), - ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), - None => vec![], - }; - - match extra_cors { - None => basic_cors, - Some(extra_cors) => basic_cors.into_iter().chain(extra_cors).collect(), - } - } - - fn start_http>( - addr: &SocketAddr, - hosts: Option>, - extra_cors: Option>, - authorization: A, - handler: MetaIoHandler, dapps_path: PathBuf, extra_dapps: Vec, - signer_address: Option<(String, u16)>, registrar: Arc, sync_status: Arc, web_proxy_tokens: Arc, - remote: Remote, - tokio_remote: TokioRemote, fetch: F, - ) -> Result { - let authorization = Arc::new(authorization); - let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( + ) -> Self { + let content_fetcher = apps::fetcher::ContentFetcher::new( hash_fetch::urlhint::URLHintContract::new(registrar), sync_status, signer_address.clone(), remote.clone(), fetch.clone(), - )); - let endpoints = Arc::new(apps::all_endpoints( + ); + let endpoints = apps::all_endpoints( dapps_path, extra_dapps, signer_address.clone(), web_proxy_tokens, remote.clone(), fetch.clone(), - )); - let cors_domains = Self::cors_domains(signer_address.clone(), extra_cors); + ); - let special = Arc::new({ + let cors_domains = cors_domains(signer_address.clone()); + + let special = { let mut special = HashMap::new(); - special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, tokio_remote, cors_domains.clone())); - special.insert(router::SpecialEndpoint::Utils, apps::utils()); + special.insert(router::SpecialEndpoint::Rpc, None); + special.insert(router::SpecialEndpoint::Utils, Some(apps::utils())); special.insert( router::SpecialEndpoint::Api, - api::RestApi::new(cors_domains, endpoints.clone(), content_fetcher.clone()) + Some(api::RestApi::new(cors_domains.clone(), &endpoints, content_fetcher.clone())), ); special - }); - let hosts = Self::allowed_hosts(hosts, format!("{}", addr)); + }; - hyper::Server::http(addr)? - .handle(move |ctrl| router::Router::new( - ctrl, - signer_address.clone(), - content_fetcher.clone(), - endpoints.clone(), - special.clone(), - authorization.clone(), - hosts.clone(), - )) - .map(|(l, srv)| { + let router = router::Router::new( + signer_address, + content_fetcher, + endpoints, + special, + ); - ::std::thread::spawn(move || { - srv.run(); - }); - - Server { - server: Some(l), - } - }) - .map_err(ServerError::from) - } - - #[cfg(test)] - /// Returns address that this server is bound to. 
- pub fn addr(&self) -> &SocketAddr { - self.server.as_ref() - .expect("server is always Some at the start; it's consumed only when object is dropped; qed") - .addrs() - .first() - .expect("You cannot start the server without binding to at least one address; qed") - } -} - -impl Drop for Server { - fn drop(&mut self) { - self.server.take().unwrap().close() - } -} - -/// Webapp Server startup error -#[derive(Debug)] -pub enum ServerError { - /// Wrapped `std::io::Error` - IoError(std::io::Error), - /// Other `hyper` error - Other(hyper::error::Error), - /// Fetch service initialization error - FetchInitialization, -} - -impl From for ServerError { - fn from(err: hyper::error::Error) -> Self { - match err { - hyper::error::Error::Io(e) => ServerError::IoError(e), - e => ServerError::Other(e), + Middleware { + router: router, } } } +impl http::RequestMiddleware for Middleware { + fn on_request(&self, req: &hyper::server::Request, control: &hyper::Control) -> http::RequestMiddlewareAction { + self.router.on_request(req, control) + } +} + +/// Returns a list of CORS domains for API endpoint. +fn cors_domains(signer_address: Option<(String, u16)>) -> Vec { + use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; + + match signer_address { + Some(signer_address) => [ + format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), + format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), + format!("http://{}", address(&signer_address)), + format!("https://{}{}", HOME_PAGE, DAPPS_DOMAIN), + format!("https://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), + format!("https://{}", address(&signer_address)), + ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), + None => vec![], + } +} + +fn address(address: &(String, u16)) -> String { + format!("{}:{}", address.0, address.1) +} + /// Random filename fn random_filename() -> String { use ::rand::Rng; @@ -404,39 +190,18 @@ fn random_filename() -> String { rng.gen_ascii_chars().take(12).collect() } -fn address(address: &(String, u16)) -> String { - format!("{}:{}", address.0, address.1) -} - #[cfg(test)] mod util_tests { - use super::Server; + use super::cors_domains; use jsonrpc_http_server::AccessControlAllowOrigin; - #[test] - fn should_return_allowed_hosts() { - // given - let bind_address = "127.0.0.1".to_owned(); - - // when - let all = Server::allowed_hosts(None, bind_address.clone()); - let address = Server::allowed_hosts(Some(Vec::new()), bind_address.clone()); - let some = Server::allowed_hosts(Some(vec!["ethcore.io".into()]), bind_address.clone()); - - // then - assert_eq!(all, None); - assert_eq!(address, Some(vec!["localhost".into(), "127.0.0.1".into()])); - assert_eq!(some, Some(vec!["ethcore.io".into(), "localhost".into(), "127.0.0.1".into()])); - } - #[test] fn should_return_cors_domains() { // given // when - let none = Server::cors_domains(None, None); - let some = Server::cors_domains(Some(("127.0.0.1".into(), 18180)), None); - let extra = Server::cors_domains(None, Some(vec!["all".into()])); + let none = cors_domains(None); + let some = cors_domains(Some(("127.0.0.1".into(), 18180))); // then assert_eq!(none, Vec::::new()); @@ -448,6 +213,5 @@ mod util_tests { "https://parity.web3.site:18180".into(), "https://127.0.0.1:18180".into(), ]); - assert_eq!(extra, vec![AccessControlAllowOrigin::Any]); } } diff --git a/dapps/src/router/mod.rs b/dapps/src/router.rs similarity index 71% rename from dapps/src/router/mod.rs rename to dapps/src/router.rs index 0b4e632a6..995565f26 100644 --- a/dapps/src/router/mod.rs +++ 
b/dapps/src/router.rs @@ -15,24 +15,20 @@ // along with Parity. If not, see . //! Router implementation -//! Processes request handling authorization and dispatching it to proper application. - -pub mod auth; -mod host_validation; +//! Dispatch requests to proper application. use address; use std::cmp; -use std::sync::Arc; use std::collections::HashMap; use url::{Url, Host}; -use hyper::{self, server, header, Next, Encoder, Decoder, Control, StatusCode}; +use hyper::{self, server, header, Control, StatusCode}; use hyper::net::HttpStream; -use jsonrpc_server_utils::hosts; +use jsonrpc_http_server as http; use apps::{self, DAPPS_DOMAIN}; use apps::fetcher::Fetcher; -use endpoint::{Endpoint, Endpoints, EndpointPath}; +use endpoint::{Endpoint, Endpoints, EndpointPath, Handler}; use handlers::{self, Redirection, ContentHandler}; /// Special endpoints are accessible on every domain (every dapp) @@ -44,51 +40,29 @@ pub enum SpecialEndpoint { None, } -pub struct Router { - control: Option, +pub struct Router { signer_address: Option<(String, u16)>, - endpoints: Arc, - fetch: Arc, - special: Arc>>, - authorization: Arc, - allowed_hosts: Option>, - handler: Box + Send>, + endpoints: Endpoints, + fetch: F, + special: HashMap>>, } -impl server::Handler for Router { - - fn on_request(&mut self, req: server::Request) -> Next { +impl http::RequestMiddleware for Router { + fn on_request(&self, req: &server::Request, control: &Control) -> http::RequestMiddlewareAction { // Choose proper handler depending on path / domain - let url = handlers::extract_url(&req); + let url = handlers::extract_url(req); let endpoint = extract_endpoint(&url); - let referer = extract_referer_endpoint(&req); + let referer = extract_referer_endpoint(req); let is_utils = endpoint.1 == SpecialEndpoint::Utils; + let is_dapps_domain = endpoint.0.as_ref().map(|endpoint| endpoint.using_dapps_domains).unwrap_or(false); + let is_origin_set = req.headers().get::().is_some(); let is_get_request = *req.method() == hyper::Method::Get; trace!(target: "dapps", "Routing request to {:?}. 
Details: {:?}", url, req); - // Validate Host header - trace!(target: "dapps", "Validating host headers against: {:?}", self.allowed_hosts); - let is_valid = is_utils || host_validation::is_valid(&req, &self.allowed_hosts); - if !is_valid { - debug!(target: "dapps", "Rejecting invalid host header."); - self.handler = host_validation::host_invalid_response(); - return self.handler.on_request(req); - } - - trace!(target: "dapps", "Checking authorization."); - // Check authorization - let auth = self.authorization.is_authorized(&req); - if let auth::Authorized::No(handler) = auth { - debug!(target: "dapps", "Authorization denied."); - self.handler = handler; - return self.handler.on_request(req); - } - - - let control = self.control.take().expect("on_request is called only once; control is always defined at start; qed"); + let control = control.clone(); debug!(target: "dapps", "Handling endpoint request: {:?}", endpoint); - self.handler = match (endpoint.0, endpoint.1, referer) { + let handler: Option> = match (endpoint.0, endpoint.1, referer) { // Handle invalid web requests that we can recover from (ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url))) if referer.app_id == apps::WEB_PATH @@ -100,26 +74,27 @@ impl server::Handler for Router let len = cmp::min(referer_url.path.len(), 2); // /web// let base = referer_url.path[..len].join("/"); let requested = url.map(|u| u.path.join("/")).unwrap_or_default(); - Redirection::boxed(&format!("/{}/{}", base, requested)) + Some(Redirection::boxed(&format!("/{}/{}", base, requested))) }, // First check special endpoints (ref path, ref endpoint, _) if self.special.contains_key(endpoint) => { trace!(target: "dapps", "Resolving to special endpoint."); self.special.get(endpoint) .expect("special known to contain key; qed") - .to_async_handler(path.clone().unwrap_or_default(), control) + .as_ref() + .map(|special| special.to_async_handler(path.clone().unwrap_or_default(), control)) }, // Then delegate to dapp (Some(ref path), _, _) if self.endpoints.contains_key(&path.app_id) => { trace!(target: "dapps", "Resolving to local/builtin dapp."); - self.endpoints.get(&path.app_id) + Some(self.endpoints.get(&path.app_id) .expect("endpoints known to contain key; qed") - .to_async_handler(path.clone(), control) + .to_async_handler(path.clone(), control)) }, // Try to resolve and fetch the dapp (Some(ref path), _, _) if self.fetch.contains(&path.app_id) => { trace!(target: "dapps", "Resolving to fetchable content."); - self.fetch.to_async_handler(path.clone(), control) + Some(self.fetch.to_async_handler(path.clone(), control)) }, // NOTE [todr] /home is redirected to home page since some users may have the redirection cached // (in the past we used 301 instead of 302) @@ -128,82 +103,61 @@ impl server::Handler for Router // 404 for non-existent content (Some(ref path), _, _) if is_get_request && path.app_id != "home" => { trace!(target: "dapps", "Resolving to 404."); - Box::new(ContentHandler::error( + Some(Box::new(ContentHandler::error( StatusCode::NotFound, "404 Not Found", "Requested content was not found.", None, self.signer_address.clone(), - )) + ))) }, // Redirect any other GET request to signer. 
_ if is_get_request => { if let Some(ref signer_address) = self.signer_address { trace!(target: "dapps", "Redirecting to signer interface."); - Redirection::boxed(&format!("http://{}", address(signer_address))) + Some(Redirection::boxed(&format!("http://{}", address(signer_address)))) } else { trace!(target: "dapps", "Signer disabled, returning 404."); - Box::new(ContentHandler::error( + Some(Box::new(ContentHandler::error( StatusCode::NotFound, "404 Not Found", "Your homepage is not available when Trusted Signer is disabled.", Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."), self.signer_address.clone(), - )) + ))) } }, // RPC by default _ => { trace!(target: "dapps", "Resolving to RPC call."); - self.special.get(&SpecialEndpoint::Rpc) - .expect("RPC endpoint always stored; qed") - .to_async_handler(EndpointPath::default(), control) + None } }; - // Delegate on_request to proper handler - self.handler.on_request(req) - } - - /// This event occurs each time the `Request` is ready to be read from. - fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { - self.handler.on_request_readable(decoder) - } - - /// This event occurs after the first time this handled signals `Next::write()`. - fn on_response(&mut self, response: &mut server::Response) -> Next { - self.handler.on_response(response) - } - - /// This event occurs each time the `Response` is ready to be written to. - fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - self.handler.on_response_writable(encoder) + match handler { + Some(handler) => http::RequestMiddlewareAction::Respond { + should_validate_hosts: !(is_utils || is_dapps_domain), + handler: handler, + }, + None => http::RequestMiddlewareAction::Proceed { + should_continue_on_invalid_cors: !is_origin_set, + }, + } } } -impl Router { +impl Router { pub fn new( - control: Control, signer_address: Option<(String, u16)>, - content_fetcher: Arc, - endpoints: Arc, - special: Arc>>, - authorization: Arc, - allowed_hosts: Option>, - ) -> Self { - - let handler = special.get(&SpecialEndpoint::Utils) - .expect("Utils endpoint always stored; qed") - .to_handler(EndpointPath::default()); + content_fetcher: F, + endpoints: Endpoints, + special: HashMap>>, + ) -> Self { Router { - control: Some(control), signer_address: signer_address, endpoints: endpoints, fetch: content_fetcher, special: special, - authorization: authorization, - allowed_hosts: allowed_hosts, - handler: handler, } } } diff --git a/dapps/src/router/auth.rs b/dapps/src/router/auth.rs deleted file mode 100644 index 007ebb96d..000000000 --- a/dapps/src/router/auth.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! 
HTTP Authorization implementations - -use std::collections::HashMap; -use hyper::{server, net, header, status}; -use endpoint::Handler; -use handlers::{AuthRequiredHandler, ContentHandler}; - -/// Authorization result -pub enum Authorized { - /// Authorization was successful. - Yes, - /// Unsuccessful authorization. Handler for further work is returned. - No(Box), -} - -/// Authorization interface -pub trait Authorization : Send + Sync { - /// Checks if authorization is valid. - fn is_authorized(&self, req: &server::Request)-> Authorized; -} - -/// HTTP Basic Authorization handler -pub struct HttpBasicAuth { - users: HashMap, -} - -/// No-authorization implementation (authorization disabled) -pub struct NoAuth; - -impl Authorization for NoAuth { - fn is_authorized(&self, _req: &server::Request)-> Authorized { - Authorized::Yes - } -} - -impl Authorization for HttpBasicAuth { - fn is_authorized(&self, req: &server::Request) -> Authorized { - let auth = self.check_auth(&req); - - match auth { - Access::Denied => { - Authorized::No(Box::new(ContentHandler::error( - status::StatusCode::Unauthorized, - "Unauthorized", - "You need to provide valid credentials to access this page.", - None, - None, - ))) - }, - Access::AuthRequired => { - Authorized::No(Box::new(AuthRequiredHandler)) - }, - Access::Granted => { - Authorized::Yes - }, - } - } -} - -#[derive(Debug)] -enum Access { - Granted, - Denied, - AuthRequired, -} - -impl HttpBasicAuth { - /// Creates `HttpBasicAuth` instance with only one user. - pub fn single_user(username: &str, password: &str) -> Self { - let mut users = HashMap::new(); - users.insert(username.to_owned(), password.to_owned()); - HttpBasicAuth { - users: users - } - } - - fn is_authorized(&self, username: &str, password: &str) -> bool { - self.users.get(&username.to_owned()).map_or(false, |pass| pass == password) - } - - fn check_auth(&self, req: &server::Request) -> Access { - match req.headers().get::>() { - Some(&header::Authorization( - header::Basic { ref username, password: Some(ref password) } - )) if self.is_authorized(username, password) => Access::Granted, - Some(_) => Access::Denied, - None => Access::AuthRequired, - } - } -} diff --git a/dapps/src/router/host_validation.rs b/dapps/src/router/host_validation.rs deleted file mode 100644 index e5fcedd94..000000000 --- a/dapps/src/router/host_validation.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- - -use apps::DAPPS_DOMAIN; -use hyper::{server, header, StatusCode}; -use hyper::net::HttpStream; - -use handlers::ContentHandler; -use jsonrpc_http_server; -use jsonrpc_server_utils::hosts; - -pub fn is_valid(req: &server::Request, allowed_hosts: &Option>) -> bool { - let header_valid = jsonrpc_http_server::is_host_allowed(req, allowed_hosts); - match (header_valid, req.headers().get::()) { - (true, _) => true, - (_, Some(host)) => host.hostname.ends_with(DAPPS_DOMAIN), - _ => false, - } -} - -pub fn host_invalid_response() -> Box + Send> { - Box::new(ContentHandler::error(StatusCode::Forbidden, - "Current Host Is Disallowed", - "You are trying to access your node using incorrect address.", - Some("Use allowed URL or specify different hosts CLI options."), - None, - )) -} diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index 6ddb31db0..b743408dc 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -66,14 +66,14 @@ impl> Endpoint for RpcEndpoint { #[derive(Default)] struct NoopMiddleware; impl http::RequestMiddleware for NoopMiddleware { - fn on_request(&self, request: &http::hyper::server::Request) -> http::RequestMiddlewareAction { + fn on_request(&self, request: &http::hyper::server::Request, _control: &http::hyper::Control) -> http::RequestMiddlewareAction { http::RequestMiddlewareAction::Proceed { should_continue_on_invalid_cors: request.headers().get::().is_none(), } } } -struct MetadataExtractor; +pub struct MetadataExtractor; impl HttpMetaExtractor for MetadataExtractor { fn read_metadata(&self, request: &http::hyper::server::Request) -> Metadata { let dapp_id = request.headers().get::() diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs index 73467e854..043814377 100644 --- a/dapps/src/tests/api.rs +++ b/dapps/src/tests/api.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use tests::helpers::{serve, serve_with_registrar, serve_extra_cors, request, assert_security_headers}; +use tests::helpers::{serve, serve_with_registrar, request, assert_security_headers}; #[test] fn should_return_error() { @@ -195,26 +195,3 @@ fn should_return_signer_port_cors_headers_for_home_parity_with_port() { response.assert_status("HTTP/1.1 200 OK"); response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site:18180"); } - -#[test] -fn should_return_extra_cors_headers() { - // given - let server = serve_extra_cors(Some(vec!["all".to_owned()])); - - // when - let response = request(server, - "\ - POST /api/ping HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: http://somedomain.io\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - response.assert_status("HTTP/1.1 200 OK"); - response.assert_header("Access-Control-Allow-Origin", "http://somedomain.io"); -} - diff --git a/dapps/src/tests/authorization.rs b/dapps/src/tests/authorization.rs deleted file mode 100644 index 346f8f2fb..000000000 --- a/dapps/src/tests/authorization.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use tests::helpers::{serve_with_auth, request, assert_security_headers_for_embed}; - -#[test] -fn should_require_authorization() { - // given - let server = serve_with_auth("test", "test"); - - // when - let response = request(server, - "\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - \r\n\ - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned()); - assert_eq!(response.headers.get(0).unwrap(), "WWW-Authenticate: Basic realm=\"Parity\""); -} - -#[test] -fn should_reject_on_invalid_auth() { - // given - let server = serve_with_auth("test", "test"); - - // when - let response = request(server, - "\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n - \r\n\ - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned()); - assert!(response.body.contains("Unauthorized"), response.body); - assert_eq!(response.headers_raw.contains("WWW-Authenticate"), false); -} - -#[test] -fn should_allow_on_valid_auth() { - // given - let server = serve_with_auth("Aladdin", "OpenSesame"); - - // when - let response = request(server, - "\ - GET /ui/ HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n - \r\n\ - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_security_headers_for_embed(&response.headers); -} diff --git a/dapps/src/tests/helpers/mod.rs b/dapps/src/tests/helpers/mod.rs index 036933995..e6c032549 100644 --- a/dapps/src/tests/helpers/mod.rs +++ b/dapps/src/tests/helpers/mod.rs @@ -16,18 +16,20 @@ use std::env; use std::str; -use std::ops::Deref; +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; use std::sync::Arc; use env_logger::LogBuilder; -use ethcore_rpc::Metadata; -use jsonrpc_core::MetaIoHandler; +use jsonrpc_core::IoHandler; +use jsonrpc_http_server::{self as http, Host, DomainsValidation}; -use ServerBuilder; -use Server; -use fetch::Fetch; use devtools::http_client; +use hash_fetch::urlhint::ContractClient; +use fetch::{Fetch, Client as FetchClient}; use parity_reactor::{EventLoop, Remote}; +use {Middleware, SyncStatus, WebProxyTokens}; + mod registrar; mod fetch; @@ -50,7 +52,7 @@ pub struct ServerLoop { pub event_loop: EventLoop, } -impl Deref for ServerLoop { +impl ::std::ops::Deref for ServerLoop { type Target = Server; fn deref(&self) -> &Self::Target { @@ -58,7 +60,7 @@ impl Deref for ServerLoop { } } -pub fn init_server(process: F, io: MetaIoHandler, remote: Remote) -> (ServerLoop, Arc) where +pub fn init_server(process: F, io: IoHandler, remote: Remote) -> (ServerLoop, Arc) where F: FnOnce(ServerBuilder) -> ServerBuilder, B: Fetch, { @@ -74,33 +76,15 @@ pub fn init_server(process: F, io: MetaIoHandler, remote: Remote &dapps_path, registrar.clone(), remote, )) .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io, event_loop.raw_remote()).unwrap(); + .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap(); ( ServerLoop { server: server, event_loop: event_loop }, registrar, ) } -pub fn serve_with_auth(user: &str, pass: &str) -> ServerLoop { - init_logger(); - let registrar = Arc::new(FakeRegistrar::new()); - let mut dapps_path = env::temp_dir(); - 
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); - - let event_loop = EventLoop::spawn(); - let io = MetaIoHandler::default(); - let server = ServerBuilder::new(&dapps_path, registrar, event_loop.remote()) - .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .allowed_hosts(None.into()) - .start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), user, pass, io, event_loop.raw_remote()).unwrap(); - ServerLoop { - server: server, - event_loop: event_loop, - } -} - -pub fn serve_with_rpc(io: MetaIoHandler) -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None.into()), io, Remote::new_sync()).0 +pub fn serve_with_rpc(io: IoHandler) -> ServerLoop { + init_server(|builder| builder, io, Remote::new_sync()).0 } pub fn serve_hosts(hosts: Option>) -> ServerLoop { @@ -108,20 +92,13 @@ pub fn serve_hosts(hosts: Option>) -> ServerLoop { init_server(|builder| builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0 } -pub fn serve_extra_cors(extra_cors: Option>) -> ServerLoop { - let extra_cors = extra_cors.map(|cors| cors.into_iter().map(Into::into).collect()); - init_server(|builder| builder.allowed_hosts(None.into()).extra_cors_headers(extra_cors.into()), Default::default(), Remote::new_sync()).0 -} - pub fn serve_with_registrar() -> (ServerLoop, Arc) { - init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()) + init_server(|builder| builder, Default::default(), Remote::new_sync()) } pub fn serve_with_registrar_and_sync() -> (ServerLoop, Arc) { init_server(|builder| { - builder - .sync_status(Arc::new(|| true)) - .allowed_hosts(None.into()) + builder.sync_status(Arc::new(|| true)) }, Default::default(), Remote::new_sync()) } @@ -133,7 +110,7 @@ pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Serv let fetch = FakeFetch::default(); let f = fetch.clone(); let (server, reg) = init_server(move |builder| { - builder.allowed_hosts(None.into()).fetch(f.clone()) + builder.fetch(f.clone()) }, Default::default(), if multi_threaded { Remote::new_thread_per_future() } else { Remote::new_sync() }); (server, fetch, reg) @@ -144,7 +121,6 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { let f = fetch.clone(); let (server, _) = init_server(move |builder| { builder - .allowed_hosts(None.into()) .fetch(f.clone()) .web_proxy_tokens(Arc::new(move |token| &token == web_token)) }, Default::default(), Remote::new_sync()); @@ -153,7 +129,7 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { } pub fn serve() -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()).0 + init_server(|builder| builder, Default::default(), Remote::new_sync()).0 } pub fn request(server: ServerLoop, request: &str) -> http_client::Response { @@ -166,3 +142,157 @@ pub fn assert_security_headers(headers: &[String]) { pub fn assert_security_headers_for_embed(headers: &[String]) { http_client::assert_security_headers_present(headers, Some(SIGNER_PORT)) } + + +/// Webapps HTTP+RPC server build. 
+pub struct ServerBuilder { + dapps_path: PathBuf, + registrar: Arc, + sync_status: Arc, + web_proxy_tokens: Arc, + signer_address: Option<(String, u16)>, + allowed_hosts: DomainsValidation, + remote: Remote, + fetch: Option, +} + +impl ServerBuilder { + /// Construct new dapps server + pub fn new>(dapps_path: P, registrar: Arc, remote: Remote) -> Self { + ServerBuilder { + dapps_path: dapps_path.as_ref().to_owned(), + registrar: registrar, + sync_status: Arc::new(|| false), + web_proxy_tokens: Arc::new(|_| false), + signer_address: None, + allowed_hosts: DomainsValidation::Disabled, + remote: remote, + fetch: None, + } + } +} + +impl ServerBuilder { + /// Set a fetch client to use. + pub fn fetch(self, fetch: X) -> ServerBuilder { + ServerBuilder { + dapps_path: self.dapps_path, + registrar: self.registrar, + sync_status: self.sync_status, + web_proxy_tokens: self.web_proxy_tokens, + signer_address: self.signer_address, + allowed_hosts: self.allowed_hosts, + remote: self.remote, + fetch: Some(fetch), + } + } + + /// Change default sync status. + pub fn sync_status(mut self, status: Arc) -> Self { + self.sync_status = status; + self + } + + /// Change default web proxy tokens validator. + pub fn web_proxy_tokens(mut self, tokens: Arc) -> Self { + self.web_proxy_tokens = tokens; + self + } + + /// Change default signer port. + pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self { + self.signer_address = signer_address; + self + } + + /// Change allowed hosts. + /// `None` - All hosts are allowed + /// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address) + pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation) -> Self { + self.allowed_hosts = allowed_hosts; + self + } + + /// Asynchronously start server with no authentication, + /// returns result with `Server` handle on success or an error. + pub fn start_unsecured_http(self, addr: &SocketAddr, io: IoHandler) -> Result { + let fetch = self.fetch_client(); + Server::start_http( + addr, + io, + self.allowed_hosts, + self.signer_address, + self.dapps_path, + vec![], + self.registrar, + self.sync_status, + self.web_proxy_tokens, + self.remote, + fetch, + ) + } + + fn fetch_client(&self) -> T { + match self.fetch.clone() { + Some(fetch) => fetch, + None => T::new().unwrap(), + } + } +} + + +/// Webapps HTTP server. +pub struct Server { + server: Option, +} + +impl Server { + fn start_http( + addr: &SocketAddr, + io: IoHandler, + allowed_hosts: DomainsValidation, + signer_address: Option<(String, u16)>, + dapps_path: PathBuf, + extra_dapps: Vec, + registrar: Arc, + sync_status: Arc, + web_proxy_tokens: Arc, + remote: Remote, + fetch: F, + ) -> Result { + let middleware = Middleware::new( + remote, + signer_address, + dapps_path, + extra_dapps, + registrar, + sync_status, + web_proxy_tokens, + fetch, + ); + http::ServerBuilder::new(io) + .request_middleware(middleware) + .allowed_hosts(allowed_hosts) + .cors(http::DomainsValidation::Disabled) + .start_http(addr) + .map(|server| Server { + server: Some(server), + }) + } + + /// Returns address that this server is bound to. 
+ pub fn addr(&self) -> &SocketAddr { + self.server.as_ref() + .expect("server is always Some at the start; it's consumed only when object is dropped; qed") + .addrs() + .first() + .expect("You cannot start the server without binding to at least one address; qed") + } +} + +impl Drop for Server { + fn drop(&mut self) { + self.server.take().unwrap().close() + } +} + diff --git a/dapps/src/tests/mod.rs b/dapps/src/tests/mod.rs index ced211d53..089318483 100644 --- a/dapps/src/tests/mod.rs +++ b/dapps/src/tests/mod.rs @@ -19,7 +19,6 @@ mod helpers; mod api; -mod authorization; mod fetch; mod redirection; mod rpc; diff --git a/dapps/src/tests/rpc.rs b/dapps/src/tests/rpc.rs index 2cc4ccb24..0cfc2c5a8 100644 --- a/dapps/src/tests/rpc.rs +++ b/dapps/src/tests/rpc.rs @@ -14,16 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use futures::{future, Future}; -use ethcore_rpc::{Metadata, Origin}; -use jsonrpc_core::{MetaIoHandler, Value}; +use jsonrpc_core::{IoHandler, Value}; use tests::helpers::{serve_with_rpc, request}; #[test] fn should_serve_rpc() { // given - let mut io = MetaIoHandler::default(); + let mut io = IoHandler::default(); io.add_method("rpc_test", |_| { Ok(Value::String("Hello World!".into())) }); @@ -49,70 +47,3 @@ fn should_serve_rpc() { response.assert_status("HTTP/1.1 200 OK"); assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); } - -#[test] -fn should_extract_metadata() { - // given - let mut io = MetaIoHandler::default(); - io.add_method_with_meta("rpc_test", |_params, meta: Metadata| { - assert_eq!(meta.origin, Origin::Dapps("".into())); - assert_eq!(meta.dapp_id(), "".into()); - future::ok(Value::String("Hello World!".into())).boxed() - }); - let server = serve_with_rpc(io); - - // when - let req = r#"{"jsonrpc":"2.0","id":1,"method":"rpc_test","params":[]}"#; - let response = request(server, &format!( - "\ - POST /rpc/ HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - X-Parity-Origin: https://this.should.be.ignored\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - \r\n\ - {}\r\n\ - ", - req.as_bytes().len(), - req, - )); - - // then - response.assert_status("HTTP/1.1 200 OK"); - assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); -} - -#[test] -fn should_extract_metadata_from_custom_header() { - // given - let mut io = MetaIoHandler::default(); - io.add_method_with_meta("rpc_test", |_params, meta: Metadata| { - assert_eq!(meta.origin, Origin::Dapps("https://parity.io/".into())); - assert_eq!(meta.dapp_id(), "https://parity.io/".into()); - future::ok(Value::String("Hello World!".into())).boxed() - }); - let server = serve_with_rpc(io); - - // when - let req = r#"{"jsonrpc":"2.0","id":1,"method":"rpc_test","params":[]}"#; - let response = request(server, &format!( - "\ - POST /rpc/ HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - Origin: null\r\n\ - X-Parity-Origin: https://parity.io/\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - \r\n\ - {}\r\n\ - ", - req.as_bytes().len(), - req, - )); - - // then - response.assert_status("HTTP/1.1 200 OK"); - assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); -} diff --git a/dapps/src/tests/validation.rs b/dapps/src/tests/validation.rs index afeb7b5ef..fb68cf5ed 100644 --- a/dapps/src/tests/validation.rs +++ 
b/dapps/src/tests/validation.rs @@ -34,7 +34,7 @@ fn should_reject_invalid_host() { // then assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); - assert!(response.body.contains("Current Host Is Disallowed"), response.body); + assert!(response.body.contains("Provided Host header is not whitelisted."), response.body); } #[test] @@ -97,31 +97,3 @@ fn should_allow_parity_utils_even_on_invalid_domain() { // then assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); } - -#[test] -fn should_not_return_cors_headers_for_rpc() { - // given - let server = serve_hosts(Some(vec!["localhost:8080".into()])); - - // when - let response = request(server, - "\ - POST /rpc HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: null\r\n\ - Content-Type: application/json\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - !response.headers_raw.contains("Access-Control-Allow-Origin"), - "CORS headers were not expected: {:?}", - response.headers - ); -} - diff --git a/hash-fetch/src/urlhint.rs b/hash-fetch/src/urlhint.rs index 1588b5482..579c83845 100644 --- a/hash-fetch/src/urlhint.rs +++ b/hash-fetch/src/urlhint.rs @@ -92,12 +92,13 @@ pub enum URLHintResult { } /// URLHint Contract interface -pub trait URLHint { +pub trait URLHint: Send + Sync { /// Resolves given id to registrar entry. fn resolve(&self, id: Bytes) -> Option; } /// `URLHintContract` API +#[derive(Clone)] pub struct URLHintContract { urlhint: Contract, registrar: Contract, diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index df03b6cd7..eeac2431b 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -32,12 +32,13 @@ use std::sync::Arc; use std::net::{SocketAddr, IpAddr}; use error::ServerError; use route::Out; -use http::hyper::server::{Listening, Handler, Request, Response}; +use http::hyper::server::{Handler, Request, Response}; use http::hyper::net::HttpStream; use http::hyper::header::{self, Vary, ContentLength, ContentType}; use http::hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode}; use ethcore::client::BlockChainClient; +pub use http::hyper::server::Listening; pub use http::{AccessControlAllowOrigin, Host, DomainsValidation}; /// Request/response handler diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 65b1cfea4..7576c063f 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -164,6 +164,8 @@ usage! { or |c: &Config| otry!(c.rpc).apis.as_ref().map(|vec| vec.join(",")), flag_jsonrpc_hosts: String = "none", or |c: &Config| otry!(c.rpc).hosts.as_ref().map(|vec| vec.join(",")), + flag_jsonrpc_threads: Option = None, + or |c: &Config| otry!(c.rpc).threads.map(Some), // IPC flag_no_ipc: bool = false, @@ -176,21 +178,8 @@ usage! 
{ // DAPPS flag_no_dapps: bool = false, or |c: &Config| otry!(c.dapps).disable.clone(), - flag_dapps_port: u16 = 8080u16, - or |c: &Config| otry!(c.dapps).port.clone(), - flag_dapps_interface: String = "local", - or |c: &Config| otry!(c.dapps).interface.clone(), - flag_dapps_hosts: String = "none", - or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| vec.join(",")), - flag_dapps_cors: Option = None, - or |c: &Config| otry!(c.dapps).cors.clone().map(Some), flag_dapps_path: String = "$BASE/dapps", or |c: &Config| otry!(c.dapps).path.clone(), - flag_dapps_user: Option = None, - or |c: &Config| otry!(c.dapps).user.clone().map(Some), - flag_dapps_pass: Option = None, - or |c: &Config| otry!(c.dapps).pass.clone().map(Some), - flag_dapps_apis_all: bool = false, or |_| None, // Secret Store flag_no_secretstore: bool = false, @@ -330,6 +319,22 @@ usage! { or |c: &Config| otry!(c.misc).log_file.clone().map(Some), flag_no_color: bool = false, or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(), + + + // -- Legacy Options supported in configs + flag_dapps_port: Option = None, + or |c: &Config| otry!(c.dapps).port.clone().map(Some), + flag_dapps_interface: Option = None, + or |c: &Config| otry!(c.dapps).interface.clone().map(Some), + flag_dapps_hosts: Option = None, + or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| Some(vec.join(","))), + flag_dapps_cors: Option = None, + or |c: &Config| otry!(c.dapps).cors.clone().map(Some), + flag_dapps_user: Option = None, + or |c: &Config| otry!(c.dapps).user.clone().map(Some), + flag_dapps_pass: Option = None, + or |c: &Config| otry!(c.dapps).pass.clone().map(Some), + flag_dapps_apis_all: Option = None, or |_| None, } { // Values with optional default value. @@ -419,6 +424,7 @@ struct Rpc { cors: Option, apis: Option>, hosts: Option>, + threads: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -672,6 +678,7 @@ mod tests { flag_jsonrpc_cors: Some("null".into()), flag_jsonrpc_apis: "web3,eth,net,parity,traces,rpc".into(), flag_jsonrpc_hosts: "none".into(), + flag_jsonrpc_threads: None, // IPC flag_no_ipc: false, @@ -679,15 +686,8 @@ mod tests { flag_ipc_apis: "web3,eth,net,parity,parity_accounts,personal,traces,rpc".into(), // DAPPS - flag_no_dapps: false, - flag_dapps_port: 8080u16, - flag_dapps_interface: "local".into(), - flag_dapps_hosts: "none".into(), - flag_dapps_cors: None, flag_dapps_path: "$HOME/.parity/dapps".into(), - flag_dapps_user: Some("test_user".into()), - flag_dapps_pass: Some("test_pass".into()), - flag_dapps_apis_all: false, + flag_no_dapps: false, flag_no_secretstore: false, flag_secretstore_port: 8082u16, @@ -792,6 +792,14 @@ mod tests { flag_extradata: None, flag_cache: None, flag_warp: Some(true), + // Legacy-Dapps + flag_dapps_port: Some(8080), + flag_dapps_interface: Some("local".into()), + flag_dapps_hosts: Some("none".into()), + flag_dapps_cors: None, + flag_dapps_user: Some("test_user".into()), + flag_dapps_pass: Some("test_pass".into()), + flag_dapps_apis_all: None, // -- Miscellaneous Options flag_version: false, @@ -873,6 +881,7 @@ mod tests { cors: None, apis: None, hosts: None, + threads: None, }), ipc: Some(Ipc { disable: None, diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 4c1abafbe..1ebeffef9 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -149,6 +149,8 @@ API and Console Options: is additional security against some attack vectors. Special options: "all", "none", (default: {flag_jsonrpc_hosts}). 
+ --jsonrpc-threads THREADS Enables experimental faster implementation of JSON-RPC server. + Requires Dapps server to be disabled using --no-dapps. (default: {flag_jsonrpc_threads:?}) --no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc}) --ipc-path PATH Specify custom path for JSON-RPC over IPC service @@ -157,29 +159,8 @@ API and Console Options: IPC (default: {flag_ipc_apis}). --no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps}) - --dapps-port PORT Specify the port portion of the Dapps server - (default: {flag_dapps_port}). - --dapps-interface IP Specify the hostname portion of the Dapps - server, IP should be an interface's IP address, - or local (default: {flag_dapps_interface}). - --dapps-hosts HOSTS List of allowed Host header values. This option will - validate the Host header sent by the browser, it - is additional security against some attack - vectors. Special options: "all", "none", - (default: {flag_dapps_hosts}). - --dapps-cors URL Specify CORS headers for Dapps server APIs. - (default: {flag_dapps_cors:?}) - --dapps-user USERNAME Specify username for Dapps server. It will be - used in HTTP Basic Authentication Scheme. - If --dapps-pass is not specified you will be - asked for password on startup. (default: {flag_dapps_user:?}) - --dapps-pass PASSWORD Specify password for Dapps server. Use only in - conjunction with --dapps-user. (default: {flag_dapps_pass:?}) --dapps-path PATH Specify directory where dapps should be installed. (default: {flag_dapps_path}) - --dapps-apis-all Expose all possible RPC APIs on Dapps port. - WARNING: INSECURE. Used only for development. - (default: {flag_dapps_apis_all}) --ipfs-api Enable IPFS-compatible HTTP API. (default: {flag_ipfs_api}) --ipfs-api-port PORT Configure on which port the IPFS HTTP API should listen. (default: {flag_ipfs_api_port}) @@ -392,6 +373,13 @@ Legacy Options: --jsonrpc-off Equivalent to --no-jsonrpc. -w --webapp Does nothing; dapps server is on by default now. --dapps-off Equivalent to --no-dapps. + --dapps-user USERNAME Dapps server authentication has been removed. (default: {flag_dapps_user:?}) + --dapps-pass PASSWORD Dapps server authentication has been removed. (default: {flag_dapps_pass:?}) + --dapps-apis-all Dapps server is merged with RPC server. Use --jsonrpc-apis. (default: {flag_dapps_apis_all:?}) + --dapps-cors URL Dapps server is merged with RPC server. Use --jsonrpc-cors. (default: {flag_dapps_cors:?}) + --dapps-hosts HOSTS Dapps server is merged with RPC server. Use --jsonrpc-hosts. (default: {flag_dapps_hosts:?}) + --dapps-interface IP Dapps server is merged with RPC server. Use --jsonrpc-interface. (default: {flag_dapps_interface:?}) + --dapps-port PORT Dapps server is merged with RPC server. Use --jsonrpc-port. (default: {flag_dapps_port:?}) --rpc Does nothing; JSON-RPC is on by default now. --warp Does nothing; Warp sync is on by default. (default: {flag_warp}) --rpcaddr IP Equivalent to --jsonrpc-interface IP. 
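The usage text above folds the old --dapps-* options into the --jsonrpc-* family and adds --jsonrpc-threads, which cannot be combined with the Dapps server; the configuration.rs hunk that follows enforces this. Below is a minimal sketch of that decision logic only, under assumed names (RpcMode, resolve_rpc_mode) that are invented for illustration and do not appear in the patch itself; the real types are HttpSettings and Middleware in parity/rpc.rs.

use std::io::Write;

// Hypothetical summary of the three ways the HTTP server can be configured
// after this patch series (illustration only, not patch content).
enum RpcMode {
    // experimental multi-threaded JSON-RPC server, Dapps disabled
    Threads(usize),
    // classic server with the Dapps request middleware attached
    WithDapps,
    // classic server, no Dapps
    Plain,
}

fn resolve_rpc_mode(jsonrpc_threads: Option<usize>, dapps_enabled: bool) -> Result<RpcMode, String> {
    match (jsonrpc_threads, dapps_enabled) {
        // --jsonrpc-threads must be a positive number
        (Some(0), _) => Err("--jsonrpc-threads number needs to be positive.".into()),
        // the fast server wins; Dapps are switched off with a warning, as configuration.rs does below
        (Some(threads), true) => {
            let _ = writeln!(std::io::stderr(),
                "Warning: Disabling Dapps server because fast RPC server was enabled.");
            Ok(RpcMode::Threads(threads))
        },
        (Some(threads), false) => Ok(RpcMode::Threads(threads)),
        (None, true) => Ok(RpcMode::WithDapps),
        (None, false) => Ok(RpcMode::Plain),
    }
}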
diff --git a/parity/configuration.rs b/parity/configuration.rs index 5dd11bd90..1eb4c1848 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -132,12 +132,17 @@ impl Configuration { let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive); let geth_compatibility = self.args.flag_geth; let ui_address = self.ui_port().map(|port| (self.ui_interface(), port)); - let dapps_conf = self.dapps_config(); + let mut dapps_conf = self.dapps_config(); let ipfs_conf = self.ipfs_config(); let signer_conf = self.signer_config(); let secretstore_conf = self.secretstore_config(); let format = self.format()?; + if self.args.flag_jsonrpc_threads.is_some() && dapps_conf.enabled { + dapps_conf.enabled = false; + writeln!(&mut stderr(), "Warning: Disabling Dapps server because fast RPC server was enabled.").expect("Error writing to stderr.") + } + let cmd = if self.args.flag_version { Cmd::Version } else if self.args.cmd_signer { @@ -554,19 +559,12 @@ impl Configuration { fn dapps_config(&self) -> DappsConfiguration { DappsConfiguration { enabled: self.dapps_enabled(), - interface: self.dapps_interface(), - port: self.args.flag_dapps_port, - hosts: self.dapps_hosts(), - cors: self.dapps_cors(), - user: self.args.flag_dapps_user.clone(), - pass: self.args.flag_dapps_pass.clone(), dapps_path: PathBuf::from(self.directories().dapps), extra_dapps: if self.args.cmd_dapp { self.args.arg_path.iter().map(|path| PathBuf::from(path)).collect() } else { vec![] }, - all_apis: self.args.flag_dapps_apis_all, } } @@ -746,14 +744,10 @@ impl Configuration { Self::cors(self.args.flag_ipfs_api_cors.as_ref()) } - fn dapps_cors(&self) -> Option> { - Self::cors(self.args.flag_dapps_cors.as_ref()) - } - fn hosts(hosts: &str) -> Option> { match hosts { "none" => return Some(Vec::new()), - "all" => return None, + "*" | "all" | "any" => return None, _ => {} } let hosts = hosts.split(',').map(Into::into).collect(); @@ -764,10 +758,6 @@ impl Configuration { Self::hosts(&self.args.flag_jsonrpc_hosts) } - fn dapps_hosts(&self) -> Option> { - Self::hosts(&self.args.flag_dapps_hosts) - } - fn ipfs_hosts(&self) -> Option> { Self::hosts(&self.args.flag_ipfs_api_hosts) } @@ -793,12 +783,17 @@ impl Configuration { fn http_config(&self) -> Result { let conf = HttpConfiguration { - enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, + enabled: self.rpc_enabled(), interface: self.rpc_interface(), port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), apis: self.rpc_apis().parse()?, hosts: self.rpc_hosts(), cors: self.rpc_cors(), + threads: match self.args.flag_jsonrpc_threads { + Some(threads) if threads > 0 => Some(threads), + None => None, + _ => return Err("--jsonrpc-threads number needs to be positive.".into()), + } }; Ok(conf) @@ -809,7 +804,7 @@ impl Configuration { name: self.args.flag_identity.clone(), chain: self.chain(), network_port: self.args.flag_port, - rpc_enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, + rpc_enabled: self.rpc_enabled(), rpc_interface: self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()), rpc_port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), } @@ -916,13 +911,6 @@ impl Configuration { Self::interface(&self.network_settings().rpc_interface) } - fn dapps_interface(&self) -> String { - match self.args.flag_dapps_interface.as_str() { - "local" => "127.0.0.1", - x => x, - }.into() - } - fn ipfs_interface(&self) -> String { 
Self::interface(&self.args.flag_ipfs_api_interface) } @@ -938,8 +926,12 @@ impl Configuration { Self::interface(&self.args.flag_stratum_interface) } + fn rpc_enabled(&self) -> bool { + !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc + } + fn dapps_enabled(&self) -> bool { - !self.args.flag_dapps_off && !self.args.flag_no_dapps && cfg!(feature = "dapps") + !self.args.flag_dapps_off && !self.args.flag_no_dapps && self.rpc_enabled() && cfg!(feature = "dapps") } fn secretstore_enabled(&self) -> bool { @@ -1317,23 +1309,6 @@ mod tests { assert_eq!(conf3.rpc_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()])); } - #[test] - fn should_parse_dapps_hosts() { - // given - - // when - let conf0 = parse(&["parity"]); - let conf1 = parse(&["parity", "--dapps-hosts", "none"]); - let conf2 = parse(&["parity", "--dapps-hosts", "all"]); - let conf3 = parse(&["parity", "--dapps-hosts", "ethcore.io,something.io"]); - - // then - assert_eq!(conf0.dapps_hosts(), Some(Vec::new())); - assert_eq!(conf1.dapps_hosts(), Some(Vec::new())); - assert_eq!(conf2.dapps_hosts(), None); - assert_eq!(conf3.dapps_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()])); - } - #[test] fn should_parse_ipfs_hosts() { // given diff --git a/parity/dapps.rs b/parity/dapps.rs index bbd5f4960..e0e97c08f 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -19,25 +19,17 @@ use std::sync::Arc; use dir::default_data_path; use ethcore::client::Client; -use ethcore_rpc::informant::RpcStats; use ethsync::SyncProvider; use hash_fetch::fetch::Client as FetchClient; use helpers::replace_home; -use rpc_apis::{self, SignerService}; +use rpc_apis::SignerService; use parity_reactor; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { pub enabled: bool, - pub interface: String, - pub port: u16, - pub hosts: Option>, - pub cors: Option>, - pub user: Option, - pub pass: Option, pub dapps_path: PathBuf, pub extra_dapps: Vec, - pub all_apis: bool, } impl Default for Configuration { @@ -45,80 +37,56 @@ impl Default for Configuration { let data_dir = default_data_path(); Configuration { enabled: true, - interface: "127.0.0.1".into(), - port: 8080, - hosts: Some(Vec::new()), - cors: None, - user: None, - pass: None, dapps_path: replace_home(&data_dir, "$BASE/dapps").into(), extra_dapps: vec![], - all_apis: false, } } } pub struct Dependencies { - pub apis: Arc, pub client: Arc, pub sync: Arc, pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, pub signer: Arc, - pub stats: Arc, } -pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { +pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { if !configuration.enabled { return Ok(None); } - let url = format!("{}:{}", configuration.interface, configuration.port); - let addr = url.parse().map_err(|_| format!("Invalid Webapps listen host/port given: {}", url))?; - - let auth = configuration.user.as_ref().map(|username| { - let password = configuration.pass.as_ref().map_or_else(|| { - use rpassword::read_password; - println!("Type password for WebApps server (user: {}): ", username); - let pass = read_password().unwrap(); - println!("OK, got it. 
Starting server..."); - pass - }, |pass| pass.to_owned()); - (username.to_owned(), password) - }); - - Ok(Some(setup_dapps_server( + dapps_middleware( deps, configuration.dapps_path, configuration.extra_dapps, - &addr, - configuration.hosts, - configuration.cors, - auth, - configuration.all_apis, - )?)) + ).map(Some) } -pub use self::server::WebappServer; -pub use self::server::setup_dapps_server; +pub use self::server::Middleware; +pub use self::server::dapps_middleware; #[cfg(not(feature = "dapps"))] mod server { use super::Dependencies; - use std::net::SocketAddr; use std::path::PathBuf; + use ethcore_rpc::{hyper, RequestMiddleware, RequestMiddlewareAction}; - pub struct WebappServer; - pub fn setup_dapps_server( + pub struct Middleware; + + impl RequestMiddleware for Middleware { + fn on_request( + &self, req: &hyper::server::Request, control: &hyper::Control + ) -> RequestMiddlewareAction { + unreachable!() + } + } + + pub fn dapps_middleware( _deps: Dependencies, _dapps_path: PathBuf, _extra_dapps: Vec, - _url: &SocketAddr, - _allowed_hosts: Option>, - _cors: Option>, - _auth: Option<(String, String)>, - _all_apis: bool, - ) -> Result { + ) -> Result { Err("Your Parity version has been compiled without WebApps support.".into()) } } @@ -128,78 +96,41 @@ mod server { use super::Dependencies; use std::path::PathBuf; use std::sync::Arc; - use std::net::SocketAddr; - use std::io; use util::{Bytes, Address, U256}; - use ansi_term::Colour; use ethcore::transaction::{Transaction, Action}; use ethcore::client::{Client, BlockChainClient, BlockId}; - use ethcore_dapps::{AccessControlAllowOrigin, Host}; use ethcore_rpc::is_major_importing; + use hash_fetch::fetch::Client as FetchClient; use hash_fetch::urlhint::ContractClient; + use parity_dapps; use parity_reactor; - use rpc_apis; - pub use ethcore_dapps::Server as WebappServer; + pub type Middleware = parity_dapps::Middleware; - pub fn setup_dapps_server( + pub fn dapps_middleware( deps: Dependencies, dapps_path: PathBuf, extra_dapps: Vec, - url: &SocketAddr, - allowed_hosts: Option>, - cors: Option>, - auth: Option<(String, String)>, - all_apis: bool, - ) -> Result { - use ethcore_dapps as dapps; - - let server = dapps::ServerBuilder::new( - &dapps_path, - Arc::new(Registrar { client: deps.client.clone() }), - parity_reactor::Remote::new(deps.remote.clone()), - ); - let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); - let cors: Option> = cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect()); - - let sync = deps.sync.clone(); - let client = deps.client.clone(); + ) -> Result { + let sync = deps.sync; let signer = deps.signer.clone(); - let server = server - .fetch(deps.fetch.clone()) - .sync_status(Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info()))) - .web_proxy_tokens(Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token))) - .extra_dapps(&extra_dapps) - .signer_address(deps.signer.address()) - .allowed_hosts(allowed_hosts.into()) - .extra_cors_headers(cors.into()); + let client = deps.client; + let parity_remote = parity_reactor::Remote::new(deps.remote.clone()); + let registrar = Arc::new(Registrar { client: client.clone() }); + let sync_status = Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info())); + let web_proxy_tokens = Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token)); - let api_set = if all_apis { - warn!("{}", Colour::Red.bold().paint("*** INSECURE *** Running 
Dapps with all APIs exposed.")); - info!("If you do not intend this, exit now."); - rpc_apis::ApiSet::SafeContext - } else { - rpc_apis::ApiSet::UnsafeContext - }; - let apis = rpc_apis::setup_rpc(deps.stats, deps.apis.clone(), api_set); - let start_result = match auth { - None => { - server.start_unsecured_http(url, apis, deps.remote) - }, - Some((username, password)) => { - server.start_basic_auth_http(url, &username, &password, apis, deps.remote) - }, - }; - - match start_result { - Err(dapps::ServerError::IoError(err)) => match err.kind() { - io::ErrorKind::AddrInUse => Err(format!("WebApps address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --dapps-port and --dapps-interface options.", url)), - _ => Err(format!("WebApps io error: {}", err)), - }, - Err(e) => Err(format!("WebApps error: {:?}", e)), - Ok(server) => Ok(server), - } + Ok(parity_dapps::Middleware::new( + parity_remote, + deps.signer.address(), + dapps_path, + extra_dapps, + registrar, + sync_status, + web_proxy_tokens, + deps.fetch.clone(), + )) } struct Registrar { diff --git a/parity/deprecated.rs b/parity/deprecated.rs index 97c6ffe4a..820181efa 100644 --- a/parity/deprecated.rs +++ b/parity/deprecated.rs @@ -21,94 +21,89 @@ use cli::Args; pub enum Deprecated { DoesNothing(&'static str), Replaced(&'static str, &'static str), + Removed(&'static str), } impl fmt::Display for Deprecated { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { - Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default", s), - Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead", old, new), + Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default.", s), + Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. 
Please use '{}' instead.", old, new), + Deprecated::Removed(s) => write!(f, "Option '{}' has been removed and is no longer supported.", s) } } } -impl Deprecated { - fn jsonrpc() -> Self { - Deprecated::DoesNothing("--jsonrpc") - } - - fn rpc() -> Self { - Deprecated::DoesNothing("--rpc") - } - - fn jsonrpc_off() -> Self { - Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc") - } - - fn webapp() -> Self { - Deprecated::DoesNothing("--webapp") - } - - fn dapps_off() -> Self { - Deprecated::Replaced("--dapps-off", "--no-dapps") - } - - fn ipcdisable() -> Self { - Deprecated::Replaced("--ipcdisable", "--no-ipc") - } - - fn ipc_off() -> Self { - Deprecated::Replaced("--ipc-off", "--no-ipc") - } - - fn etherbase() -> Self { - Deprecated::Replaced("--etherbase", "--author") - } - - fn extradata() -> Self { - Deprecated::Replaced("--extradata", "--extra-data") - } -} - pub fn find_deprecated(args: &Args) -> Vec { let mut result = vec![]; if args.flag_jsonrpc { - result.push(Deprecated::jsonrpc()); + result.push(Deprecated::DoesNothing("--jsonrpc")); } if args.flag_rpc { - result.push(Deprecated::rpc()); + result.push(Deprecated::DoesNothing("--rpc")); } if args.flag_jsonrpc_off { - result.push(Deprecated::jsonrpc_off()); + result.push(Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc")); } if args.flag_webapp { - result.push(Deprecated::webapp()) + result.push(Deprecated::DoesNothing("--webapp")); } if args.flag_dapps_off { - result.push(Deprecated::dapps_off()); + result.push(Deprecated::Replaced("--dapps-off", "--no-dapps")); } if args.flag_ipcdisable { - result.push(Deprecated::ipcdisable()); + result.push(Deprecated::Replaced("--ipcdisable", "--no-ipc")); } if args.flag_ipc_off { - result.push(Deprecated::ipc_off()); + result.push(Deprecated::Replaced("--ipc-off", "--no-ipc")); } if args.flag_etherbase.is_some() { - result.push(Deprecated::etherbase()); + result.push(Deprecated::Replaced("--etherbase", "--author")); } if args.flag_extradata.is_some() { - result.push(Deprecated::extradata()); + result.push(Deprecated::Replaced("--extradata", "--extra-data")); } + // Removed in 1.7 + if args.flag_dapps_port.is_some() { + result.push(Deprecated::Replaced("--dapps-port", "--jsonrpc-port")); + } + + if args.flag_dapps_interface.is_some() { + result.push(Deprecated::Replaced("--dapps-interface", "--jsonrpc-interface")); + } + + if args.flag_dapps_hosts.is_some() { + result.push(Deprecated::Replaced("--dapps-hosts", "--jsonrpc-hosts")); + } + + if args.flag_dapps_cors.is_some() { + result.push(Deprecated::Replaced("--dapps-cors", "--jsonrpc-cors")); + } + + if args.flag_dapps_user.is_some() { + result.push(Deprecated::Removed("--dapps-user")); + } + + if args.flag_dapps_pass.is_some() { + result.push(Deprecated::Removed("--dapps-pass")); + } + + if args.flag_dapps_apis_all.is_some() { + result.push(Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis")); + } + + // Removed in 1.8 + result } @@ -131,17 +126,31 @@ mod tests { args.flag_ipc_off = true; args.flag_etherbase = Some(Default::default()); args.flag_extradata = Some(Default::default()); + args.flag_dapps_port = Some(Default::default()); + args.flag_dapps_interface = Some(Default::default()); + args.flag_dapps_hosts = Some(Default::default()); + args.flag_dapps_cors = Some(Default::default()); + args.flag_dapps_user = Some(Default::default()); + args.flag_dapps_pass = Some(Default::default()); + args.flag_dapps_apis_all = Some(Default::default()); args }), vec![ - Deprecated::jsonrpc(), - Deprecated::rpc(), - Deprecated::jsonrpc_off(), 
- Deprecated::webapp(), - Deprecated::dapps_off(), - Deprecated::ipcdisable(), - Deprecated::ipc_off(), - Deprecated::etherbase(), - Deprecated::extradata(), + Deprecated::DoesNothing("--jsonrpc"), + Deprecated::DoesNothing("--rpc"), + Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc"), + Deprecated::DoesNothing("--webapp"), + Deprecated::Replaced("--dapps-off", "--no-dapps"), + Deprecated::Replaced("--ipcdisable", "--no-ipc"), + Deprecated::Replaced("--ipc-off", "--no-ipc"), + Deprecated::Replaced("--etherbase", "--author"), + Deprecated::Replaced("--extradata", "--extra-data"), + Deprecated::Replaced("--dapps-port", "--jsonrpc-port"), + Deprecated::Replaced("--dapps-interface", "--jsonrpc-interface"), + Deprecated::Replaced("--dapps-hosts", "--jsonrpc-hosts"), + Deprecated::Replaced("--dapps-cors", "--jsonrpc-cors"), + Deprecated::Removed("--dapps-user"), + Deprecated::Removed("--dapps-pass"), + Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis"), ]); } } diff --git a/parity/ipfs.rs b/parity/ipfs.rs index 760868f91..45c3f7062 100644 --- a/parity/ipfs.rs +++ b/parity/ipfs.rs @@ -15,10 +15,9 @@ // along with Parity. If not, see . use std::sync::Arc; -use parity_ipfs_api::{self, AccessControlAllowOrigin, Host}; +use parity_ipfs_api::{self, AccessControlAllowOrigin, Host, Listening}; use parity_ipfs_api::error::ServerError; use ethcore::client::BlockChainClient; -use hyper::server::Listening; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { diff --git a/parity/main.rs b/parity/main.rs index 2044b3ee0..4b6dc6dab 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -28,7 +28,6 @@ extern crate ctrlc; extern crate docopt; extern crate env_logger; extern crate fdlimit; -extern crate hyper; extern crate isatty; extern crate jsonrpc_core; extern crate num_cpus; @@ -73,7 +72,11 @@ extern crate ethcore_stratum; extern crate ethcore_secretstore; #[cfg(feature = "dapps")] -extern crate ethcore_dapps; +extern crate parity_dapps; + +#[cfg(test)] +#[macro_use] +extern crate pretty_assertions; #[cfg(windows)] extern crate ws2_32; #[cfg(windows)] extern crate winapi; diff --git a/parity/rpc.rs b/parity/rpc.rs index a435f24db..254bb782e 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -14,24 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
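Before moving on to the RPC changes: the deprecated-flag handling above now builds the `Deprecated` variants inline instead of going through per-flag helper constructors. A minimal sketch of how the collected notices are typically surfaced at startup (the `warn_deprecated` helper and the `warn!` call are illustrative only, not part of this change):

fn warn_deprecated(args: &Args) {
	// `find_deprecated` returns one `Deprecated` value per legacy flag that is set;
	// each value prints a human-readable notice via the `Display` impl shown above.
	for notice in find_deprecated(args) {
		warn!("{}", notice);
	}
}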
-use std::fmt; +use std::{io, fmt}; use std::sync::Arc; -use std::net::SocketAddr; -use std::io; +use dapps; use dir::default_data_path; -use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use ethcore_rpc::informant::{RpcStats, Middleware}; +use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use helpers::parity_ipc_path; -use hyper; use jsonrpc_core::MetaIoHandler; -use rpc_apis; -use rpc_apis::ApiSet; use parity_reactor::TokioRemote; +use rpc_apis::{self, ApiSet}; -pub use ethcore_rpc::{IpcServer, HttpServer}; +pub use ethcore_rpc::{IpcServer, HttpServer, RequestMiddleware}; -#[derive(Debug, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub struct HttpConfiguration { pub enabled: bool, pub interface: String, @@ -39,6 +36,7 @@ pub struct HttpConfiguration { pub apis: ApiSet, pub cors: Option>, pub hosts: Option>, + pub threads: Option, } impl Default for HttpConfiguration { @@ -50,6 +48,7 @@ impl Default for HttpConfiguration { apis: ApiSet::UnsafeContext, cors: None, hosts: Some(Vec::new()), + threads: None, } } } @@ -89,13 +88,17 @@ pub struct Dependencies { } pub struct RpcExtractor; -impl rpc::HttpMetaExtractor for RpcExtractor { - fn read_metadata(&self, req: &hyper::server::Request) -> Metadata { - let origin = req.headers().get::() - .map(|origin| format!("{}://{}", origin.scheme, origin.host)) - .unwrap_or_else(|| "unknown".into()); +impl rpc::HttpMetaExtractor for RpcExtractor { + type Metadata = Metadata; + + fn read_metadata(&self, origin: String, dapps_origin: Option) -> Metadata { let mut metadata = Metadata::default(); - metadata.origin = Origin::Rpc(origin); + + metadata.origin = match (origin.as_str(), dapps_origin) { + ("null", Some(dapp)) => Origin::Dapps(dapp.into()), + _ => Origin::Rpc(origin), + }; + metadata } } @@ -109,52 +112,92 @@ impl rpc::IpcMetaExtractor for RpcExtractor { } } -pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, String> { +fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler { + rpc_apis::setup_rpc(deps.stats.clone(), deps.apis.clone(), apis) +} + +pub fn new_http(conf: HttpConfiguration, deps: &Dependencies, middleware: Option) -> Result, String> { if !conf.enabled { return Ok(None); } let url = format!("{}:{}", conf.interface, conf.port); let addr = url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url))?; - Ok(Some(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, conf.apis)?)) -} + let handler = setup_apis(conf.apis, deps); + let remote = deps.remote.clone(); -fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler { - rpc_apis::setup_rpc(deps.stats.clone(), deps.apis.clone(), apis) -} + let cors_domains: Option> = conf.cors.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect()); + let allowed_hosts: Option> = conf.hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + + let start_result = rpc::start_http( + &addr, + cors_domains.into(), + allowed_hosts.into(), + handler, + remote, + RpcExtractor, + match (conf.threads, middleware) { + (Some(threads), None) => rpc::HttpSettings::Threads(threads), + (None, middleware) => rpc::HttpSettings::Dapps(middleware), + (Some(_), Some(_)) => { + return Err("Dapps and fast multi-threaded RPC server cannot be enabled at the same time.".into()) + }, + } + ); -pub fn setup_http_rpc_server( - dependencies: &Dependencies, - url: &SocketAddr, - cors_domains: Option>, - allowed_hosts: Option>, - 
apis: ApiSet -) -> Result { - let handler = setup_apis(apis, dependencies); - let remote = dependencies.remote.clone(); - let cors_domains: Option> = cors_domains.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect()); - let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); - let start_result = rpc::start_http(url, cors_domains.into(), allowed_hosts.into(), handler, remote, RpcExtractor); match start_result { - Err(HttpServerError::IoError(err)) => match err.kind() { - io::ErrorKind::AddrInUse => Err(format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url)), + Ok(server) => Ok(Some(server)), + Err(HttpServerError::Io(err)) => match err.kind() { + io::ErrorKind::AddrInUse => Err( + format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url) + ), _ => Err(format!("RPC io error: {}", err)), }, Err(e) => Err(format!("RPC error: {:?}", e)), - Ok(server) => Ok(server), } } -pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { - if !conf.enabled { return Ok(None); } - Ok(Some(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)?)) -} - -pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { - let handler = setup_apis(apis, dependencies); +pub fn new_ipc(conf: IpcConfiguration, dependencies: &Dependencies) -> Result, String> { + if !conf.enabled { + return Ok(None); + } + let handler = setup_apis(conf.apis, dependencies); let remote = dependencies.remote.clone(); - match rpc::start_ipc(addr, handler, remote, RpcExtractor) { + match rpc::start_ipc(&conf.socket_addr, handler, remote, RpcExtractor) { + Ok(server) => Ok(Some(server)), Err(io_error) => Err(format!("RPC io error: {}", io_error)), - Ok(server) => Ok(server) + } +} + +#[cfg(test)] +mod tests { + use super::RpcExtractor; + use ethcore_rpc::{HttpMetaExtractor, Origin}; + + #[test] + fn should_extract_rpc_origin() { + // given + let extractor = RpcExtractor; + + // when + let meta = extractor.read_metadata("http://parity.io".into(), None); + let meta1 = extractor.read_metadata("http://parity.io".into(), Some("ignored".into())); + + // then + assert_eq!(meta.origin, Origin::Rpc("http://parity.io".into())); + assert_eq!(meta1.origin, Origin::Rpc("http://parity.io".into())); + } + + #[test] + fn should_dapps_origin() { + // given + let extractor = RpcExtractor; + let dapp = "https://wallet.ethereum.org".to_owned(); + + // when + let meta = extractor.read_metadata("null".into(), Some(dapp.clone())); + + // then + assert_eq!(meta.origin, Origin::Dapps(dapp.into())); } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index e168f029c..dbeeea962 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -81,7 +81,7 @@ impl FromStr for Api { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum ApiSet { SafeContext, UnsafeContext, diff --git a/parity/run.rs b/parity/run.rs index c438c25a5..a85bcc39b 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -36,7 +36,6 @@ use updater::{UpdatePolicy, Updater}; use parity_reactor::EventLoop; use hash_fetch::fetch::{Fetch, Client as FetchClient}; -use rpc::{HttpConfiguration, IpcConfiguration}; use params::{ SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, 
tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool @@ -76,8 +75,8 @@ pub struct RunCmd { pub daemon: Option, pub logger_config: LogConfig, pub miner_options: MinerOptions, - pub http_conf: HttpConfiguration, - pub ipc_conf: IpcConfiguration, + pub http_conf: rpc::HttpConfiguration, + pub ipc_conf: rpc::IpcConfiguration, pub net_conf: NetworkConfiguration, pub network_id: Option, pub warp_sync: bool, @@ -110,11 +109,7 @@ pub struct RunCmd { pub verifier_settings: VerifierSettings, } -pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configuration) -> Result<(), String> { - if !dapps_conf.enabled { - return Err("Cannot use UI command with Dapps turned off.".into()) - } - +pub fn open_ui(signer_conf: &signer::Configuration) -> Result<(), String> { if !signer_conf.enabled { return Err("Cannot use UI command with UI turned off.".into()) } @@ -127,12 +122,12 @@ pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configur Ok(()) } -pub fn open_dapp(dapps_conf: &dapps::Configuration, dapp: &str) -> Result<(), String> { +pub fn open_dapp(dapps_conf: &dapps::Configuration, rpc_conf: &rpc::HttpConfiguration, dapp: &str) -> Result<(), String> { if !dapps_conf.enabled { return Err("Cannot use DAPP command with Dapps turned off.".into()) } - let url = format!("http://{}:{}/{}/", dapps_conf.interface, dapps_conf.port, dapp); + let url = format!("http://{}:{}/{}/", rpc_conf.interface, rpc_conf.port, dapp); url::open(&url); Ok(()) } @@ -156,9 +151,9 @@ impl ::local_store::NodeInfo for FullNodeInfo { pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { if cmd.ui && cmd.dapps_conf.enabled { // Check if Parity is already running - let addr = format!("{}:{}", cmd.dapps_conf.interface, cmd.dapps_conf.port); + let addr = format!("{}:{}", cmd.signer_conf.interface, cmd.signer_conf.port); if !TcpListener::bind(&addr as &str).is_ok() { - return open_ui(&cmd.dapps_conf, &cmd.signer_conf).map(|_| (false, None)); + return open_ui(&cmd.signer_conf).map(|_| (false, None)); } } @@ -429,11 +424,11 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R updater: updater.clone(), geth_compatibility: cmd.geth_compatibility, dapps_interface: match cmd.dapps_conf.enabled { - true => Some(cmd.dapps_conf.interface.clone()), + true => Some(cmd.http_conf.interface.clone()), false => None, }, dapps_port: match cmd.dapps_conf.enabled { - true => Some(cmd.dapps_conf.port), + true => Some(cmd.http_conf.port), false => None, }, fetch: fetch.clone(), @@ -445,21 +440,19 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R stats: rpc_stats.clone(), }; - // start rpc servers - let http_server = rpc::new_http(cmd.http_conf, &dependencies)?; - let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; - - // the dapps server + // the dapps middleware let dapps_deps = dapps::Dependencies { - apis: deps_for_rpc_apis.clone(), client: client.clone(), sync: sync_provider.clone(), remote: event_loop.raw_remote(), fetch: fetch.clone(), signer: deps_for_rpc_apis.signer_service.clone(), - stats: rpc_stats.clone(), }; - let dapps_server = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; + let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; + + // start rpc servers + let http_server = rpc::new_http(cmd.http_conf.clone(), &dependencies, dapps_middleware)?; + let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; // the signer server let signer_deps = signer::Dependencies { @@ -524,18 +517,18 @@ pub fn 
execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // start ui if cmd.ui { - open_ui(&cmd.dapps_conf, &cmd.signer_conf)?; + open_ui(&cmd.signer_conf)?; } if let Some(dapp) = cmd.dapp { - open_dapp(&cmd.dapps_conf, &dapp)?; + open_dapp(&cmd.dapps_conf, &cmd.http_conf, &dapp)?; } // Handle exit let restart = wait_for_exit(panic_handler, Some(updater), Some(client), can_restart); // drop this stuff as soon as exit detected. - drop((http_server, ipc_server, dapps_server, signer_server, secretstore_key_server, ipfs_server, event_loop)); + drop((http_server, ipc_server, signer_server, secretstore_key_server, ipfs_server, event_loop)); info!("Finishing work, please wait..."); diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 960dc5102..cbfecf366 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -21,6 +21,7 @@ transient-hashmap = "0.4" jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } +jsonrpc-minihttp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index abc51f2ed..247edd8f7 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -29,8 +29,9 @@ extern crate time; extern crate transient_hashmap; extern crate jsonrpc_core; -pub extern crate jsonrpc_http_server as http; -pub extern crate jsonrpc_ipc_server as ipc; +extern crate jsonrpc_http_server as http; +extern crate jsonrpc_minihttp_server as minihttp; +extern crate jsonrpc_ipc_server as ipc; extern crate ethash; extern crate ethcore; @@ -62,10 +63,15 @@ extern crate ethjson; #[cfg(test)] extern crate ethcore_devtools as devtools; +mod metadata; pub mod v1; pub use ipc::{Server as IpcServer, MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext}; -pub use http::{HttpMetaExtractor, Server as HttpServer, Error as HttpServerError, AccessControlAllowOrigin, Host}; +pub use http::{ + hyper, + RequestMiddleware, RequestMiddlewareAction, + AccessControlAllowOrigin, Host, +}; pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, Metadata, Origin, informant, dispatch}; pub use v1::block_import::is_major_importing; @@ -73,26 +79,98 @@ pub use v1::block_import::is_major_importing; use std::net::SocketAddr; use http::tokio_core; +/// RPC HTTP Server instance +pub enum HttpServer { + /// Fast MiniHTTP variant + Mini(minihttp::Server), + /// Hyper variant + Hyper(http::Server), +} + +/// RPC HTTP Server error +#[derive(Debug)] +pub enum HttpServerError { + /// IO error + Io(::std::io::Error), + /// Other hyper error + Hyper(hyper::Error), +} + +impl From for HttpServerError { + fn from(e: http::Error) -> Self { + use self::HttpServerError::*; + match e { + http::Error::Io(io) => Io(io), + http::Error::Other(hyper) => Hyper(hyper), + } + } +} + +impl From for HttpServerError { + fn from(e: minihttp::Error) -> Self { + use self::HttpServerError::*; + match e { + minihttp::Error::Io(io) => Io(io), + } + } +} + +/// HTTP RPC server impl-independent metadata extractor +pub trait HttpMetaExtractor: Send + Sync + 'static { + /// Type of Metadata + type Metadata: jsonrpc_core::Metadata; + /// Extracts metadata from given params. 
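+ /// `origin` is derived from the request's `Origin` header (falling back to "unknown" when absent),
+ /// and `dapps_origin` from the `X-Parity-Origin` header; see `rpc/src/metadata.rs` below.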
+ fn read_metadata(&self, origin: String, dapps_origin: Option) -> Self::Metadata; +} + +/// HTTP server implementation-specific settings. +pub enum HttpSettings { + /// Enable fast minihttp server with given number of threads. + Threads(usize), + /// Enable standard server with optional dapps middleware. + Dapps(Option), +} + /// Start http server asynchronously and returns result with `Server` handle on success or an error. -pub fn start_http( +pub fn start_http( addr: &SocketAddr, cors_domains: http::DomainsValidation, allowed_hosts: http::DomainsValidation, handler: H, remote: tokio_core::reactor::Remote, extractor: T, + settings: HttpSettings, ) -> Result where M: jsonrpc_core::Metadata, S: jsonrpc_core::Middleware, H: Into>, - T: HttpMetaExtractor, + T: HttpMetaExtractor, + R: RequestMiddleware, { - http::ServerBuilder::new(handler) - .event_loop_remote(remote) - .meta_extractor(extractor) - .cors(cors_domains.into()) - .allowed_hosts(allowed_hosts.into()) - .start_http(addr) + Ok(match settings { + HttpSettings::Dapps(middleware) => { + let mut builder = http::ServerBuilder::new(handler) + .event_loop_remote(remote) + .meta_extractor(metadata::HyperMetaExtractor::new(extractor)) + .cors(cors_domains.into()) + .allowed_hosts(allowed_hosts.into()); + + if let Some(dapps) = middleware { + builder = builder.request_middleware(dapps) + } + builder.start_http(addr) + .map(HttpServer::Hyper)? + }, + HttpSettings::Threads(threads) => { + minihttp::ServerBuilder::new(handler) + .threads(threads) + .meta_extractor(metadata::MiniMetaExtractor::new(extractor)) + .cors(cors_domains.into()) + .allowed_hosts(allowed_hosts.into()) + .start_http(addr) + .map(HttpServer::Mini)? + }, + }) } /// Start ipc server asynchronously and returns result with `Server` handle on success or an error. diff --git a/rpc/src/metadata.rs b/rpc/src/metadata.rs new file mode 100644 index 000000000..af3a5d183 --- /dev/null +++ b/rpc/src/metadata.rs @@ -0,0 +1,74 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
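+//! Adapters that bridge the backend-agnostic `HttpMetaExtractor` to the concrete
+//! extractor traits expected by the hyper-based (`http`) and minihttp servers.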
+ +use jsonrpc_core; +use http; +use hyper; +use minihttp; +use HttpMetaExtractor; + +pub struct HyperMetaExtractor { + extractor: T, +} + +impl HyperMetaExtractor { + pub fn new(extractor: T) -> Self { + HyperMetaExtractor { + extractor: extractor, + } + } +} + +impl http::MetaExtractor for HyperMetaExtractor where + T: HttpMetaExtractor, + M: jsonrpc_core::Metadata, +{ + fn read_metadata(&self, req: &hyper::server::Request) -> M { + let origin = req.headers().get::() + .map(|origin| format!("{}://{}", origin.scheme, origin.host)) + .unwrap_or_else(|| "unknown".into()); + let dapps_origin = req.headers().get_raw("x-parity-origin") + .and_then(|raw| raw.one()) + .map(|raw| String::from_utf8_lossy(raw).into_owned()); + self.extractor.read_metadata(origin, dapps_origin) + } +} + +pub struct MiniMetaExtractor { + extractor: T, +} + +impl MiniMetaExtractor { + pub fn new(extractor: T) -> Self { + MiniMetaExtractor { + extractor: extractor, + } + } +} + +impl minihttp::MetaExtractor for MiniMetaExtractor where + T: HttpMetaExtractor, + M: jsonrpc_core::Metadata, +{ + fn read_metadata(&self, req: &minihttp::Req) -> M { + let origin = req.header("origin") + .unwrap_or_else(|| "unknown") + .to_owned(); + let dapps_origin = req.header("x-parity-origin").map(|h| h.to_owned()); + + self.extractor.read_metadata(origin, dapps_origin) + } +} diff --git a/scripts/targets.sh b/scripts/targets.sh index 505875336..040485d85 100644 --- a/scripts/targets.sh +++ b/scripts/targets.sh @@ -5,7 +5,7 @@ export TARGETS=" -p ethash \ -p ethcore \ -p ethcore-bigint\ - -p ethcore-dapps \ + -p parity-dapps \ -p ethcore-rpc \ -p ethcore-signer \ -p ethcore-util \ From c0c06fdc53a912cb39f970b9086283aeee89b27b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 3 Apr 2017 12:13:51 +0300 Subject: [PATCH 6/6] Secretstore over network (#4974) * ECDKG protocol prototype * added test for enc/dec math * get rid of decryption_session * added licenses * fix after merge * get rid of unused serde dependency * doc * decryption session [without commutative enc] * failed_dec_session * fixed tests * added commen * added more decryption session tests * helper to localize an issue * more computations to localize error * decryption_session::SessionParams * added tests for EC math to localize problem * secretstore network transport * encryption_session_works_over_network * network errors processing * connecting to KeyServer * licenses * get rid of debug println-s * fixed secretstore args * encryption results are stored in KS database * decryption protocol works over network * enc/dec Session traits * fixing warnings * fix after merge * finally fixed -of-N-scheme * temporary commented test * 1-of-N works in math * scheme 1-of-N works * remove unnecessary unsafety * fixed grumbles * fix grumbles * lost files --- Cargo.lock | 11 + Cargo.toml | 1 + ethcrypto/src/lib.rs | 6 + ethkey/src/keypair.rs | 1 + parity/main.rs | 1 + parity/secretstore.rs | 32 +- secret_store/Cargo.toml | 10 + secret_store/src/http_listener.rs | 113 ++- secret_store/src/key_server.rs | 220 +++-- .../src/key_server_cluster/cluster.rs | 926 +++++++++++++++++- .../key_server_cluster/decryption_session.rs | 387 +++++--- .../key_server_cluster/encryption_session.rs | 666 +++++++++---- .../src/key_server_cluster/io/deadline.rs | 85 ++ .../src/key_server_cluster/io/handshake.rs | 320 ++++++ .../src/key_server_cluster/io/message.rs | 247 +++++ secret_store/src/key_server_cluster/io/mod.rs | 34 + .../src/key_server_cluster/io/read_header.rs | 44 + 
.../src/key_server_cluster/io/read_message.rs | 86 ++ .../src/key_server_cluster/io/read_payload.rs | 64 ++ .../io/shared_tcp_stream.rs | 60 ++ .../key_server_cluster/io/write_message.rs | 70 ++ secret_store/src/key_server_cluster/math.rs | 34 +- .../src/key_server_cluster/message.rs | 270 ++++- secret_store/src/key_server_cluster/mod.rs | 101 +- .../net/accept_connection.rs | 63 ++ .../src/key_server_cluster/net/connect.rs | 90 ++ .../src/key_server_cluster/net/connection.rs | 32 + .../src/key_server_cluster/net/mod.rs | 23 + secret_store/src/key_storage.rs | 125 ++- secret_store/src/lib.rs | 23 +- secret_store/src/serialization.rs | 260 +++++ secret_store/src/traits.rs | 2 + secret_store/src/types/all.rs | 61 +- 33 files changed, 3894 insertions(+), 574 deletions(-) create mode 100644 secret_store/src/key_server_cluster/io/deadline.rs create mode 100644 secret_store/src/key_server_cluster/io/handshake.rs create mode 100644 secret_store/src/key_server_cluster/io/message.rs create mode 100644 secret_store/src/key_server_cluster/io/mod.rs create mode 100644 secret_store/src/key_server_cluster/io/read_header.rs create mode 100644 secret_store/src/key_server_cluster/io/read_message.rs create mode 100644 secret_store/src/key_server_cluster/io/read_payload.rs create mode 100644 secret_store/src/key_server_cluster/io/shared_tcp_stream.rs create mode 100644 secret_store/src/key_server_cluster/io/write_message.rs create mode 100644 secret_store/src/key_server_cluster/net/accept_connection.rs create mode 100644 secret_store/src/key_server_cluster/net/connect.rs create mode 100644 secret_store/src/key_server_cluster/net/connection.rs create mode 100644 secret_store/src/key_server_cluster/net/mod.rs create mode 100644 secret_store/src/serialization.rs diff --git a/Cargo.lock b/Cargo.lock index 83e643900..aecf8960f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,6 +23,7 @@ dependencies = [ "ethcore-signer 1.7.0", "ethcore-stratum 1.7.0", "ethcore-util 1.7.0", + "ethkey 0.2.0", "ethsync 1.7.0", "evmbin 0.1.0", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -633,6 +634,7 @@ dependencies = [ name = "ethcore-secretstore" version = "1.0.0" dependencies = [ + "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.7.0", "ethcore-ipc 1.7.0", "ethcore-ipc-codegen 1.7.0", @@ -640,9 +642,18 @@ dependencies = [ "ethcore-util 1.7.0", "ethcrypto 0.1.0", "ethkey 0.2.0", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/Cargo.toml b/Cargo.toml index b82490e88..1d0e27a71 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -40,6 +40,7 @@ ethcore-ipc-hypervisor = { path = "ipc/hypervisor" } ethcore-light = { path = "ethcore/light" } ethcore-logger = { path = "logger" } ethcore-stratum = { path = "stratum" } +ethkey = { path = "ethkey" } evmbin = { path = "evmbin" } rlp = { path = "util/rlp" } rpc-cli = { path = "rpc_cli" } diff --git a/ethcrypto/src/lib.rs b/ethcrypto/src/lib.rs index a4d426b54..9c1352087 100644 --- a/ethcrypto/src/lib.rs +++ b/ethcrypto/src/lib.rs @@ -78,6 +78,12 @@ impl fmt::Display for Error { } } +impl Into for Error { + fn into(self) -> String { + format!("{}", self) + } +} + impl From for Error { fn from(e: SecpError) -> Self { Error::Secp(e) diff --git a/ethkey/src/keypair.rs b/ethkey/src/keypair.rs index b25664cd7..f883c4738 100644 --- a/ethkey/src/keypair.rs +++ b/ethkey/src/keypair.rs @@ -27,6 +27,7 @@ pub fn public_to_address(public: &Public) -> Address { result } +#[derive(Clone)] /// secp256k1 key pair pub struct KeyPair { secret: Secret, diff --git a/parity/main.rs b/parity/main.rs index 4b6dc6dab..1063571a9 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -53,6 +53,7 @@ extern crate ethcore_logger; extern crate ethcore_rpc; extern crate ethcore_signer; extern crate ethcore_util as util; +extern crate ethkey; extern crate ethsync; extern crate parity_hash_fetch as hash_fetch; extern crate parity_ipfs_api; diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 79a209504..13d6d28d2 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -53,6 +53,7 @@ mod server { #[cfg(feature="secretstore")] mod server { + use ethkey; use ethcore_secretstore; use super::{Configuration, Dependencies}; @@ -64,10 +65,35 @@ mod server { impl KeyServer { /// Create new key server pub fn new(conf: Configuration, _deps: Dependencies) -> Result { + let key_pairs = vec![ + ethkey::KeyPair::from_secret("6c26a76e9b31048d170873a791401c7e799a11f0cefc0171cc31a49800967509".parse().unwrap()).unwrap(), + ethkey::KeyPair::from_secret("7e94018b3731afdb3b4e6f4c3e179475640166da12e1d1b0c7d80729b1a5b452".parse().unwrap()).unwrap(), + ethkey::KeyPair::from_secret("5ab6ed2a52c33142380032c39a03a86b12eacb3fa4b53bc16d84f51318156f8c".parse().unwrap()).unwrap(), + ]; let conf = ethcore_secretstore::ServiceConfiguration { - listener_addr: conf.interface, - listener_port: conf.port, - data_path: conf.data_path, + listener_address: ethcore_secretstore::NodeAddress { + address: conf.interface.clone(), + port: conf.port, + }, + data_path: conf.data_path.clone(), + // TODO: this is test configuration. how it will be configured in production? 
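+ // (The three hard-coded key pairs above form a static three-node development cluster:
+ // each node picks its own secret by `port - 8082`, serves cluster traffic on `port + 10`,
+ // and expects its peers on ports 8092-8094, matching the `nodes` map built below.)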
+ cluster_config: ethcore_secretstore::ClusterConfiguration { + threads: 4, + self_private: (***key_pairs[(conf.port - 8082) as usize].secret()).into(), + listener_address: ethcore_secretstore::NodeAddress { + address: conf.interface.clone(), + port: conf.port + 10, + }, + nodes: key_pairs.iter().enumerate().map(|(i, kp)| (kp.public().clone(), + ethcore_secretstore::NodeAddress { + address: conf.interface.clone(), + port: 8082 + 10 + (i as u16), + })).collect(), + allow_connecting_to_higher_nodes: true, + encryption_config: ethcore_secretstore::EncryptionConfiguration { + key_check_timeout_ms: 1000, + }, + } }; let key_server = ethcore_secretstore::start(conf) diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index eff7c1ef0..fba76804b 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -10,9 +10,19 @@ build = "build.rs" ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] +byteorder = "1.0" log = "0.3" parking_lot = "0.4" hyper = { version = "0.10", default-features = false } +serde = "0.9" +serde_json = "0.9" +serde_derive = "0.9" +futures = "0.1" +futures-cpupool = "0.1" +rustc-serialize = "0.3" +tokio-core = "0.1" +tokio-service = "0.1" +tokio-proto = "0.1" url = "1.0" ethcore-devtools = { path = "../devtools" } ethcore-util = { path = "../util" } diff --git a/secret_store/src/http_listener.rs b/secret_store/src/http_listener.rs index 92799d221..79fe71330 100644 --- a/secret_store/src/http_listener.rs +++ b/secret_store/src/http_listener.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::str::FromStr; use std::sync::Arc; use hyper::header; use hyper::uri::RequestUri; @@ -39,7 +38,9 @@ pub struct KeyServerHttpListener { enum Request { /// Invalid request Invalid, - /// Request encryption key of given document for given requestor + /// Generate encryption key. + GenerateDocumentKey(DocumentAddress, RequestSignature, usize), + /// Request encryption key of given document for given requestor. 
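+ /// (The key is returned ECIES-encrypted with the requestor's public key, which is recovered from the signature.)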
GetDocumentKey(DocumentAddress, RequestSignature), } @@ -63,9 +64,9 @@ impl KeyServerHttpListener where T: KeyServer + 'static { handler: shared_handler.clone(), }; - let listener_addr: &str = &format!("{}:{}", config.listener_addr, config.listener_port); - let http_server = HttpServer::http(&listener_addr).unwrap(); - let http_server = http_server.handle(handler).unwrap(); + let listener_addr: &str = &format!("{}:{}", config.listener_address.address, config.listener_address.port); + let http_server = HttpServer::http(&listener_addr).expect("cannot start HttpServer"); + let http_server = http_server.handle(handler).expect("cannot start HttpServer"); let listener = KeyServerHttpListener { _http_server: http_server, handler: shared_handler, @@ -75,6 +76,10 @@ impl KeyServerHttpListener where T: KeyServer + 'static { } impl KeyServer for KeyServerHttpListener where T: KeyServer + 'static { + fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result { + self.handler.key_server.generate_document_key(signature, document, threshold) + } + fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { self.handler.key_server.document_key(signature, document) } @@ -82,95 +87,103 @@ impl KeyServer for KeyServerHttpListener where T: KeyServer + 'static { impl HttpHandler for KeyServerHttpHandler where T: KeyServer + 'static { fn handle(&self, req: HttpRequest, mut res: HttpResponse) { - if req.method != HttpMethod::Get { - warn!(target: "secretstore", "Ignoring {}-request {}", req.method, req.uri); - *res.status_mut() = HttpStatusCode::NotFound; - return; - } - if req.headers.has::() { warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method, req.uri); *res.status_mut() = HttpStatusCode::NotFound; return; } - match req.uri { - RequestUri::AbsolutePath(ref path) => match parse_request(&path) { - Request::GetDocumentKey(document, signature) => { - let document_key = self.handler.key_server.document_key(&signature, &document) + let req_method = req.method.clone(); + let req_uri = req.uri.clone(); + match &req_uri { + &RequestUri::AbsolutePath(ref path) => match parse_request(&req_method, &path) { + Request::GenerateDocumentKey(document, signature, threshold) => { + return_document_key(req, res, self.handler.key_server.generate_document_key(&signature, &document, threshold) .map_err(|err| { - warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req.uri, err); + warn!(target: "secretstore", "GenerateDocumentKey request {} has failed with: {}", req_uri, err); err - }); - match document_key { - Ok(document_key) => { - let document_key = document_key.to_hex().into_bytes(); - res.headers_mut().set(header::ContentType::plaintext()); - if let Err(err) = res.send(&document_key) { - // nothing to do, but log error - warn!(target: "secretstore", "GetDocumentKey request {} response has failed with: {}", req.uri, err); - } - }, - Err(Error::BadSignature) => *res.status_mut() = HttpStatusCode::BadRequest, - Err(Error::AccessDenied) => *res.status_mut() = HttpStatusCode::Forbidden, - Err(Error::DocumentNotFound) => *res.status_mut() = HttpStatusCode::NotFound, - Err(Error::Database(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, - Err(Error::Internal(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, - } + })); + }, + Request::GetDocumentKey(document, signature) => { + return_document_key(req, res, self.handler.key_server.document_key(&signature, 
&document) + .map_err(|err| { + warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req_uri, err); + err + })); }, Request::Invalid => { - warn!(target: "secretstore", "Ignoring invalid {}-request {}", req.method, req.uri); + warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); *res.status_mut() = HttpStatusCode::BadRequest; }, }, _ => { - warn!(target: "secretstore", "Ignoring invalid {}-request {}", req.method, req.uri); + warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); *res.status_mut() = HttpStatusCode::NotFound; }, }; } } -fn parse_request(uri_path: &str) -> Request { +fn return_document_key(req: HttpRequest, mut res: HttpResponse, document_key: Result) { + match document_key { + Ok(document_key) => { + let document_key = document_key.to_hex().into_bytes(); + res.headers_mut().set(header::ContentType::plaintext()); + if let Err(err) = res.send(&document_key) { + // nothing to do, but to log an error + warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err); + } + }, + Err(Error::BadSignature) => *res.status_mut() = HttpStatusCode::BadRequest, + Err(Error::AccessDenied) => *res.status_mut() = HttpStatusCode::Forbidden, + Err(Error::DocumentNotFound) => *res.status_mut() = HttpStatusCode::NotFound, + Err(Error::Database(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, + Err(Error::Internal(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, + } +} + +fn parse_request(method: &HttpMethod, uri_path: &str) -> Request { let uri_path = match percent_decode(uri_path.as_bytes()).decode_utf8() { Ok(path) => path, Err(_) => return Request::Invalid, }; let path: Vec = uri_path.trim_left_matches('/').split('/').map(Into::into).collect(); - if path.len() != 2 || path[0].is_empty() || path[1].is_empty() { + if path.len() < 2 || path[0].is_empty() || path[1].is_empty() { return Request::Invalid; } - let document = DocumentAddress::from_str(&path[0]); - let signature = RequestSignature::from_str(&path[1]); - match (document, signature) { - (Ok(document), Ok(signature)) => Request::GetDocumentKey(document, signature), + let args_len = path.len(); + let document = path[0].parse(); + let signature = path[1].parse(); + let threshold = (if args_len > 2 { &path[2] } else { "" }).parse(); + match (args_len, method, document, signature, threshold) { + (3, &HttpMethod::Post, Ok(document), Ok(signature), Ok(threshold)) => Request::GenerateDocumentKey(document, signature, threshold), + (2, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKey(document, signature), _ => Request::Invalid, } } #[cfg(test)] mod tests { - use std::str::FromStr; - use super::super::RequestSignature; + use hyper::method::Method as HttpMethod; use super::{parse_request, Request}; #[test] fn parse_request_successful() { - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), - 
RequestSignature::from_str("a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01").unwrap())); - assert_eq!(parse_request("/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); + assert_eq!(parse_request(&HttpMethod::Get, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), - RequestSignature::from_str("a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01").unwrap())); + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); } #[test] fn parse_request_failed() { - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001"), Request::Invalid); - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001/"), Request::Invalid); - assert_eq!(parse_request("/a/b"), Request::Invalid); - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/a/b"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid); } } diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 32ac48031..553b49bfe 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -14,42 +14,78 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
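The listener now distinguishes the two operations by HTTP method and path arity, as exercised by the tests above. A minimal sketch of the accepted request shapes (these helper functions are illustrative only; `document` and `signature` are hex strings and `threshold` is a decimal number, exactly as in the tests):

fn get_document_key_path(document: &str, signature: &str) -> String {
	// GET /{document}/{signature} -> Request::GetDocumentKey
	format!("/{}/{}", document, signature)
}

fn generate_document_key_path(document: &str, signature: &str, threshold: usize) -> String {
	// POST /{document}/{signature}/{threshold} -> Request::GenerateDocumentKey
	format!("/{}/{}/{}", document, signature, threshold)
}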
+use std::thread; +use std::sync::Arc; +use std::sync::mpsc; +use futures::{self, Future}; +use parking_lot::Mutex; +use tokio_core::reactor::Core; use ethcrypto; use ethkey; use super::acl_storage::AclStorage; use super::key_storage::KeyStorage; +use key_server_cluster::ClusterCore; use traits::KeyServer; -use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey}; +use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, ClusterConfiguration}; +use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration}; /// Secret store key server implementation -pub struct KeyServerImpl { - acl_storage: T, - key_storage: U, +pub struct KeyServerImpl { + data: Arc>, } -impl KeyServerImpl where T: AclStorage, U: KeyStorage { +/// Secret store key server data. +pub struct KeyServerCore { + close: Option>, + handle: Option>, + cluster: Option>, +} + +impl KeyServerImpl { /// Create new key server instance - pub fn new(acl_storage: T, key_storage: U) -> Self { - KeyServerImpl { - acl_storage: acl_storage, - key_storage: key_storage, - } + pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + Ok(KeyServerImpl { + data: Arc::new(Mutex::new(KeyServerCore::new(config, acl_storage, key_storage)?)), + }) + } + + #[cfg(test)] + /// Get cluster client reference. + pub fn cluster(&self) -> Arc { + self.data.lock().cluster.clone() + .expect("cluster can be None in test cfg only; test cfg is for correct tests; qed") } } -impl KeyServer for KeyServerImpl where T: AclStorage, U: KeyStorage { +impl KeyServer for KeyServerImpl { + fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result { + // recover requestor' public key from signature + let public = ethkey::recover(signature, document) + .map_err(|_| Error::BadSignature)?; + + // generate document key + let data = self.data.lock(); + let encryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed") + .new_encryption_session(document.clone(), threshold)?; + let document_key = encryption_session.wait()?; + + // encrypt document key with requestor public key + let document_key = ethcrypto::ecies::encrypt_single_message(&public, &document_key) + .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; + Ok(document_key) + } + fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { // recover requestor' public key from signature let public = ethkey::recover(signature, document) .map_err(|_| Error::BadSignature)?; - // check that requestor has access to the document - if !self.acl_storage.check(&public, document)? 
{ - return Err(Error::AccessDenied); - } + // decrypt document key + let data = self.data.lock(); + let decryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed") + .new_decryption_session(document.clone(), signature.clone())?; + let document_key = decryption_session.wait()?; - // read unencrypted document key - let document_key = self.key_storage.get(document)?; // encrypt document key with requestor public key let document_key = ethcrypto::ecies::encrypt_single_message(&public, &document_key) .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; @@ -57,68 +93,132 @@ impl KeyServer for KeyServerImpl where T: AclStorage, U: KeyStorage } } +impl KeyServerCore { + pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + let config = NetClusterConfiguration { + threads: config.threads, + self_key_pair: ethkey::KeyPair::from_secret_slice(&config.self_private)?, + listen_address: (config.listener_address.address.clone(), config.listener_address.port), + nodes: config.nodes.iter() + .map(|(node_id, node_address)| (node_id.clone(), (node_address.address.clone(), node_address.port))) + .collect(), + allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, + encryption_config: config.encryption_config.clone(), + acl_storage: acl_storage, + key_storage: key_storage, + }; + + let (stop, stopped) = futures::oneshot(); + let (tx, rx) = mpsc::channel(); + let handle = thread::spawn(move || { + let mut el = match Core::new() { + Ok(el) => el, + Err(e) => { + tx.send(Err(Error::Internal(format!("error initializing event loop: {}", e)))).expect("Rx is blocking upper thread."); + return; + }, + }; + + let cluster = ClusterCore::new(el.handle(), config); + let cluster_client = cluster.and_then(|c| c.run().map(|_| c.client())); + tx.send(cluster_client.map_err(Into::into)).expect("Rx is blocking upper thread."); + let _ = el.run(futures::empty().select(stopped)); + }); + let cluster = rx.recv().map_err(|e| Error::Internal(format!("error initializing event loop: {}", e)))??; + + Ok(KeyServerCore { + close: Some(stop), + handle: Some(handle), + cluster: Some(cluster), + }) + } +} + +impl Drop for KeyServerCore { + fn drop(&mut self) { + self.close.take().map(|v| v.send(())); + self.handle.take().map(|h| h.join()); + } +} + #[cfg(test)] mod tests { - use std::str::FromStr; + use std::time; + use std::sync::Arc; use ethcrypto; - use ethkey::{self, Secret}; + use ethkey::{self, Random, Generator}; use acl_storage::DummyAclStorage; - use key_storage::KeyStorage; use key_storage::tests::DummyKeyStorage; - use super::super::{Error, RequestSignature, DocumentAddress}; + use types::all::{ClusterConfiguration, NodeAddress, EncryptionConfiguration, DocumentEncryptedKey, DocumentKey}; + use super::super::{RequestSignature, DocumentAddress}; use super::{KeyServer, KeyServerImpl}; const DOCUMENT1: &'static str = "0000000000000000000000000000000000000000000000000000000000000001"; - const DOCUMENT2: &'static str = "0000000000000000000000000000000000000000000000000000000000000002"; - const KEY1: &'static str = "key1"; const PRIVATE1: &'static str = "03055e18a8434dcc9061cc1b81c4ef84dc7cf4574d755e52cdcf0c8898b25b11"; - const PUBLIC2: &'static str = "dfe62f56bb05fbd85b485bac749f3410309e24b352bac082468ce151e9ddb94fa7b5b730027fe1c7c5f3d5927621d269f91aceb5caa3c7fe944677a22f88a318"; - const PRIVATE2: &'static str = "0eb3816f4f705fa0fd952fb27b71b8c0606f09f4743b5b65cbc375bd569632f2"; - - 
fn create_key_server() -> KeyServerImpl { - let acl_storage = DummyAclStorage::default(); - let key_storage = DummyKeyStorage::default(); - key_storage.insert(DOCUMENT1.into(), KEY1.into()).unwrap(); - acl_storage.prohibit(PUBLIC2.into(), DOCUMENT1.into()); - KeyServerImpl::new(acl_storage, key_storage) - } fn make_signature(secret: &str, document: &'static str) -> RequestSignature { - let secret = Secret::from_str(secret).unwrap(); + let secret = secret.parse().unwrap(); let document: DocumentAddress = document.into(); ethkey::sign(&secret, &document).unwrap() } - #[test] - fn document_key_succeeds() { - let key_server = create_key_server(); - let signature = make_signature(PRIVATE1, DOCUMENT1); - let document_key = key_server.document_key(&signature, &DOCUMENT1.into()).unwrap(); - let document_key = ethcrypto::ecies::decrypt_single_message(&Secret::from_str(PRIVATE1).unwrap(), &document_key); - assert_eq!(document_key, Ok(KEY1.into())); + fn decrypt_document_key(secret: &str, document_key: DocumentEncryptedKey) -> DocumentKey { + let secret = secret.parse().unwrap(); + ethcrypto::ecies::decrypt_single_message(&secret, &document_key).unwrap() } #[test] - fn document_key_fails_when_bad_signature() { - let key_server = create_key_server(); - let signature = RequestSignature::default(); - let document_key = key_server.document_key(&signature, &DOCUMENT1.into()); - assert_eq!(document_key, Err(Error::BadSignature)); - } + fn document_key_generation_and_retrievement_works_over_network() { + //::util::log::init_log(); - #[test] - fn document_key_fails_when_acl_check_fails() { - let key_server = create_key_server(); - let signature = make_signature(PRIVATE2, DOCUMENT1); - let document_key = key_server.document_key(&signature, &DOCUMENT1.into()); - assert_eq!(document_key, Err(Error::AccessDenied)); - } + let num_nodes = 3; + let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); + let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { + threads: 1, + self_private: (***key_pairs[i].secret()).into(), + listener_address: NodeAddress { + address: "127.0.0.1".into(), + port: 6060 + (i as u16), + }, + nodes: key_pairs.iter().enumerate().map(|(j, kp)| (kp.public().clone(), + NodeAddress { + address: "127.0.0.1".into(), + port: 6060 + (j as u16), + })).collect(), + allow_connecting_to_higher_nodes: false, + encryption_config: EncryptionConfiguration { + key_check_timeout_ms: 10, + }, + }).collect(); + let key_servers: Vec<_> = configs.into_iter().map(|cfg| + KeyServerImpl::new(&cfg, Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap() + ).collect(); - #[test] - fn document_key_fails_when_document_not_found() { - let key_server = create_key_server(); - let signature = make_signature(PRIVATE1, DOCUMENT2); - let document_key = key_server.document_key(&signature, &DOCUMENT2.into()); - assert_eq!(document_key, Err(Error::DocumentNotFound)); + // wait until connections are established + let start = time::Instant::now(); + loop { + if key_servers.iter().all(|ks| ks.cluster().cluster_state().connected.len() == num_nodes - 1) { + break; + } + if time::Instant::now() - start > time::Duration::from_millis(30000) { + panic!("connections are not established in 30000ms"); + } + } + + let test_cases = [0, 1, 2]; + for threshold in &test_cases { + // generate document key + // TODO: it is an error that we can regenerate key for the same DOCUMENT + let signature = make_signature(PRIVATE1, DOCUMENT1); + let generated_key = 
key_servers[0].generate_document_key(&signature, &DOCUMENT1.into(), *threshold).unwrap(); + let generated_key = decrypt_document_key(PRIVATE1, generated_key); + + // now let's try to retrieve key back + for key_server in key_servers.iter() { + let retrieved_key = key_server.document_key(&signature, &DOCUMENT1.into()).unwrap(); + let retrieved_key = decrypt_document_key(PRIVATE1, retrieved_key); + assert_eq!(retrieved_key, generated_key); + } + } } } diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 5f6c99808..388a79aef 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -14,11 +14,42 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use key_server_cluster::{Error, NodeId}; -use key_server_cluster::message::Message; +use std::io; +use std::time; +use std::sync::Arc; +use std::collections::{BTreeMap, BTreeSet, VecDeque}; +use std::collections::btree_map::Entry; +use std::net::{SocketAddr, IpAddr}; +use futures::{finished, failed, Future, Stream, BoxFuture}; +use futures_cpupool::CpuPool; +use parking_lot::{RwLock, Mutex}; +use tokio_core::io::IoFuture; +use tokio_core::reactor::{Handle, Remote, Timeout, Interval}; +use tokio_core::net::{TcpListener, TcpStream}; +use ethkey::{Secret, KeyPair, Signature, Random, Generator}; +use key_server_cluster::{Error, NodeId, SessionId, EncryptionConfiguration, AclStorage, KeyStorage}; +use key_server_cluster::message::{self, Message, ClusterMessage, EncryptionMessage, DecryptionMessage}; +use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl, DecryptionSessionId, + SessionParams as DecryptionSessionParams, Session as DecryptionSession}; +use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionState as EncryptionSessionState, + SessionParams as EncryptionSessionParams, Session as EncryptionSession}; +use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message}; +use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection}; + +pub type BoxedEmptyFuture = BoxFuture<(), ()>; + +/// Cluster interface for external clients. +pub trait ClusterClient: Send + Sync { + /// Get cluster state. + fn cluster_state(&self) -> ClusterState; + /// Start new encryption session. + fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result, Error>; + /// Start new decryption session. + fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result, Error>; +} /// Cluster access for single encryption/decryption participant. -pub trait Cluster { +pub trait Cluster: Send + Sync { /// Broadcast message to all other nodes. fn broadcast(&self, message: Message) -> Result<(), Error>; /// Send message to given node. @@ -27,13 +58,841 @@ pub trait Cluster { fn blacklist(&self, node: &NodeId); } +#[derive(Clone)] +/// Cluster initialization parameters. +pub struct ClusterConfiguration { + /// Number of threads reserved by cluster. + pub threads: usize, + /// Allow connecting to 'higher' nodes. + pub allow_connecting_to_higher_nodes: bool, + /// KeyPair this node holds. + pub self_key_pair: KeyPair, + /// Interface to listen to. + pub listen_address: (String, u16), + /// Cluster nodes. 
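+ /// (Keyed by node public key; the value is the node's cluster listen address and port.)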
+ pub nodes: BTreeMap, + /// Encryption session configuration. + pub encryption_config: EncryptionConfiguration, + /// Reference to key storage + pub key_storage: Arc, + /// Reference to ACL storage + pub acl_storage: Arc, +} + +/// Cluster state. +pub struct ClusterState { + /// Nodes, to which connections are established. + pub connected: BTreeSet, +} + +/// Network cluster implementation. +pub struct ClusterCore { + /// Handle to the event loop. + handle: Handle, + /// Listen address. + listen_address: SocketAddr, + /// Cluster data. + data: Arc, +} + +/// Network cluster client interface implementation. +pub struct ClusterClientImpl { + /// Cluster data. + data: Arc, +} + +/// Network cluster view. It is a communication channel, required in single session. +pub struct ClusterView { + core: Arc>, +} + +/// Cross-thread shareable cluster data. +pub struct ClusterData { + /// Cluster configuration. + config: ClusterConfiguration, + /// Handle to the event loop. + handle: Remote, + /// Handle to the cpu thread pool. + pool: CpuPool, + /// KeyPair this node holds. + self_key_pair: KeyPair, + /// Connections data. + connections: ClusterConnections, + /// Active sessions data. + sessions: ClusterSessions, +} + +/// Connections that are forming the cluster. +pub struct ClusterConnections { + /// Self node id. + pub self_node_id: NodeId, + /// All known other key servers. + pub nodes: BTreeMap, + /// Active connections to key servers. + pub connections: RwLock>>, +} + +/// Active sessions on this cluster. +pub struct ClusterSessions { + /// Self node id. + pub self_node_id: NodeId, + /// Reference to key storage + pub key_storage: Arc, + /// Reference to ACL storage + pub acl_storage: Arc, + /// Active encryption sessions. + pub encryption_sessions: RwLock>, + /// Active decryption sessions. + pub decryption_sessions: RwLock>, +} + +/// Encryption session and its message queue. +pub struct QueuedEncryptionSession { + /// Encryption session. + pub session: Arc, + /// Messages queue. + pub queue: VecDeque<(NodeId, EncryptionMessage)>, +} + +/// Decryption session and its message queue. +pub struct QueuedDecryptionSession { + /// Decryption session. + pub session: Arc, + /// Messages queue. + pub queue: VecDeque<(NodeId, DecryptionMessage)>, +} + +/// Cluster view core. +struct ClusterViewCore { + /// Cluster reference. + cluster: Arc, + /// Subset of nodes, required for this session. + nodes: BTreeSet, +} + +/// Connection to single node. +pub struct Connection { + /// Node id. + node_id: NodeId, + /// Node address. + node_address: SocketAddr, + /// Is inbound connection? + is_inbound: bool, + /// Tcp stream. + stream: SharedTcpStream, + /// Connection key. + key: Secret, + /// Last message time. + last_message_time: Mutex, +} + +impl ClusterCore { + pub fn new(handle: Handle, config: ClusterConfiguration) -> Result, Error> { + let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?; + let connections = ClusterConnections::new(&config)?; + let sessions = ClusterSessions::new(&config); + let data = ClusterData::new(&handle, config, connections, sessions); + + Ok(Arc::new(ClusterCore { + handle: handle, + listen_address: listen_address, + data: data, + })) + } + + /// Create new client interface. + pub fn client(&self) -> Arc { + Arc::new(ClusterClientImpl::new(self.data.clone())) + } + + #[cfg(test)] + /// Get cluster configuration. + pub fn config(&self) -> &ClusterConfiguration { + &self.data.config + } + + #[cfg(test)] + /// Get connection to given node. 
+ pub fn connection(&self, node: &NodeId) -> Option> { + self.data.connection(node) + } + + /// Run cluster + pub fn run(&self) -> Result<(), Error> { + // try to connect to every other peer + ClusterCore::connect_disconnected_nodes(self.data.clone()); + + // schedule maintain procedures + ClusterCore::schedule_maintain(&self.handle, self.data.clone()); + + // start listening for incoming connections + self.handle.spawn(ClusterCore::listen(&self.handle, self.data.clone(), self.listen_address.clone())?); + + Ok(()) + } + + /// Connect to peer. + fn connect(data: Arc, node_address: SocketAddr) { + data.handle.clone().spawn(move |handle| { + data.pool.clone().spawn(ClusterCore::connect_future(handle, data, node_address)) + }) + } + + /// Connect to socket using given context and handle. + fn connect_future(handle: &Handle, data: Arc, node_address: SocketAddr) -> BoxedEmptyFuture { + let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect(); + net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes) + .then(move |result| ClusterCore::process_connection_result(data, false, result)) + .then(|_| finished(())) + .boxed() + } + + /// Start listening for incoming connections. + fn listen(handle: &Handle, data: Arc, listen_address: SocketAddr) -> Result { + Ok(TcpListener::bind(&listen_address, &handle)? + .incoming() + .and_then(move |(stream, node_address)| { + ClusterCore::accept_connection(data.clone(), stream, node_address); + Ok(()) + }) + .for_each(|_| Ok(())) + .then(|_| finished(())) + .boxed()) + } + + /// Accept connection. + fn accept_connection(data: Arc, stream: TcpStream, node_address: SocketAddr) { + data.handle.clone().spawn(move |handle| { + data.pool.clone().spawn(ClusterCore::accept_connection_future(handle, data, stream, node_address)) + }) + } + + /// Accept connection future. + fn accept_connection_future(handle: &Handle, data: Arc, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture { + let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect(); + net_accept_connection(node_address, stream, handle, data.self_key_pair.clone(), disconnected_nodes) + .then(move |result| ClusterCore::process_connection_result(data, true, result)) + .then(|_| finished(())) + .boxed() + } + + /// Schedule mainatain procedures. + fn schedule_maintain(handle: &Handle, data: Arc) { + // TODO: per-session timeouts (node can respond to messages, but ignore sessions messages) + let (d1, d2, d3) = (data.clone(), data.clone(), data.clone()); + let interval: BoxedEmptyFuture = Interval::new(time::Duration::new(10, 0), handle) + .expect("failed to create interval") + .and_then(move |_| Ok(trace!(target: "secretstore_net", "{}: executing maintain procedures", d1.self_key_pair.public()))) + .and_then(move |_| Ok(ClusterCore::keep_alive(d2.clone()))) + .and_then(move |_| Ok(ClusterCore::connect_disconnected_nodes(d3.clone()))) + .for_each(|_| Ok(())) + .then(|_| finished(())) + .boxed(); + + data.spawn(interval); + } + + /// Called for every incomming mesage. 
+ fn process_connection_messages(data: Arc, connection: Arc) -> IoFuture> { + connection + .read_message() + .then(move |result| + match result { + Ok((_, Ok(message))) => { + ClusterCore::process_connection_message(data.clone(), connection.clone(), message); + // continue serving connection + data.spawn(ClusterCore::process_connection_messages(data.clone(), connection)); + finished(Ok(())).boxed() + }, + Ok((_, Err(err))) => { + warn!(target: "secretstore_net", "{}: protocol error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); + // continue serving connection + data.spawn(ClusterCore::process_connection_messages(data.clone(), connection)); + finished(Err(err)).boxed() + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: network error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); + // close connection + data.connections.remove(connection.node_id(), connection.is_inbound()); + failed(err).boxed() + }, + } + ).boxed() + } + + /// Send keepalive messages to every othe node. + fn keep_alive(data: Arc) { + for connection in data.connections.active_connections() { + let last_message_diff = time::Instant::now() - connection.last_message_time(); + if last_message_diff > time::Duration::from_secs(60) { + data.connections.remove(connection.node_id(), connection.is_inbound()); + data.sessions.on_connection_timeout(connection.node_id()); + } + else if last_message_diff > time::Duration::from_secs(30) { + data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {})))); + } + } + } + + /// Try to connect to every disconnected node. + fn connect_disconnected_nodes(data: Arc) { + for (node_id, node_address) in data.connections.disconnected_nodes() { + if data.config.allow_connecting_to_higher_nodes || data.self_key_pair.public() < &node_id { + ClusterCore::connect(data.clone(), node_address); + } + } + } + + /// Process connection future result. + fn process_connection_result(data: Arc, is_inbound: bool, result: Result>, io::Error>) -> IoFuture> { + match result { + Ok(DeadlineStatus::Meet(Ok(connection))) => { + let connection = Connection::new(is_inbound, connection); + if data.connections.insert(connection.clone()) { + ClusterCore::process_connection_messages(data.clone(), connection) + } else { + finished(Ok(())).boxed() + } + }, + Ok(DeadlineStatus::Meet(Err(_))) => { + finished(Ok(())).boxed() + }, + Ok(DeadlineStatus::Timeout) => { + finished(Ok(())).boxed() + }, + Err(_) => { + // network error + finished(Ok(())).boxed() + }, + } + } + + /// Process single message from the connection. + fn process_connection_message(data: Arc, connection: Arc, message: Message) { + connection.set_last_message_time(time::Instant::now()); + trace!(target: "secretstore_net", "{}: processing message {} from {}", data.self_key_pair.public(), message, connection.node_id()); + match message { + Message::Encryption(message) => ClusterCore::process_encryption_message(data, connection, message), + Message::Decryption(message) => ClusterCore::process_decryption_message(data, connection, message), + Message::Cluster(message) => ClusterCore::process_cluster_message(data, connection, message), + } + } + + /// Process single encryption message from the connection. 
+ fn process_encryption_message(data: Arc, connection: Arc, mut message: EncryptionMessage) { + let mut sender = connection.node_id().clone(); + let mut is_queued_message = false; + let session_id = message.session_id().clone(); + let key_check_timeout_ms = data.config.encryption_config.key_check_timeout_ms; + loop { + let result = match message { + EncryptionMessage::InitializeSession(ref message) => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + let session_id: SessionId = message.session.clone().into(); + data.sessions.new_encryption_session(sender.clone(), session_id.clone(), cluster) + .and_then(|s| s.on_initialize_session(sender.clone(), message)) + }, + EncryptionMessage::ConfirmInitialization(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_confirm_initialization(sender.clone(), message)), + EncryptionMessage::CompleteInitialization(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_complete_initialization(sender.clone(), message)), + EncryptionMessage::KeysDissemination(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| { + // TODO: move this logic to session (or session connector) + let is_in_key_check_state = s.state() == EncryptionSessionState::KeyCheck; + let result = s.on_keys_dissemination(sender.clone(), message); + if !is_in_key_check_state && s.state() == EncryptionSessionState::KeyCheck { + let session = s.clone(); + let d = data.clone(); + data.handle.spawn(move |handle| + Timeout::new(time::Duration::new(key_check_timeout_ms / 1000, 0), handle) + .expect("failed to create timeout") + .and_then(move |_| { + if let Err(error) = session.start_key_generation_phase() { + session.on_session_error(d.self_key_pair.public().clone(), &message::SessionError { + session: session.id().clone().into(), + error: error.into(), + }); + } + Ok(()) + }) + .then(|_| finished(())) + ); + } + + result + }), + EncryptionMessage::Complaint(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_complaint(sender.clone(), message)), + EncryptionMessage::ComplaintResponse(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_complaint_response(sender.clone(), message)), + EncryptionMessage::PublicKeyShare(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_public_key_share(sender.clone(), message)), + EncryptionMessage::SessionError(ref message) => { + if let Some(s) = data.sessions.encryption_session(&*message.session) { + data.sessions.remove_encryption_session(s.id()); + s.on_session_error(sender.clone(), message); + } + Ok(()) + }, + EncryptionMessage::SessionCompleted(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| { + let result = s.on_session_completed(sender.clone(), message); + if result.is_ok() && s.state() == EncryptionSessionState::Finished { + data.sessions.remove_encryption_session(s.id()); + } + + result + }), + }; + + match result { + Err(Error::TooEarlyForRequest) => { + data.sessions.enqueue_encryption_message(&session_id, sender, message, 
is_queued_message); + break; + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + if let Some(connection) = data.connections.get(&sender) { + data.spawn(connection.send_message(Message::Encryption(EncryptionMessage::SessionError(message::SessionError { + session: session_id.clone().into(), + error: format!("{:?}", err), + })))); + } + + if err != Error::InvalidSessionId { + data.sessions.remove_encryption_session(&session_id); + } + break; + }, + _ => { + match data.sessions.dequeue_encryption_message(&session_id) { + Some((msg_sender, msg)) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + None => break, + } + }, + } + } + } + + /// Process single decryption message from the connection. + fn process_decryption_message(data: Arc, connection: Arc, mut message: DecryptionMessage) { + let mut sender = connection.node_id().clone(); + let mut is_queued_message = false; + let session_id = message.session_id().clone(); + let sub_session_id = message.sub_session_id().clone(); + loop { + let result = match message { + DecryptionMessage::InitializeDecryptionSession(ref message) => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster) + .and_then(|s| s.on_initialize_session(sender.clone(), message)) + }, + DecryptionMessage::ConfirmDecryptionInitialization(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_confirm_initialization(sender.clone(), message)), + DecryptionMessage::RequestPartialDecryption(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_partial_decryption_requested(sender.clone(), message)), + DecryptionMessage::PartialDecryption(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_partial_decryption(sender.clone(), message)), + DecryptionMessage::DecryptionSessionError(ref message) => { + if let Some(s) = data.sessions.decryption_session(&*message.session, &*message.sub_session) { + data.sessions.remove_decryption_session(&session_id, &sub_session_id); + s.on_session_error(sender.clone(), message); + } + Ok(()) + }, + }; + + match result { + Err(Error::TooEarlyForRequest) => { + data.sessions.enqueue_decryption_message(&session_id, &sub_session_id, sender, message, is_queued_message); + break; + }, + Err(err) => { + if let Some(connection) = data.connections.get(&sender) { + data.spawn(connection.send_message(Message::Decryption(DecryptionMessage::DecryptionSessionError(message::DecryptionSessionError { + session: session_id.clone().into(), + sub_session: sub_session_id.clone().into(), + error: format!("{:?}", err), + })))); + } + + if err != Error::InvalidSessionId { + data.sessions.remove_decryption_session(&session_id, &sub_session_id); + } + break; + }, + _ => { + match data.sessions.dequeue_decryption_message(&session_id, &sub_session_id) { + Some((msg_sender, msg)) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + None => break, + } + }, + } + } + } + + /// Process single 
cluster message from the connection. + fn process_cluster_message(data: Arc, connection: Arc, message: ClusterMessage) { + match message { + ClusterMessage::KeepAlive(_) => data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {})))), + ClusterMessage::KeepAliveResponse(_) => (), + _ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", data.self_key_pair.public(), message, connection.node_id(), connection.node_address()), + } + } +} + +impl ClusterConnections { + pub fn new(config: &ClusterConfiguration) -> Result { + let mut connections = ClusterConnections { + self_node_id: config.self_key_pair.public().clone(), + nodes: BTreeMap::new(), + connections: RwLock::new(BTreeMap::new()), + }; + + for (node_id, &(ref node_addr, node_port)) in config.nodes.iter().filter(|&(node_id, _)| node_id != config.self_key_pair.public()) { + let socket_address = make_socket_address(&node_addr, node_port)?; + connections.nodes.insert(node_id.clone(), socket_address); + } + + Ok(connections) + } + + pub fn cluster_state(&self) -> ClusterState { + ClusterState { + connected: self.connections.read().keys().cloned().collect(), + } + } + + pub fn get(&self, node: &NodeId) -> Option> { + self.connections.read().get(node).cloned() + } + + pub fn insert(&self, connection: Arc) -> bool { + let mut connections = self.connections.write(); + if connections.contains_key(connection.node_id()) { + // we have already connected to the same node + // the agreement is that node with lower id must establish connection to node with higher id + if (&self.self_node_id < connection.node_id() && connection.is_inbound()) + || (&self.self_node_id > connection.node_id() && !connection.is_inbound()) { + return false; + } + } + trace!(target: "secretstore_net", "{}: inserting connection to {} at {}", self.self_node_id, connection.node_id(), connection.node_address()); + connections.insert(connection.node_id().clone(), connection); + true + } + + pub fn remove(&self, node: &NodeId, is_inbound: bool) { + let mut connections = self.connections.write(); + if let Entry::Occupied(entry) = connections.entry(node.clone()) { + if entry.get().is_inbound() != is_inbound { + return; + } + + trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_node_id, entry.get().node_id(), entry.get().node_address()); + entry.remove_entry(); + } + } + + pub fn connected_nodes(&self) -> BTreeSet { + self.connections.read().keys().cloned().collect() + } + + pub fn active_connections(&self)-> Vec> { + self.connections.read().values().cloned().collect() + } + + pub fn disconnected_nodes(&self) -> BTreeMap { + let connections = self.connections.read(); + self.nodes.iter() + .filter(|&(node_id, _)| !connections.contains_key(node_id)) + .map(|(node_id, node_address)| (node_id.clone(), node_address.clone())) + .collect() + } +} + +impl ClusterSessions { + pub fn new(config: &ClusterConfiguration) -> Self { + ClusterSessions { + self_node_id: config.self_key_pair.public().clone(), + acl_storage: config.acl_storage.clone(), + key_storage: config.key_storage.clone(), + encryption_sessions: RwLock::new(BTreeMap::new()), + decryption_sessions: RwLock::new(BTreeMap::new()), + } + } + + pub fn new_encryption_session(&self, _master: NodeId, session_id: SessionId, cluster: Arc) -> Result, Error> { + let mut encryption_sessions = self.encryption_sessions.write(); + if encryption_sessions.contains_key(&session_id) { + return Err(Error::DuplicateSessionId); 
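ClusterConnections::insert above resolves the race in which two key servers dial each other at the same time: the convention is that the node with the lower id keeps its outbound connection and the node with the higher id keeps the inbound one, and the redundant duplicate is rejected. The same convention is why connect_disconnected_nodes only dials "higher" peers when allow_connecting_to_higher_nodes is set. The snippet below is a minimal, self-contained sketch of that tie-break rule only; NodeId is stood in by a plain u64 and the helper name keep_duplicate is invented here for illustration, it is not part of the patch.

// Sketch of the duplicate-connection tie-break ("lower id dials higher id").
// Returns true when a second connection to an already-connected peer should be kept.
fn keep_duplicate(self_id: u64, peer_id: u64, new_is_inbound: bool) -> bool {
    if self_id < peer_id {
        // we are the designated dialer, so only an outbound duplicate may survive
        !new_is_inbound
    } else {
        // the peer is the designated dialer, so only an inbound duplicate may survive
        new_is_inbound
    }
}

fn main() {
    // lower-id node keeps its outbound connection, drops the inbound duplicate
    assert!(keep_duplicate(1, 2, false));
    assert!(!keep_duplicate(1, 2, true));
    // higher-id node keeps the inbound connection, drops the outbound duplicate
    assert!(keep_duplicate(2, 1, true));
    assert!(!keep_duplicate(2, 1, false));
    println!("tie-break rule holds");
}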
+ } + + let session = Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams { + id: session_id.clone(), + self_node_id: self.self_node_id.clone(), + key_storage: self.key_storage.clone(), + cluster: cluster, + })); + let encryption_session = QueuedEncryptionSession { + session: session.clone(), + queue: VecDeque::new() + }; + encryption_sessions.insert(session_id, encryption_session); + Ok(session) + } + + pub fn remove_encryption_session(&self, session_id: &SessionId) { + self.encryption_sessions.write().remove(session_id); + } + + pub fn encryption_session(&self, session_id: &SessionId) -> Option> { + self.encryption_sessions.read().get(session_id).map(|s| s.session.clone()) + } + + pub fn enqueue_encryption_message(&self, session_id: &SessionId, sender: NodeId, message: EncryptionMessage, is_queued_message: bool) { + self.encryption_sessions.write().get_mut(session_id) + .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } + else { session.queue.push_back((sender, message)) }); + } + + pub fn dequeue_encryption_message(&self, session_id: &SessionId) -> Option<(NodeId, EncryptionMessage)> { + self.encryption_sessions.write().get_mut(session_id) + .and_then(|session| session.queue.pop_front()) + } + + pub fn new_decryption_session(&self, _master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc) -> Result, Error> { + let mut decryption_sessions = self.decryption_sessions.write(); + let session_id = DecryptionSessionId::new(session_id, sub_session_id); + if decryption_sessions.contains_key(&session_id) { + return Err(Error::DuplicateSessionId); + } + + let session = Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams { + id: session_id.id.clone(), + access_key: session_id.access_key.clone(), + self_node_id: self.self_node_id.clone(), + encrypted_data: self.key_storage.get(&session_id.id).map_err(|e| Error::KeyStorage(e.into()))?, + acl_storage: self.acl_storage.clone(), + cluster: cluster, + })?); + let decryption_session = QueuedDecryptionSession { + session: session.clone(), + queue: VecDeque::new() + }; + decryption_sessions.insert(session_id, decryption_session); + Ok(session) + } + + pub fn remove_decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.write().remove(&session_id); + } + + pub fn decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option> { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.read().get(&session_id).map(|s| s.session.clone()) + } + + pub fn enqueue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret, sender: NodeId, message: DecryptionMessage, is_queued_message: bool) { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.write().get_mut(&session_id) + .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } + else { session.queue.push_back((sender, message)) }); + } + + pub fn dequeue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option<(NodeId, DecryptionMessage)> { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.write().get_mut(&session_id) + .and_then(|session| session.queue.pop_front()) + } + + pub fn on_connection_timeout(&self, node_id: &NodeId) { + for 
encryption_session in self.encryption_sessions.read().values() { + encryption_session.session.on_session_timeout(node_id); + } + for decryption_session in self.decryption_sessions.read().values() { + decryption_session.session.on_session_timeout(node_id); + } + } +} + +impl ClusterData { + pub fn new(handle: &Handle, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc { + Arc::new(ClusterData { + handle: handle.remote().clone(), + pool: CpuPool::new(config.threads), + self_key_pair: config.self_key_pair.clone(), + connections: connections, + sessions: sessions, + config: config, + }) + } + + /// Get connection to given node. + pub fn connection(&self, node: &NodeId) -> Option> { + self.connections.get(node) + } + + /// Spawns a future using thread pool and schedules execution of it with event loop handle. + pub fn spawn(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static { + let pool_work = self.pool.spawn(f); + self.handle.spawn(move |_handle| { + pool_work.then(|_| finished(())) + }) + } +} + +impl Connection { + pub fn new(is_inbound: bool, connection: NetConnection) -> Arc { + Arc::new(Connection { + node_id: connection.node_id, + node_address: connection.address, + is_inbound: is_inbound, + stream: connection.stream, + key: connection.key, + last_message_time: Mutex::new(time::Instant::now()), + }) + } + + pub fn is_inbound(&self) -> bool { + self.is_inbound + } + + pub fn node_id(&self) -> &NodeId { + &self.node_id + } + + pub fn last_message_time(&self) -> time::Instant { + *self.last_message_time.lock() + } + + pub fn set_last_message_time(&self, last_message_time: time::Instant) { + *self.last_message_time.lock() = last_message_time; + } + + pub fn node_address(&self) -> &SocketAddr { + &self.node_address + } + + pub fn send_message(&self, message: Message) -> WriteMessage { + write_encrypted_message(self.stream.clone(), &self.key, message) + } + + pub fn read_message(&self) -> ReadMessage { + read_encrypted_message(self.stream.clone(), self.key.clone()) + } +} + +impl ClusterView { + pub fn new(cluster: Arc, nodes: BTreeSet) -> Self { + ClusterView { + core: Arc::new(Mutex::new(ClusterViewCore { + cluster: cluster, + nodes: nodes, + })), + } + } +} + +impl Cluster for ClusterView { + fn broadcast(&self, message: Message) -> Result<(), Error> { + let core = self.core.lock(); + for node in core.nodes.iter().filter(|n| *n != core.cluster.self_key_pair.public()) { + let connection = core.cluster.connection(node).ok_or(Error::NodeDisconnected)?; + core.cluster.spawn(connection.send_message(message.clone())) + } + Ok(()) + } + + fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { + let core = self.core.lock(); + let connection = core.cluster.connection(to).ok_or(Error::NodeDisconnected)?; + core.cluster.spawn(connection.send_message(message)); + Ok(()) + } + + fn blacklist(&self, _node: &NodeId) { + // TODO: unimplemented!() + } +} + +impl ClusterClientImpl { + pub fn new(data: Arc) -> Self { + ClusterClientImpl { + data: data, + } + } +} + +impl ClusterClient for ClusterClientImpl { + fn cluster_state(&self) -> ClusterState { + self.data.connections.cluster_state() + } + + fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(self.data.clone(), 
connected_nodes.clone())); + let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, cluster)?; + session.initialize(threshold, connected_nodes)?; + Ok(session) + } + + fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let access_key = Random.generate()?.secret().clone(); + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); + let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key, cluster)?; + session.initialize(requestor_signature)?; + Ok(session) + } +} + +fn make_socket_address(address: &str, port: u16) -> Result { + let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?; + Ok(SocketAddr::new(ip_address, port)) +} + #[cfg(test)] pub mod tests { + use std::sync::Arc; + use std::time; use std::collections::VecDeque; use parking_lot::Mutex; - use key_server_cluster::{NodeId, Error}; + use tokio_core::reactor::Core; + use ethkey::{Random, Generator}; + use key_server_cluster::{NodeId, Error, EncryptionConfiguration, DummyAclStorage, DummyKeyStorage}; use key_server_cluster::message::Message; - use key_server_cluster::cluster::Cluster; + use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; #[derive(Debug)] pub struct DummyCluster { @@ -87,4 +946,61 @@ pub mod tests { fn blacklist(&self, _node: &NodeId) { } } + + pub fn loop_until(core: &mut Core, timeout: time::Duration, predicate: F) where F: Fn() -> bool { + let start = time::Instant::now(); + loop { + core.turn(Some(time::Duration::from_millis(1))); + if predicate() { + break; + } + + if time::Instant::now() - start > timeout { + panic!("no result in {:?}", timeout); + } + } + } + + pub fn all_connections_established(cluster: &Arc) -> bool { + cluster.config().nodes.keys() + .filter(|p| *p != cluster.config().self_key_pair.public()) + .all(|p| cluster.connection(p).is_some()) + } + + pub fn make_clusters(core: &Core, ports_begin: u16, num_nodes: usize) -> Vec> { + let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); + let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { + threads: 1, + self_key_pair: key_pairs[i].clone(), + listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16), + nodes: key_pairs.iter().enumerate() + .map(|(j, kp)| (kp.public().clone(), ("127.0.0.1".into(), ports_begin + j as u16))) + .collect(), + allow_connecting_to_higher_nodes: false, + encryption_config: EncryptionConfiguration { + key_check_timeout_ms: 10, + }, + key_storage: Arc::new(DummyKeyStorage::default()), + acl_storage: Arc::new(DummyAclStorage::default()), + }).collect(); + let clusters: Vec<_> = cluster_params.into_iter().enumerate() + .map(|(_, params)| ClusterCore::new(core.handle(), params).unwrap()) + .collect(); + + clusters + } + + pub fn run_clusters(clusters: &[Arc]) { + for cluster in clusters { + cluster.run().unwrap(); + } + } + + #[test] + fn cluster_connects_to_other_nodes() { + let mut core = Core::new().unwrap(); + let clusters = make_clusters(&core, 6010, 3); + run_clusters(&clusters); + loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); + } } diff --git a/secret_store/src/key_server_cluster/decryption_session.rs 
b/secret_store/src/key_server_cluster/decryption_session.rs index d4160851e..71d8ad26f 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ b/secret_store/src/key_server_cluster/decryption_session.rs @@ -14,15 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::cmp::{Ord, PartialOrd, Ordering}; use std::collections::{BTreeSet, BTreeMap}; use std::sync::Arc; -use parking_lot::Mutex; +use parking_lot::{Mutex, Condvar}; use ethkey::{self, Secret, Public, Signature}; -use key_server_cluster::{Error, AclStorage, EncryptedData, NodeId, SessionId}; +use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId}; use key_server_cluster::cluster::Cluster; use key_server_cluster::math; -use key_server_cluster::message::{Message, InitializeDecryptionSession, ConfirmDecryptionInitialization, - RequestPartialDecryption, PartialDecryption}; +use key_server_cluster::message::{Message, DecryptionMessage, InitializeDecryptionSession, ConfirmDecryptionInitialization, + RequestPartialDecryption, PartialDecryption, DecryptionSessionError}; + +/// Decryption session API. +pub trait Session: Send + Sync + 'static { + /// Wait until session is completed. Returns distributely restored secret key. + fn wait(&self) -> Result; +} /// Distributed decryption session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: @@ -32,7 +39,7 @@ use key_server_cluster::message::{Message, InitializeDecryptionSession, ConfirmD /// 2) ACL check: all nodes which have received the request are querying ACL-contract to check if requestor has access to the document /// 3) partial decryption: every node which has succussfully checked access for the requestor do a partial decryption /// 4) decryption: master node receives all partial decryptions of the secret and restores the secret -pub struct Session { +pub struct SessionImpl { /// Encryption session id. id: SessionId, /// Decryption session access key. @@ -40,25 +47,36 @@ pub struct Session { /// Public identifier of this node. self_node_id: NodeId, /// Encrypted data. - encrypted_data: EncryptedData, + encrypted_data: DocumentKeyShare, /// ACL storate to check access to the resource. acl_storage: Arc, /// Cluster which allows this node to send messages to other nodes in the cluster. cluster: Arc, + /// SessionImpl completion condvar. + completed: Condvar, /// Mutable session data. data: Mutex, } -/// Session creation parameters -pub struct SessionParams { - /// Session identifier. +/// Decryption session Id. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DecryptionSessionId { + /// Encryption session id. pub id: SessionId, - /// Session access key. + /// Decryption session access key. + pub access_key: Secret, +} + +/// SessionImpl creation parameters +pub struct SessionParams { + /// SessionImpl identifier. + pub id: SessionId, + /// SessionImpl access key. pub access_key: Secret, /// Id of node, on which this session is running. pub self_node_id: Public, - /// Encrypted data (result of running encryption_session::Session). - pub encrypted_data: EncryptedData, + /// Encrypted data (result of running encryption_session::SessionImpl). + pub encrypted_data: DocumentKeyShare, /// ACL storage. 
pub acl_storage: Arc, /// Cluster @@ -91,16 +109,11 @@ struct SessionData { /// === Values, filled during final decryption === /// Decrypted secret - decrypted_secret: Option, -} - -#[derive(Debug)] -struct NodeData { - /// Node-generated shadow point. - shadow_point: Option, + decrypted_secret: Option>, } #[derive(Debug, Clone, PartialEq)] +/// Decryption session data. pub enum SessionState { /// Every node starts in this state. WaitingForInitialization, @@ -116,18 +129,19 @@ pub enum SessionState { Failed, } -impl Session { +impl SessionImpl { /// Create new decryption session. pub fn new(params: SessionParams) -> Result { check_encrypted_data(¶ms.self_node_id, ¶ms.encrypted_data)?; - Ok(Session { + Ok(SessionImpl { id: params.id, access_key: params.access_key, self_node_id: params.self_node_id, encrypted_data: params.encrypted_data, acl_storage: params.acl_storage, cluster: params.cluster, + completed: Condvar::new(), data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, master: None, @@ -146,19 +160,22 @@ impl Session { &self.self_node_id } + #[cfg(test)] /// Get this session access key. pub fn access_key(&self) -> &Secret { &self.access_key } + #[cfg(test)] /// Get current session state. pub fn state(&self) -> SessionState { self.data.lock().state.clone() } + #[cfg(test)] /// Get decrypted secret pub fn decrypted_secret(&self) -> Option { - self.data.lock().decrypted_secret.clone() + self.data.lock().decrypted_secret.clone().and_then(|r| r.ok()) } /// Initialize decryption session. @@ -188,15 +205,20 @@ impl Session { // not enough nodes => pass initialization message to all other nodes SessionState::WaitingForInitializationConfirm => { for node in self.encrypted_data.id_numbers.keys().filter(|n| *n != self.node()) { - self.cluster.send(node, Message::InitializeDecryptionSession(InitializeDecryptionSession { - session: self.id.clone(), - sub_session: self.access_key.clone(), - requestor_signature: requestor_signature.clone(), - }))?; + self.cluster.send(node, Message::Decryption(DecryptionMessage::InitializeDecryptionSession(InitializeDecryptionSession { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + requestor_signature: requestor_signature.clone().into(), + })))?; } }, // we can decrypt data on our own - SessionState::WaitingForPartialDecryption => unimplemented!(), + SessionState::WaitingForPartialDecryption => { + data.confirmed_nodes.insert(self.node().clone()); + SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data)?; + SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?; + self.completed.notify_all(); + }, // we can not decrypt data SessionState::Failed => (), // cannot reach other states @@ -207,9 +229,9 @@ impl Session { } /// When session initialization message is received. 
- pub fn on_initialize_session(&self, sender: NodeId, message: InitializeDecryptionSession) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeDecryptionSession) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -230,17 +252,17 @@ impl Session { // respond to master node data.master = Some(sender.clone()); - self.cluster.send(&sender, Message::ConfirmDecryptionInitialization(ConfirmDecryptionInitialization { - session: self.id.clone(), - sub_session: self.access_key.clone(), + self.cluster.send(&sender, Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(ConfirmDecryptionInitialization { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), is_confirmed: is_requestor_allowed_to_read, - })) + }))) } /// When session initialization confirmation message is reeived. - pub fn on_confirm_initialization(&self, sender: NodeId, message: ConfirmDecryptionInitialization) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmDecryptionInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -260,26 +282,8 @@ impl Session { // we do not yet have enough nodes for decryption SessionState::WaitingForInitializationConfirm => Ok(()), // we have enough nodes for decryption - SessionState::WaitingForPartialDecryption => { - let confirmed_nodes: BTreeSet<_> = data.confirmed_nodes.clone(); - for node in data.confirmed_nodes.iter().filter(|n| n != &self.node()) { - self.cluster.send(node, Message::RequestPartialDecryption(RequestPartialDecryption { - session: self.id.clone(), - sub_session: self.access_key.clone(), - nodes: confirmed_nodes.clone(), - }))?; - } - - assert!(data.confirmed_nodes.remove(self.node())); - - let shadow_point = { - let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed"); - do_partial_decryption(self.node(), &requestor, &data.confirmed_nodes, &self.access_key, &self.encrypted_data)? - }; - data.shadow_points.insert(self.node().clone(), shadow_point); - - Ok(()) - }, + SessionState::WaitingForPartialDecryption => + SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data), // we can not have enough nodes for decryption SessionState::Failed => Ok(()), // cannot reach other states @@ -288,9 +292,9 @@ impl Session { } /// When partial decryption is requested. 
- pub fn on_partial_decryption_requested(&self, sender: NodeId, message: RequestPartialDecryption) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_partial_decryption_requested(&self, sender: NodeId, message: &RequestPartialDecryption) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); // check message @@ -311,13 +315,13 @@ impl Session { // calculate shadow point let shadow_point = { let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryptionRequest follows initialization; qed"); - do_partial_decryption(self.node(), &requestor, &message.nodes, &self.access_key, &self.encrypted_data)? + do_partial_decryption(self.node(), &requestor, &message.nodes.iter().cloned().map(Into::into).collect(), &self.access_key, &self.encrypted_data)? }; - self.cluster.send(&sender, Message::PartialDecryption(PartialDecryption { - session: self.id.clone(), - sub_session: self.access_key.clone(), - shadow_point: shadow_point, - }))?; + self.cluster.send(&sender, Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + shadow_point: shadow_point.into(), + })))?; // update sate data.state = SessionState::Finished; @@ -326,9 +330,9 @@ impl Session { } /// When partial decryption is received. - pub fn on_partial_decryption(&self, sender: NodeId, message: PartialDecryption) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_partial_decryption(&self, sender: NodeId, message: &PartialDecryption) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -341,24 +345,113 @@ impl Session { if !data.confirmed_nodes.remove(&sender) { return Err(Error::InvalidStateForRequest); } - data.shadow_points.insert(sender, message.shadow_point); + data.shadow_points.insert(sender, message.shadow_point.clone().into()); // check if we have enough shadow points to decrypt the secret if data.shadow_points.len() != self.encrypted_data.threshold + 1 { return Ok(()); } + SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?; + self.completed.notify_all(); + + Ok(()) + } + + /// When error has occured on another node. + pub fn on_session_error(&self, sender: NodeId, message: &DecryptionSessionError) { + warn!("{}: decryption session error: {:?} from {}", self.node(), message, sender); + let mut data = self.data.lock(); + data.state = SessionState::Failed; + data.decrypted_secret = Some(Err(Error::Io(message.error.clone()))); + self.completed.notify_all(); + } + + /// When session timeout has occured. 
+ pub fn on_session_timeout(&self, _node: &NodeId) { + warn!("{}: decryption session timeout", self.node()); + let mut data = self.data.lock(); + // TODO: check that node is a part of decryption process + data.state = SessionState::Failed; + data.decrypted_secret = Some(Err(Error::Io("session expired".into()))); + self.completed.notify_all(); + } + + fn start_waiting_for_partial_decryption(self_node_id: NodeId, session_id: SessionId, access_key: Secret, cluster: &Arc, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> { + let confirmed_nodes: BTreeSet<_> = data.confirmed_nodes.clone(); + for node in data.confirmed_nodes.iter().filter(|n| n != &&self_node_id) { + cluster.send(node, Message::Decryption(DecryptionMessage::RequestPartialDecryption(RequestPartialDecryption { + session: session_id.clone().into(), + sub_session: access_key.clone().into(), + nodes: confirmed_nodes.iter().cloned().map(Into::into).collect(), + })))?; + } + + assert!(data.confirmed_nodes.remove(&self_node_id)); + + let shadow_point = { + let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed"); + do_partial_decryption(&self_node_id, &requestor, &data.confirmed_nodes, &access_key, &encrypted_data)? + }; + data.shadow_points.insert(self_node_id.clone(), shadow_point); + + Ok(()) + } + + fn do_decryption(access_key: Secret, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> { // decrypt the secret using shadow points let joint_shadow_point = math::compute_joint_shadow_point(data.shadow_points.values())?; - let decrypted_secret = math::decrypt_with_joint_shadow(&self.access_key, &self.encrypted_data.encrypted_point, &joint_shadow_point)?; - data.decrypted_secret = Some(decrypted_secret); + let decrypted_secret = math::decrypt_with_joint_shadow(encrypted_data.threshold, &access_key, &encrypted_data.encrypted_point, &joint_shadow_point)?; + data.decrypted_secret = Some(Ok(decrypted_secret)); + + // switch to completed state data.state = SessionState::Finished; Ok(()) } } -fn check_encrypted_data(self_node_id: &Public, encrypted_data: &EncryptedData) -> Result<(), Error> { +impl Session for SessionImpl { + fn wait(&self) -> Result { + let mut data = self.data.lock(); + if !data.decrypted_secret.is_some() { + self.completed.wait(&mut data); + } + + data.decrypted_secret.as_ref() + .expect("checked above or waited for completed; completed is only signaled when decrypted_secret.is_some(); qed") + .clone() + } +} + +impl DecryptionSessionId { + /// Create new decryption session Id. 
+ pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self { + DecryptionSessionId { + id: session_id, + access_key: sub_session_id, + } + } +} + +impl PartialOrd for DecryptionSessionId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + + +impl Ord for DecryptionSessionId { + fn cmp(&self, other: &Self) -> Ordering { + match self.id.cmp(&other.id) { + Ordering::Equal => self.access_key.cmp(&other.access_key), + r @ _ => r, + } + } +} + + +fn check_encrypted_data(self_node_id: &Public, encrypted_data: &DocumentKeyShare) -> Result<(), Error> { use key_server_cluster::encryption_session::{check_cluster_nodes, check_threshold}; let nodes = encrypted_data.id_numbers.keys().cloned().collect(); @@ -368,7 +461,7 @@ fn check_encrypted_data(self_node_id: &Public, encrypted_data: &EncryptedData) - Ok(()) } -fn process_initialization_response(encrypted_data: &EncryptedData, data: &mut SessionData, node: &NodeId, check_result: bool) -> Result<(), Error> { +fn process_initialization_response(encrypted_data: &DocumentKeyShare, data: &mut SessionData, node: &NodeId, check_result: bool) -> Result<(), Error> { if !data.requested_nodes.remove(node) { return Err(Error::InvalidMessage); } @@ -395,7 +488,7 @@ fn process_initialization_response(encrypted_data: &EncryptedData, data: &mut Se Ok(()) } -fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants: &BTreeSet, access_key: &Secret, encrypted_data: &EncryptedData) -> Result { +fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants: &BTreeSet, access_key: &Secret, encrypted_data: &DocumentKeyShare) -> Result { let node_id_number = &encrypted_data.id_numbers[node]; let node_secret_share = &encrypted_data.secret_share; let other_id_numbers = participants.iter() @@ -409,43 +502,42 @@ fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants #[cfg(test)] mod tests { use std::sync::Arc; - use std::str::FromStr; use std::collections::BTreeMap; use super::super::super::acl_storage::DummyAclStorage; use ethkey::{self, Random, Generator, Public, Secret}; - use key_server_cluster::{NodeId, EncryptedData, SessionId, Error}; + use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error}; use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::decryption_session::{Session, SessionParams, SessionState}; - use key_server_cluster::message::{self, Message}; + use key_server_cluster::decryption_session::{SessionImpl, SessionParams, SessionState}; + use key_server_cluster::message::{self, Message, DecryptionMessage}; const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f"; - fn prepare_decryption_sessions() -> (Vec>, Vec>, Vec) { + fn prepare_decryption_sessions() -> (Vec>, Vec>, Vec) { // prepare encrypted data + cluster configuration for scheme 4-of-5 let session_id = SessionId::default(); let access_key = Random.generate().unwrap().secret().clone(); - let secret_shares = vec![ - Secret::from_str("834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec").unwrap(), - Secret::from_str("5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b").unwrap(), - Secret::from_str("71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9").unwrap(), - Secret::from_str("80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4").unwrap(), - 
Secret::from_str("c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad").unwrap(), + let secret_shares: Vec = vec![ + "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(), + "5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b".parse().unwrap(), + "71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9".parse().unwrap(), + "80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4".parse().unwrap(), + "c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad".parse().unwrap(), ]; let id_numbers: Vec<(NodeId, Secret)> = vec![ ("b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), - Secret::from_str("281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c").unwrap()), + "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()), ("1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".into(), - Secret::from_str("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b").unwrap()), + "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse().unwrap()), ("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into(), - Secret::from_str("f43ac0fba42a5b6ed95707d2244659e89ba877b1c9b82c0d0a9dcf834e80fc62").unwrap()), + "f43ac0fba42a5b6ed95707d2244659e89ba877b1c9b82c0d0a9dcf834e80fc62".parse().unwrap()), ("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into(), - Secret::from_str("5a324938dfb2516800487d25ab7289ba8ec38811f77c3df602e4e65e3c9acd9f").unwrap()), + "5a324938dfb2516800487d25ab7289ba8ec38811f77c3df602e4e65e3c9acd9f".parse().unwrap()), ("321977760d1d8e15b047a309e4c7fe6f355c10bb5a06c68472b676926427f69f229024fa2692c10da167d14cdc77eb95d0fce68af0a0f704f0d3db36baa83bb2".into(), - Secret::from_str("12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8").unwrap()), + "12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8".parse().unwrap()), ]; let common_point: Public = "6962be696e1bcbba8e64cc7fddf140f854835354b5804f3bb95ae5a2799130371b589a131bd39699ac7174ccb35fc4342dab05331202209582fc8f3a40916ab0".into(); let encrypted_point: Public = "b07031982bde9890e12eff154765f03c56c3ab646ad47431db5dd2d742a9297679c4c65b998557f8008469afd0c43d40b6c5f6c6a1c7354875da4115237ed87a".into(); - let encrypted_datas: Vec<_> = (0..5).map(|i| EncryptedData { + let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare { threshold: 3, id_numbers: id_numbers.clone().into_iter().collect(), secret_share: secret_shares[i].clone(), @@ -454,7 +546,7 @@ mod tests { }).collect(); let acl_storages: Vec<_> = (0..5).map(|_| Arc::new(DummyAclStorage::default())).collect(); let clusters: Vec<_> = (0..5).map(|i| Arc::new(DummyCluster::new(id_numbers.iter().nth(i).clone().unwrap().0))).collect(); - let sessions: Vec<_> = (0..5).map(|i| Session::new(SessionParams { + let sessions: Vec<_> = (0..5).map(|i| SessionImpl::new(SessionParams { id: session_id.clone(), access_key: access_key.clone(), self_node_id: id_numbers.iter().nth(i).clone().unwrap().0, @@ -466,11 +558,11 @@ mod tests { (clusters, acl_storages, sessions) } - fn do_messages_exchange(clusters: &[Arc], sessions: &[Session]) { + fn do_messages_exchange(clusters: &[Arc], sessions: &[SessionImpl]) { 
do_messages_exchange_until(clusters, sessions, |_, _, _| false); } - fn do_messages_exchange_until(clusters: &[Arc], sessions: &[Session], mut cond: F) where F: FnMut(&NodeId, &NodeId, &Message) -> bool { + fn do_messages_exchange_until(clusters: &[Arc], sessions: &[SessionImpl], mut cond: F) where F: FnMut(&NodeId, &NodeId, &Message) -> bool { while let Some((from, to, message)) = clusters.iter().filter_map(|c| c.take_message().map(|(to, msg)| (c.node(), to, msg))).next() { let session = &sessions[sessions.iter().position(|s| s.node() == &to).unwrap()]; if cond(&from, &to, &message) { @@ -478,10 +570,10 @@ mod tests { } match message { - Message::InitializeDecryptionSession(message) => session.on_initialize_session(from, message).unwrap(), - Message::ConfirmDecryptionInitialization(message) => session.on_confirm_initialization(from, message).unwrap(), - Message::RequestPartialDecryption(message) => session.on_partial_decryption_requested(from, message).unwrap(), - Message::PartialDecryption(message) => session.on_partial_decryption(from, message).unwrap(), + Message::Decryption(DecryptionMessage::InitializeDecryptionSession(message)) => session.on_initialize_session(from, &message).unwrap(), + Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(message)) => session.on_confirm_initialization(from, &message).unwrap(), + Message::Decryption(DecryptionMessage::RequestPartialDecryption(message)) => session.on_partial_decryption_requested(from, &message).unwrap(), + Message::Decryption(DecryptionMessage::PartialDecryption(message)) => session.on_partial_decryption(from, &message).unwrap(), _ => panic!("unexpected"), } } @@ -492,11 +584,11 @@ mod tests { let mut nodes = BTreeMap::new(); let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(self_node_id, Random.generate().unwrap().secret().clone()); - match Session::new(SessionParams { + match SessionImpl::new(SessionParams { id: SessionId::default(), access_key: Random.generate().unwrap().secret().clone(), self_node_id: self_node_id.clone(), - encrypted_data: EncryptedData { + encrypted_data: DocumentKeyShare { threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), @@ -517,11 +609,11 @@ mod tests { let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); - match Session::new(SessionParams { + match SessionImpl::new(SessionParams { id: SessionId::default(), access_key: Random.generate().unwrap().secret().clone(), self_node_id: self_node_id.clone(), - encrypted_data: EncryptedData { + encrypted_data: DocumentKeyShare { threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), @@ -542,11 +634,11 @@ mod tests { let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); - match Session::new(SessionParams { + match SessionImpl::new(SessionParams { id: SessionId::default(), access_key: Random.generate().unwrap().secret().clone(), self_node_id: self_node_id.clone(), - encrypted_data: EncryptedData { + encrypted_data: DocumentKeyShare { threshold: 2, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), @@ -572,70 +664,70 
@@ mod tests { fn fails_to_accept_initialization_when_already_initialized() { let (_, _, sessions) = prepare_decryption_sessions(); assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).unwrap(), ()); - assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } #[test] fn fails_to_partial_decrypt_if_not_waiting() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), }).unwrap_err(), Error::InvalidStateForRequest); } #[test] fn fails_to_partial_decrypt_if_requested_by_slave() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: 
ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_partial_decrypt_if_wrong_number_of_nodes_participating() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(2).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(2).collect(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_accept_partial_decrypt_if_not_waiting() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[0].on_partial_decryption(sessions[1].node().clone(), message::PartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - shadow_point: Random.generate().unwrap().public().clone(), + assert_eq!(sessions[0].on_partial_decryption(sessions[1].node().clone(), &message::PartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + shadow_point: Random.generate().unwrap().public().clone().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -647,7 +739,7 @@ mod tests { let mut pd_from = None; let mut pd_msg = None; do_messages_exchange_until(&clusters, &sessions, |from, _, msg| match msg { - &Message::PartialDecryption(ref msg) => { + &Message::Decryption(DecryptionMessage::PartialDecryption(ref msg)) => { pd_from = Some(from.clone()); pd_msg = Some(msg.clone()); true @@ -655,8 +747,8 @@ mod tests { _ => false, }); - assert_eq!(sessions[0].on_partial_decryption(pd_from.clone().unwrap(), pd_msg.clone().unwrap()).unwrap(), ()); - assert_eq!(sessions[0].on_partial_decryption(pd_from.unwrap(), pd_msg.unwrap()).unwrap_err(), Error::InvalidStateForRequest); + assert_eq!(sessions[0].on_partial_decryption(pd_from.clone().unwrap(), &pd_msg.clone().unwrap()).unwrap(), ()); + 
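The new Session::wait API in this file blocks the caller on a condition variable until one of the message handlers stores the decrypted secret (or an error) and calls notify_all. The sketch below shows that completion pattern in isolation, using std's Mutex/Condvar instead of parking_lot and a String standing in for the secret; the SketchSession type and its method names are illustrative only, not types from the patch. std's Condvar permits spurious wakeups, hence the re-check loop around wait here.

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Minimal stand-in for a session that can be completed from another thread.
struct SketchSession {
    completed: Condvar,
    result: Mutex<Option<Result<String, String>>>,
}

impl SketchSession {
    // Block until a result has been stored, then return a copy of it.
    fn wait(&self) -> Result<String, String> {
        let mut guard = self.result.lock().unwrap();
        while guard.is_none() {
            guard = self.completed.wait(guard).unwrap();
        }
        (*guard).clone().expect("loop above exits only when result is Some; qed")
    }

    // Store the outcome and wake every waiting caller.
    fn complete(&self, result: Result<String, String>) {
        *self.result.lock().unwrap() = Some(result);
        self.completed.notify_all();
    }
}

fn main() {
    let session = Arc::new(SketchSession { completed: Condvar::new(), result: Mutex::new(None) });
    let s = session.clone();
    let worker = thread::spawn(move || s.complete(Ok("document key plaintext".to_owned())));
    assert_eq!(session.wait().unwrap(), "document key plaintext");
    worker.join().unwrap();
}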
assert_eq!(sessions[0].on_partial_decryption(pd_from.unwrap(), &pd_msg.unwrap()).unwrap_err(), Error::InvalidStateForRequest); } #[test] @@ -704,4 +796,9 @@ mod tests { // 3) 0 sessions have decrypted key value assert!(sessions.iter().all(|s| s.decrypted_secret().is_none())); } + + #[test] + fn decryption_session_works_over_network() { + // TODO + } } diff --git a/secret_store/src/key_server_cluster/encryption_session.rs b/secret_store/src/key_server_cluster/encryption_session.rs index 6f5705a73..beca00443 100644 --- a/secret_store/src/key_server_cluster/encryption_session.rs +++ b/secret_store/src/key_server_cluster/encryption_session.rs @@ -17,13 +17,22 @@ use std::collections::{BTreeSet, BTreeMap, VecDeque}; use std::fmt::{Debug, Formatter, Error as FmtError}; use std::sync::Arc; -use parking_lot::Mutex; +use parking_lot::{Condvar, Mutex}; use ethkey::{Public, Secret}; -use key_server_cluster::{Error, NodeId, SessionId}; +use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare}; use key_server_cluster::math; use key_server_cluster::cluster::Cluster; -use key_server_cluster::message::{Message, InitializeSession, ConfirmInitialization, CompleteInitialization, - KeysDissemination, Complaint, ComplaintResponse, PublicKeyShare}; +use key_server_cluster::message::{Message, EncryptionMessage, InitializeSession, ConfirmInitialization, CompleteInitialization, + KeysDissemination, Complaint, ComplaintResponse, PublicKeyShare, SessionError, SessionCompleted}; + +/// Encryption session API. +pub trait Session: Send + Sync + 'static { + #[cfg(test)] + /// Get joint public key (if it is known). + fn joint_public_key(&self) -> Option; + /// Wait until session is completed. Returns distributely generated secret key. + fn wait(&self) -> Result; +} /// Encryption (distributed key generation) session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: @@ -34,17 +43,34 @@ use key_server_cluster::message::{Message, InitializeSession, ConfirmInitializat /// 3) key verification (KV): all nodes are checking values, received for other nodes and complaining if keys are wrong /// 4) key check phase (KC): nodes are processing complaints, received from another nodes /// 5) key generation phase (KG): nodes are exchanging with information, enough to generate joint public key -pub struct Session { +/// 6) encryption phase: master node generates secret key, encrypts it using joint public && broadcasts encryption result +pub struct SessionImpl { /// Unique session id. id: SessionId, /// Public identifier of this node. self_node_id: NodeId, + /// Key storage. + key_storage: Arc, /// Cluster which allows this node to send messages to other nodes in the cluster. cluster: Arc, + /// SessionImpl completion condvar. + completed: Condvar, /// Mutable session data. data: Mutex, } +/// SessionImpl creation parameters +pub struct SessionParams { + /// SessionImpl identifier. + pub id: SessionId, + /// Id of node, on which this session is running. + pub self_node_id: Public, + /// Key storage. + pub key_storage: Arc, + /// Cluster + pub cluster: Arc, +} + #[derive(Debug)] /// Mutable data of encryption (distributed key generation) session. struct SessionData { @@ -74,7 +100,9 @@ struct SessionData { /// === Values, filled when DKG session is completed successfully === /// Jointly generated public key, which can be used to encrypt secret. Public. - joint_public: Option, + joint_public: Option>, + /// Secret point. 
+ secret_point: Option>, } #[derive(Debug, Clone)] @@ -95,13 +123,17 @@ struct NodeData { /// Public values, which have been received from this node. pub publics: Option>, - /// === Values, filled during KC phase === + // === Values, filled during KC phase === /// Nodes, complaining against this node. pub complaints: BTreeSet, - /// === Values, filled during KG phase === + // === Values, filled during KG phase === /// Public share, which has been received from this node. pub public_share: Option, + + // === Values, filled during encryption phase === + /// Flags marking that node has confirmed session completion (encryption data is stored). + pub completion_confirmed: bool, } #[derive(Debug, Clone, PartialEq)] @@ -139,6 +171,10 @@ pub enum SessionState { /// Node is waiting for joint public key share to be received from every other node. WaitingForPublicKeyShare, + // === Encryption phase states === + /// Node is waiting for session completion/session completion confirmation. + WaitingForEncryptionConfirmation, + // === Final states of the session === /// Joint public key generation is completed. Finished, @@ -146,13 +182,15 @@ pub enum SessionState { Failed, } -impl Session { +impl SessionImpl { /// Create new encryption session. - pub fn new(id: SessionId, self_node_id: Public, cluster: Arc) -> Self { - Session { - id: id, - self_node_id: self_node_id, - cluster: cluster, + pub fn new(params: SessionParams) -> Self { + SessionImpl { + id: params.id, + self_node_id: params.self_node_id, + key_storage: params.key_storage, + cluster: params.cluster, + completed: Condvar::new(), data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, master: None, @@ -162,10 +200,16 @@ impl Session { secret_coeff: None, secret_share: None, joint_public: None, + secret_point: None, }), } } + /// Get this session Id. + pub fn id(&self) -> &SessionId { + &self.id + } + /// Get this node Id. pub fn node(&self) -> &NodeId { &self.self_node_id @@ -176,11 +220,6 @@ impl Session { self.data.lock().state.clone() } - /// Get joint public key. - pub fn joint_public_key(&self) -> Option { - self.data.lock().joint_public.clone() - } - #[cfg(test)] /// Get derived point. pub fn derived_point(&self) -> Option { @@ -220,15 +259,15 @@ impl Session { // start initialization let derived_point = math::generate_random_point()?; - self.cluster.send(&next_node, Message::InitializeSession(InitializeSession { - session: self.id.clone(), - derived_point: derived_point, - })) + self.cluster.send(&next_node, Message::Encryption(EncryptionMessage::InitializeSession(InitializeSession { + session: self.id.clone().into(), + derived_point: derived_point.into(), + }))) } /// When session initialization message is received. 
- pub fn on_initialize_session(&self, sender: NodeId, mut message: InitializeSession) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeSession) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -239,13 +278,14 @@ impl Session { } // update derived point with random scalar - math::update_random_point(&mut message.derived_point)?; + let mut derived_point = message.derived_point.clone().into(); + math::update_random_point(&mut derived_point)?; // send confirmation back to master node - self.cluster.send(&sender, Message::ConfirmInitialization(ConfirmInitialization { - session: self.id.clone(), - derived_point: message.derived_point, - }))?; + self.cluster.send(&sender, Message::Encryption(EncryptionMessage::ConfirmInitialization(ConfirmInitialization { + session: self.id.clone().into(), + derived_point: derived_point.into(), + })))?; // update state data.master = Some(sender); @@ -255,8 +295,8 @@ impl Session { } /// When session initialization confirmation message is received. - pub fn on_confirm_initialization(&self, sender: NodeId, mut message: ConfirmInitialization) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -277,25 +317,26 @@ impl Session { // proceed message match next_receiver { Some(next_receiver) => { - return self.cluster.send(&next_receiver, Message::InitializeSession(InitializeSession { - session: self.id.clone(), - derived_point: message.derived_point, - })); + return self.cluster.send(&next_receiver, Message::Encryption(EncryptionMessage::InitializeSession(InitializeSession { + session: self.id.clone().into(), + derived_point: message.derived_point.clone().into(), + }))); }, None => { // update point once again to make sure that derived point is not generated by last node - math::update_random_point(&mut message.derived_point)?; + let mut derived_point = message.derived_point.clone().into(); + math::update_random_point(&mut derived_point)?; // remember derived point - data.derived_point = Some(message.derived_point.clone()); + data.derived_point = Some(derived_point.clone().into()); // broadcast derived point && other session parameters to every other node - self.cluster.broadcast(Message::CompleteInitialization(CompleteInitialization { - session: self.id.clone(), - nodes: data.nodes.iter().map(|(id, data)| (id.clone(), data.id_number.clone())).collect(), + self.cluster.broadcast(Message::Encryption(EncryptionMessage::CompleteInitialization(CompleteInitialization { + session: self.id.clone().into(), + nodes: data.nodes.iter().map(|(id, data)| (id.clone().into(), data.id_number.clone().into())).collect(), threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), - derived_point: message.derived_point, - }))?; + derived_point: derived_point.into(), + })))?; }, } @@ -305,12 +346,12 @@ impl Session { } /// When session initialization completion message is received. 
- pub fn on_complete_initialization(&self, sender: NodeId, message: CompleteInitialization) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_complete_initialization(&self, sender: NodeId, message: &CompleteInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); // check message - let nodes_ids = message.nodes.keys().cloned().collect(); + let nodes_ids = message.nodes.keys().cloned().map(Into::into).collect(); check_cluster_nodes(self.node(), &nodes_ids)?; check_threshold(message.threshold, &nodes_ids)?; @@ -326,8 +367,8 @@ impl Session { // remember passed data data.threshold = Some(message.threshold); - data.derived_point = Some(message.derived_point); - data.nodes = message.nodes.into_iter().map(|(id, number)| (id, NodeData::with_id_number(number))).collect(); + data.derived_point = Some(message.derived_point.clone().into()); + data.nodes = message.nodes.iter().map(|(id, number)| (id.clone().into(), NodeData::with_id_number(number.clone().into()))).collect(); // now it is time for keys dissemination (KD) phase drop(data); @@ -335,17 +376,20 @@ impl Session { } /// When keys dissemination message is received. - pub fn on_keys_dissemination(&self, sender: NodeId, message: KeysDissemination) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_keys_dissemination(&self, sender: NodeId, message: &KeysDissemination) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); // check state if data.state != SessionState::WaitingForKeysDissemination { - return Err(Error::InvalidStateForRequest); + match data.state { + SessionState::WaitingForInitializationComplete => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } } + debug_assert!(data.nodes.contains_key(&sender)); // check message let threshold = data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"); @@ -360,9 +404,9 @@ impl Session { return Err(Error::InvalidStateForRequest); } - node_data.secret1 = Some(message.secret1); - node_data.secret2 = Some(message.secret2); - node_data.publics = Some(message.publics); + node_data.secret1 = Some(message.secret1.clone().into()); + node_data.secret2 = Some(message.secret2.clone().into()); + node_data.publics = Some(message.publics.iter().cloned().map(Into::into).collect()); } // check if we have received keys from every other node @@ -382,10 +426,10 @@ impl Session { if !is_key_verification_ok { node_data.complaints.insert(self.node().clone()); - self.cluster.broadcast(Message::Complaint(Complaint { - session: self.id.clone(), - against: node_id.clone(), - }))?; + self.cluster.broadcast(Message::Encryption(EncryptionMessage::Complaint(Complaint { + session: self.id.clone().into(), + against: node_id.clone().into(), + })))?; } } @@ -396,8 +440,8 @@ impl Session { } /// When complaint is received. 
- pub fn on_complaint(&self, sender: NodeId, message: Complaint) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_complaint(&self, sender: NodeId, message: &Complaint) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -412,16 +456,16 @@ impl Session { } // respond to complaint - if &message.against == self.node() { + if &*message.against == self.node() { let secret1_sent = data.nodes[&sender].secret1_sent.clone().expect("secrets were sent on KD phase; KC phase follows KD phase; qed"); let secret2_sent = data.nodes[&sender].secret2_sent.clone().expect("secrets were sent on KD phase; KC phase follows KD phase; qed"); // someone is complaining against us => let's respond - return self.cluster.broadcast(Message::ComplaintResponse(ComplaintResponse { - session: self.id.clone(), - secret1: secret1_sent, - secret2: secret2_sent, - })); + return self.cluster.broadcast(Message::Encryption(EncryptionMessage::ComplaintResponse(ComplaintResponse { + session: self.id.clone().into(), + secret1: secret1_sent.into(), + secret2: secret2_sent.into(), + }))); } // someone is complaining against someone else => let's remember this @@ -434,15 +478,15 @@ impl Session { if is_critical_complaints_num { // too many complaints => exclude from session - Session::disqualify_node(&message.against, &*self.cluster, &mut *data); + SessionImpl::disqualify_node(&message.against, &*self.cluster, &mut *data); } Ok(()) } /// When complaint response is received - pub fn on_complaint_response(&self, sender: NodeId, message: ComplaintResponse) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_complaint_response(&self, sender: NodeId, message: &ComplaintResponse) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -471,11 +515,11 @@ impl Session { }; if !is_key_verification_ok { - Session::disqualify_node(&sender, &*self.cluster, &mut *data); + SessionImpl::disqualify_node(&sender, &*self.cluster, &mut *data); } else { let node_data = data.nodes.get_mut(&sender).expect("cluster guarantees to deliver messages from qualified nodes only; qed"); - node_data.secret1 = Some(message.secret1); - node_data.secret2 = Some(message.secret2); + node_data.secret1 = Some(message.secret1.clone().into()); + node_data.secret2 = Some(message.secret2.clone().into()); node_data.complaints.remove(self.node()); } @@ -510,19 +554,24 @@ impl Session { self_node.public_share = Some(self_public_share.clone()); // broadcast self public key share - self.cluster.broadcast(Message::PublicKeyShare(PublicKeyShare { - session: self.id.clone(), - public_share: self_public_share, - })) + self.cluster.broadcast(Message::Encryption(EncryptionMessage::PublicKeyShare(PublicKeyShare { + session: self.id.clone().into(), + public_share: self_public_share.into(), + }))) } /// When public key share is received. 
- pub fn on_public_key_share(&self, sender: NodeId, message: PublicKeyShare) -> Result<(), Error> { + pub fn on_public_key_share(&self, sender: NodeId, message: &PublicKeyShare) -> Result<(), Error> { let mut data = self.data.lock(); // check state if data.state != SessionState::WaitingForPublicKeyShare { - return Err(Error::InvalidStateForRequest); + match data.state { + SessionState::WaitingForInitializationComplete | + SessionState::WaitingForKeysDissemination | + SessionState::KeyCheck => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } } // update node data with received public share @@ -532,7 +581,7 @@ impl Session { return Err(Error::InvalidMessage); } - node_data.public_share = Some(message.public_share); + node_data.public_share = Some(message.public_share.clone().into()); } // if there are still nodes which have not sent us their public shares - do nothing @@ -540,16 +589,149 @@ impl Session { return Ok(()); } - // else - calculate joint public key && finish session - data.joint_public = { + // else - calculate joint public key + let joint_public = { let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); - Some(math::compute_joint_public(public_shares)?) + math::compute_joint_public(public_shares)? }; - data.state = SessionState::Finished; + + // if we are a slave node - wait for session completion + if data.master.as_ref() != Some(self.node()) { + data.joint_public = Some(Ok(joint_public)); + data.state = SessionState::WaitingForEncryptionConfirmation; + return Ok(()); + } + + // then generate secret point + // then encrypt secret point with joint public key + let secret_point = math::generate_random_point()?; + let encrypted_secret_point = math::encrypt_secret(&secret_point, &joint_public)?; + + // then save encrypted data to the key storage + let encrypted_data = DocumentKeyShare { + threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), + secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + common_point: encrypted_secret_point.common_point, + encrypted_point: encrypted_secret_point.encrypted_point, + }; + self.key_storage.insert(self.id.clone(), encrypted_data.clone()) + .map_err(|e| Error::KeyStorage(e.into()))?; + + // then distribute encrypted data to every other node + self.cluster.broadcast(Message::Encryption(EncryptionMessage::SessionCompleted(SessionCompleted { + session: self.id.clone().into(), + common_point: encrypted_data.common_point.clone().into(), + encrypted_point: encrypted_data.encrypted_point.clone().into(), + })))?; + + // then wait for confirmation from all other nodes + { + let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed"); + self_node.completion_confirmed = true; + } + data.joint_public = Some(Ok(joint_public)); + data.secret_point = Some(Ok(secret_point)); + data.state = SessionState::WaitingForEncryptionConfirmation; Ok(()) } + /// When session completion message is received. 
+ pub fn on_session_completed(&self, sender: NodeId, message: &SessionCompleted) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); + + let mut data = self.data.lock(); + debug_assert!(data.nodes.contains_key(&sender)); + + // check state + if data.state != SessionState::WaitingForEncryptionConfirmation { + match data.state { + SessionState::WaitingForPublicKeyShare => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } + } + + // if we are not the master, save result and respond with confirmation + if data.master.as_ref() != Some(self.node()) { + // check that we have received message from master + if data.master.as_ref() != Some(&sender) { + return Err(Error::InvalidMessage); + } + + // save encrypted data to key storage + let encrypted_data = DocumentKeyShare { + threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), + secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + common_point: message.common_point.clone().into(), + encrypted_point: message.encrypted_point.clone().into(), + }; + self.key_storage.insert(self.id.clone(), encrypted_data.clone()) + .map_err(|e| Error::KeyStorage(e.into()))?; + + // then respond with confirmation + data.state = SessionState::Finished; + return self.cluster.send(&sender, Message::Encryption(EncryptionMessage::SessionCompleted(SessionCompleted { + session: self.id.clone().into(), + common_point: encrypted_data.common_point.clone().into(), + encrypted_point: encrypted_data.encrypted_point.clone().into(), + }))); + } + + // remember that we have received confirmation from sender node + { + let sender_node = data.nodes.get_mut(&sender).expect("node is always qualified by himself; qed"); + if sender_node.completion_confirmed { + return Err(Error::InvalidMessage); + } + + sender_node.completion_confirmed = true; + } + + // check if we have received confirmations from all cluster nodes + if data.nodes.iter().any(|(_, node_data)| !node_data.completion_confirmed) { + return Ok(()) + } + + // we have received enough confirmations => complete session + data.state = SessionState::Finished; + self.completed.notify_all(); + + Ok(()) + } + + /// When error has occurred on another node. + pub fn on_session_error(&self, sender: NodeId, message: &SessionError) { + warn!("{}: encryption session error: {:?} from {}", self.node(), message, sender); + let mut data = self.data.lock(); + data.state = SessionState::Failed; + data.joint_public = Some(Err(Error::Io(message.error.clone()))); + data.secret_point = Some(Err(Error::Io(message.error.clone()))); + self.completed.notify_all(); + } + + /// When session timeout has occurred. 
+ pub fn on_session_timeout(&self, node: &NodeId) { + warn!("{}: encryption session timeout", self.node()); + let mut data = self.data.lock(); + + match data.state { + SessionState::WaitingForInitialization | + SessionState::WaitingForInitializationConfirm(_) | + SessionState::WaitingForInitializationComplete => (), + _ => if !data.nodes.contains_key(node) { + return; + }, + } + + data.state = SessionState::Failed; + data.joint_public = Some(Err(Error::Io("session expired".into()))); + data.secret_point = Some(Err(Error::Io("session expired".into()))); + self.completed.notify_all(); + } + /// Keys dissemination (KD) phase fn disseminate_keys(&self) -> Result<(), Error> { let mut data = self.data.lock(); @@ -576,12 +758,12 @@ impl Session { node_data.secret1_sent = Some(secret1.clone()); node_data.secret2_sent = Some(secret2.clone()); - self.cluster.send(&node, Message::KeysDissemination(KeysDissemination { - session: self.id.clone(), - secret1: secret1, - secret2: secret2, - publics: publics.clone(), - }))?; + self.cluster.send(&node, Message::Encryption(EncryptionMessage::KeysDissemination(KeysDissemination { + session: self.id.clone().into(), + secret1: secret1.into(), + secret2: secret2.into(), + publics: publics.iter().cloned().map(Into::into).collect(), + })))?; } else { node_data.secret1 = Some(secret1); node_data.secret2 = Some(secret2); @@ -599,7 +781,7 @@ impl Session { fn disqualify_node(node: &NodeId, cluster: &Cluster, data: &mut SessionData) { let threshold = data.threshold .expect("threshold is filled on initialization phase; node can only be disqualified during KC phase; KC phase follows initialization phase; qed"); - + // blacklist node cluster.blacklist(&node); // too many complaints => exclude from session @@ -612,6 +794,25 @@ impl Session { } } +impl Session for SessionImpl { + #[cfg(test)] + fn joint_public_key(&self) -> Option { + self.data.lock().joint_public.clone().and_then(|r| r.ok()) + } + + + fn wait(&self) -> Result { + let mut data = self.data.lock(); + if !data.secret_point.is_some() { + self.completed.wait(&mut data); + } + + data.secret_point.as_ref() + .expect("checked above or waited for completed; completed is only signaled when secret_point.is_some(); qed") + .clone() + } +} + impl EveryOtherNodeVisitor { pub fn new(self_id: &NodeId, nodes: I) -> Self where I: Iterator { EveryOtherNodeVisitor { @@ -648,11 +849,12 @@ impl NodeData { secret2: None, publics: None, public_share: None, + completion_confirmed: false, } } } -impl Debug for Session { +impl Debug for SessionImpl { fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { write!(f, "Encryption session {} on {}", self.id, self.self_node_id) } @@ -682,26 +884,29 @@ pub fn check_threshold(threshold: usize, nodes: &BTreeSet) -> Result<(), #[cfg(test)] mod tests { + use std::time; use std::sync::Arc; - use std::collections::{BTreeSet, BTreeMap}; + use std::collections::{BTreeSet, BTreeMap, VecDeque}; + use tokio_core::reactor::Core; use ethkey::{Random, Generator}; - use key_server_cluster::{NodeId, SessionId, Error}; - use key_server_cluster::message::{self, Message}; - use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::encryption_session::{Session, SessionState}; + use key_server_cluster::{NodeId, SessionId, Error, DummyKeyStorage}; + use key_server_cluster::message::{self, Message, EncryptionMessage}; + use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established}; + use 
key_server_cluster::encryption_session::{Session, SessionImpl, SessionState, SessionParams}; use key_server_cluster::math; use key_server_cluster::math::tests::do_encryption_and_decryption; #[derive(Debug)] struct Node { pub cluster: Arc, - pub session: Session, + pub session: SessionImpl, } #[derive(Debug)] struct MessageLoop { pub session_id: SessionId, pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, } impl MessageLoop { @@ -712,7 +917,12 @@ mod tests { let key_pair = Random.generate().unwrap(); let node_id = key_pair.public().clone(); let cluster = Arc::new(DummyCluster::new(node_id.clone())); - let session = Session::new(session_id.clone(), node_id.clone(), cluster.clone()); + let session = SessionImpl::new(SessionParams { + id: session_id.clone(), + self_node_id: node_id.clone(), + key_storage: Arc::new(DummyKeyStorage::default()), + cluster: cluster.clone(), + }); nodes.insert(node_id, Node { cluster: cluster, session: session }); } @@ -726,22 +936,23 @@ mod tests { MessageLoop { session_id: session_id, nodes: nodes, + queue: VecDeque::new(), } } - pub fn master(&self) -> &Session { + pub fn master(&self) -> &SessionImpl { &self.nodes.values().nth(0).unwrap().session } - pub fn first_slave(&self) -> &Session { + pub fn first_slave(&self) -> &SessionImpl { &self.nodes.values().nth(1).unwrap().session } - pub fn second_slave(&self) -> &Session { + pub fn second_slave(&self) -> &SessionImpl { &self.nodes.values().nth(2).unwrap().session } - pub fn third_slave(&self) -> &Session { + pub fn third_slave(&self) -> &SessionImpl { &self.nodes.values().nth(3).unwrap().session } @@ -749,18 +960,29 @@ mod tests { self.nodes.values() .filter_map(|n| n.cluster.take_message().map(|m| (n.session.node().clone(), m.0, m.1))) .nth(0) + .or_else(|| self.queue.pop_front()) } pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match msg.2 { - Message::InitializeSession(message) => self.nodes[&msg.1].session.on_initialize_session(msg.0, message), - Message::ConfirmInitialization(message) => self.nodes[&msg.1].session.on_confirm_initialization(msg.0, message), - Message::CompleteInitialization(message) => self.nodes[&msg.1].session.on_complete_initialization(msg.0, message), - Message::KeysDissemination(message) => self.nodes[&msg.1].session.on_keys_dissemination(msg.0, message), - Message::Complaint(message) => self.nodes[&msg.1].session.on_complaint(msg.0, message), - Message::ComplaintResponse(message) => self.nodes[&msg.1].session.on_complaint_response(msg.0, message), - Message::PublicKeyShare(message) => self.nodes[&msg.1].session.on_public_key_share(msg.0, message), - _ => panic!("unexpected"), + match { + match msg.2 { + Message::Encryption(EncryptionMessage::InitializeSession(ref message)) => self.nodes[&msg.1].session.on_initialize_session(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::ConfirmInitialization(ref message)) => self.nodes[&msg.1].session.on_confirm_initialization(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::CompleteInitialization(ref message)) => self.nodes[&msg.1].session.on_complete_initialization(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::KeysDissemination(ref message)) => self.nodes[&msg.1].session.on_keys_dissemination(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::Complaint(ref message)) => self.nodes[&msg.1].session.on_complaint(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::ComplaintResponse(ref message)) => 
self.nodes[&msg.1].session.on_complaint_response(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::PublicKeyShare(ref message)) => self.nodes[&msg.1].session.on_public_key_share(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::SessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(msg.0.clone(), &message), + _ => panic!("unexpected"), + } + } { + Ok(_) => Ok(()), + Err(Error::TooEarlyForRequest) => { + self.queue.push_back(msg); + Ok(()) + }, + Err(err) => Err(err), } } @@ -796,7 +1018,12 @@ mod tests { fn fails_to_initialize_if_not_a_part_of_cluster() { let node_id = math::generate_random_point().unwrap(); let cluster = Arc::new(DummyCluster::new(node_id.clone())); - let session = Session::new(SessionId::default(), node_id.clone(), cluster); + let session = SessionImpl::new(SessionParams { + id: SessionId::default(), + self_node_id: node_id.clone(), + key_storage: Arc::new(DummyKeyStorage::default()), + cluster: cluster, + }); let cluster_nodes: BTreeSet<_> = (0..2).map(|_| math::generate_random_point().unwrap()).collect(); assert_eq!(session.initialize(0, cluster_nodes).unwrap_err(), Error::InvalidNodesConfiguration); } @@ -816,9 +1043,9 @@ mod tests { fn fails_to_accept_initialization_when_already_initialized() { let (sid, m, _, mut l) = make_simple_cluster(0, 2).unwrap(); l.take_and_process_message().unwrap(); - assert_eq!(l.first_slave().on_initialize_session(m, message::InitializeSession { - session: sid, - derived_point: math::generate_random_point().unwrap(), + assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -826,16 +1053,16 @@ mod tests { fn slave_updates_derived_point_on_initialization() { let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); let passed_point = match l.take_message().unwrap() { - (f, t, Message::InitializeSession(message)) => { + (f, t, Message::Encryption(EncryptionMessage::InitializeSession(message))) => { let point = message.derived_point.clone(); - l.process_message((f, t, Message::InitializeSession(message))).unwrap(); + l.process_message((f, t, Message::Encryption(EncryptionMessage::InitializeSession(message)))).unwrap(); point }, _ => panic!("unexpected"), }; match l.take_message().unwrap() { - (_, _, Message::ConfirmInitialization(message)) => assert!(passed_point != message.derived_point), + (_, _, Message::Encryption(EncryptionMessage::ConfirmInitialization(message))) => assert!(passed_point != message.derived_point), _ => panic!("unexpected"), } } @@ -846,9 +1073,9 @@ mod tests { l.take_and_process_message().unwrap(); l.take_and_process_message().unwrap(); l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, message::ConfirmInitialization { - session: sid, - derived_point: math::generate_random_point().unwrap(), + assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -857,9 +1084,9 @@ mod tests { let (sid, _, s, mut l) = make_simple_cluster(0, 2).unwrap(); l.take_and_process_message().unwrap(); l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, message::ConfirmInitialization { - session: sid, - derived_point: math::generate_random_point().unwrap(), + 
assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -868,15 +1095,15 @@ mod tests { let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); l.take_and_process_message().unwrap(); let passed_point = match l.take_message().unwrap() { - (f, t, Message::ConfirmInitialization(message)) => { + (f, t, Message::Encryption(EncryptionMessage::ConfirmInitialization(message))) => { let point = message.derived_point.clone(); - l.process_message((f, t, Message::ConfirmInitialization(message))).unwrap(); + l.process_message((f, t, Message::Encryption(EncryptionMessage::ConfirmInitialization(message)))).unwrap(); point }, _ => panic!("unexpected"), }; - assert!(passed_point != l.master().derived_point().unwrap()); + assert!(l.master().derived_point().unwrap() != passed_point.into()); } #[test] @@ -884,11 +1111,11 @@ mod tests { let (sid, m, s, l) = make_simple_cluster(0, 2).unwrap(); let mut nodes = BTreeMap::new(); nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidNodesCount); } @@ -898,11 +1125,11 @@ mod tests { let mut nodes = BTreeMap::new(); nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(math::generate_random_point().unwrap(), math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidNodesConfiguration); } @@ -912,11 +1139,11 @@ mod tests { let mut nodes = BTreeMap::new(); nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 2, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidThreshold); } @@ -926,11 +1153,11 @@ mod tests { let mut nodes = BTreeMap::new(); nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), 
threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -945,22 +1172,22 @@ mod tests { nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(s, math::generate_random_scalar().unwrap()); nodes.insert(l.second_slave().node().clone(), math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() { let (sid, _, s, l) = make_simple_cluster(0, 2).unwrap(); - assert_eq!(l.master().on_keys_dissemination(s, message::KeysDissemination { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap()], + assert_eq!(l.master().on_keys_dissemination(s, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], }).unwrap_err(), Error::InvalidStateForRequest); } @@ -974,11 +1201,11 @@ mod tests { l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, message::KeysDissemination { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap(), math::generate_random_point().unwrap()], + assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], }).unwrap_err(), Error::InvalidMessage); } @@ -992,11 +1219,11 @@ mod tests { l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, message::KeysDissemination { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap()], + assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], }).unwrap_err(), Error::InvalidStateForRequest); } @@ -1004,12 +1231,12 @@ mod tests { fn 
defends_if_receives_complain_on_himself() { let (sid, m, s, mut l) = make_simple_cluster(1, 3).unwrap(); l.take_and_process_all_messages().unwrap(); - l.master().on_complaint(s, message::Complaint { - session: sid, - against: m, + l.master().on_complaint(s, &message::Complaint { + session: sid.into(), + against: m.into(), }).unwrap(); match l.take_message().unwrap() { - (_, _, Message::ComplaintResponse(_)) => (), + (_, _, Message::Encryption(EncryptionMessage::ComplaintResponse(_))) => (), _ => panic!("unexpected"), } } @@ -1018,13 +1245,13 @@ mod tests { fn node_is_disqualified_if_enough_complaints_received() { let (sid, _, s, mut l) = make_simple_cluster(1, 4).unwrap(); l.take_and_process_all_messages().unwrap(); - l.master().on_complaint(l.second_slave().node().clone(), message::Complaint { - session: sid, - against: s.clone(), + l.master().on_complaint(l.second_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.clone().into(), }).unwrap(); - l.master().on_complaint(l.third_slave().node().clone(), message::Complaint { - session: sid, - against: s, + l.master().on_complaint(l.third_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); } @@ -1033,13 +1260,13 @@ mod tests { fn node_is_not_disqualified_if_enough_complaints_received_from_the_same_node() { let (sid, _, s, mut l) = make_simple_cluster(1, 4).unwrap(); l.take_and_process_all_messages().unwrap(); - l.master().on_complaint(l.second_slave().node().clone(), message::Complaint { - session: sid, - against: s.clone(), + l.master().on_complaint(l.second_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.clone().into(), }).unwrap(); - l.master().on_complaint(l.second_slave().node().clone(), message::Complaint { - session: sid, - against: s, + l.master().on_complaint(l.second_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 4); } @@ -1058,17 +1285,17 @@ mod tests { l.take_and_process_message().unwrap(); // s1 -> m: KeysDissemination l.take_and_process_message().unwrap(); // s1 -> s2: KeysDissemination let s2 = l.second_slave().node().clone(); - l.master().on_keys_dissemination(s2.clone(), message::KeysDissemination { - session: sid.clone(), - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap(), math::generate_random_point().unwrap()], + l.master().on_keys_dissemination(s2.clone(), &message::KeysDissemination { + session: sid.clone().into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); - l.master().on_complaint_response(s2, message::ComplaintResponse { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), + l.master().on_complaint_response(s2, &message::ComplaintResponse { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 2); } @@ -1087,22 +1314,22 @@ mod tests { l.take_and_process_message().unwrap(); // s1 -> m: 
KeysDissemination l.take_and_process_message().unwrap(); // s1 -> s2: KeysDissemination let (f, t, msg) = match l.take_message() { - Some((f, t, Message::KeysDissemination(msg))) => (f, t, msg), + Some((f, t, Message::Encryption(EncryptionMessage::KeysDissemination(msg)))) => (f, t, msg), _ => panic!("unexpected"), }; assert_eq!(&f, l.second_slave().node()); assert_eq!(&t, l.master().node()); - l.master().on_keys_dissemination(f.clone(), message::KeysDissemination { - session: sid.clone(), - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: msg.publics.clone(), + l.master().on_keys_dissemination(f.clone(), &message::KeysDissemination { + session: sid.clone().into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: msg.publics.clone().into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); - l.master().on_complaint_response(f, message::ComplaintResponse { - session: sid, - secret1: msg.secret1, - secret2: msg.secret2, + l.master().on_complaint_response(f, &message::ComplaintResponse { + session: sid.into(), + secret1: msg.secret1.into(), + secret2: msg.secret2.into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); } @@ -1116,9 +1343,9 @@ mod tests { #[test] fn should_not_accept_public_key_share_when_is_not_waiting_for_it() { let (sid, _, s, l) = make_simple_cluster(1, 3).unwrap(); - assert_eq!(l.master().on_public_key_share(s, message::PublicKeyShare { - session: sid, - public_share: math::generate_random_point().unwrap(), + assert_eq!(l.master().on_public_key_share(s, &message::PublicKeyShare { + session: sid.into(), + public_share: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -1129,24 +1356,21 @@ mod tests { l.master().start_key_generation_phase().unwrap(); l.first_slave().start_key_generation_phase().unwrap(); let (f, t, msg) = match l.take_message() { - Some((f, t, Message::PublicKeyShare(msg))) => (f, t, msg), + Some((f, t, Message::Encryption(EncryptionMessage::PublicKeyShare(msg)))) => (f, t, msg), _ => panic!("unexpected"), }; assert_eq!(&f, l.master().node()); assert_eq!(&t, l.first_slave().node()); - l.process_message((f, t, Message::PublicKeyShare(msg.clone()))).unwrap(); - assert_eq!(l.first_slave().on_public_key_share(m, message::PublicKeyShare { - session: sid, - public_share: math::generate_random_point().unwrap(), + l.process_message((f, t, Message::Encryption(EncryptionMessage::PublicKeyShare(msg.clone())))).unwrap(); + assert_eq!(l.first_slave().on_public_key_share(m, &message::PublicKeyShare { + session: sid.into(), + public_share: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn complete_enc_dec_session() { - // TODO: when number of nodes, needed to decrypt message is odd, algorithm won't work - // let test_cases = [/*(0, 2), */(1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), - // (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; - let test_cases = [(3, 5)]; + let test_cases = [(0, 5), (2, 5), (3, 5)]; for &(threshold, num_nodes) in &test_cases { let mut l = MessageLoop::new(num_nodes); l.master().initialize(threshold, l.nodes.keys().cloned().collect()).unwrap(); @@ -1194,4 +1418,26 @@ mod tests { } // TODO: add test where some nodes are disqualified from session + + #[test] + fn encryption_session_works_over_network() { + 
//::util::log::init_log(); + + let test_cases = [(1, 3)]; + for &(threshold, num_nodes) in &test_cases { + let mut core = Core::new().unwrap(); + + // prepare cluster objects for each node + let clusters = make_clusters(&core, 6020, num_nodes); + run_clusters(&clusters); + + // establish connections + loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); + + // run session to completion + let session_id = SessionId::default(); + let session = clusters[0].client().new_encryption_session(session_id, threshold).unwrap(); + loop_until(&mut core, time::Duration::from_millis(1000), || session.joint_public_key().is_some()); + } + } } diff --git a/secret_store/src/key_server_cluster/io/deadline.rs b/secret_store/src/key_server_cluster/io/deadline.rs new file mode 100644 index 000000000..7b8c4d0ed --- /dev/null +++ b/secret_store/src/key_server_cluster/io/deadline.rs @@ -0,0 +1,85 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use std::time::Duration; +use futures::{Future, Select, BoxFuture, Poll, Async}; +use tokio_core::reactor::{Handle, Timeout}; + +type DeadlineBox where F: Future = BoxFuture, F::Error>; + +/// Complete a passed future or fail if it is not completed within timeout. +pub fn deadline(duration: Duration, handle: &Handle, future: F) -> Result, io::Error> + where F: Future + Send + 'static, T: 'static { + let timeout = try!(Timeout::new(duration, handle)).map(|_| DeadlineStatus::Timeout).boxed(); + let future = future.map(DeadlineStatus::Meet).boxed(); + let deadline = Deadline { + future: timeout.select(future), + }; + Ok(deadline) +} + +#[derive(Debug, PartialEq)] +/// Deadline future completion status. +pub enum DeadlineStatus { + /// Completed a future. + Meet(T), + /// Failed with timeout. + Timeout, +} + +/// Future, which waits for passed future completion within given period, or fails with timeout. 
+pub struct Deadline where F: Future { + future: Select, DeadlineBox>, +} + +impl Future for Deadline where F: Future { + type Item = DeadlineStatus; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + match self.future.poll() { + Ok(Async::Ready((result, _other))) => Ok(Async::Ready(result)), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err((err, _other)) => Err(err), + } + } +} + +#[cfg(test)] +mod tests { + use std::io; + use std::time::Duration; + use futures::{Future, empty, done}; + use tokio_core::reactor::Core; + use super::{deadline, DeadlineStatus}; + + //#[test] TODO: not working + fn _deadline_timeout_works() { + let mut core = Core::new().unwrap(); + let deadline = deadline(Duration::from_millis(1), &core.handle(), empty::<(), io::Error>()).unwrap(); + core.turn(Some(Duration::from_millis(3))); + assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Timeout); + } + + #[test] + fn deadline_result_works() { + let mut core = Core::new().unwrap(); + let deadline = deadline(Duration::from_millis(1000), &core.handle(), done(Ok(()))).unwrap(); + core.turn(Some(Duration::from_millis(3))); + assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(())); + } +} \ No newline at end of file diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs new file mode 100644 index 000000000..0d71d25aa --- /dev/null +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -0,0 +1,320 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use std::collections::BTreeSet; +use futures::{Future, Poll, Async}; +use ethkey::{Random, Generator, KeyPair, Secret, sign, verify_public}; +use util::H256; +use key_server_cluster::{NodeId, Error}; +use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; +use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage, + read_message, compute_shared_key}; + +/// Start handshake procedure with another node from the cluster. +pub fn handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: io::Write + io::Read { + let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); + handshake_with_plain_confirmation(a, self_confirmation_plain, self_key_pair, trusted_nodes) +} + +/// Start handshake procedure with another node from the cluster and given plain confirmation. 
+pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Result, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: io::Write + io::Read { + let (error, state) = match self_confirmation_plain.clone() + .and_then(|c| Handshake::::make_public_key_message(self_key_pair.public().clone(), c)) { + Ok(message) => (None, HandshakeState::SendPublicKey(write_message(a, message))), + Err(err) => (Some((a, Err(err))), HandshakeState::Finished), + }; + + Handshake { + is_active: true, + error: error, + state: state, + self_key_pair: self_key_pair, + self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), + trusted_nodes: trusted_nodes, + other_node_id: None, + other_confirmation_plain: None, + shared_key: None, + } +} + +/// Wait for handshake procedure to be started by another node from the cluster. +pub fn accept_handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: io::Write + io::Read { + let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); + let (error, state) = match self_confirmation_plain.clone() { + Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))), + Err(err) => (Some((a, Err(err))), HandshakeState::Finished), + }; + + Handshake { + is_active: false, + error: error, + state: state, + self_key_pair: self_key_pair, + self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), + trusted_nodes: trusted_nodes, + other_node_id: None, + other_confirmation_plain: None, + shared_key: None, + } +} + +#[derive(Debug, PartialEq)] +/// Result of handshake procedure. +pub struct HandshakeResult { + /// Node id. + pub node_id: NodeId, + /// Shared key. + pub shared_key: Secret, +} + +/// Future handshake procedure. +pub struct Handshake { + is_active: bool, + error: Option<(A, Result)>, + state: HandshakeState, + self_key_pair: KeyPair, + self_confirmation_plain: H256, + trusted_nodes: BTreeSet, + other_node_id: Option, + other_confirmation_plain: Option, + shared_key: Option, +} + +/// Active handshake state. 
+enum HandshakeState { + SendPublicKey(WriteMessage), + ReceivePublicKey(ReadMessage), + SendPrivateKeySignature(WriteMessage), + ReceivePrivateKeySignature(ReadMessage), + Finished, +} + +impl Handshake where A: io::Read + io::Write { + #[cfg(test)] + pub fn set_self_confirmation_plain(&mut self, self_confirmation_plain: H256) { + self.self_confirmation_plain = self_confirmation_plain; + } + + pub fn make_public_key_message(self_node_id: NodeId, confirmation_plain: H256) -> Result { + Ok(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { + node_id: self_node_id.into(), + confirmation_plain: confirmation_plain.into(), + }))) + } + + fn make_private_key_signature_message(secret: &Secret, confirmation_plain: &H256) -> Result { + Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { + confirmation_signed: sign(secret, confirmation_plain)?.into(), + }))) + } +} + +impl Future for Handshake where A: io::Read + io::Write { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + if let Some(error_result) = self.error.take() { + return Ok(error_result.into()); + } + + let (next, result) = match self.state { + HandshakeState::SendPublicKey(ref mut future) => { + let (stream, _) = try_ready!(future.poll()); + + if self.is_active { + (HandshakeState::ReceivePublicKey( + read_message(stream) + ), Async::NotReady) + } else { + self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.other_node_id.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_node_id is filled in ReceivePublicKey; qed") + ) { + Ok(shared_key) => Some(shared_key), + Err(err) => return Ok((stream, Err(err)).into()), + }; + + let message = match Handshake::::make_private_key_signature_message( + self.self_key_pair.secret(), + self.other_confirmation_plain.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_confirmation_plain is filled in ReceivePublicKey; qed") + ) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, + self.shared_key.as_ref().expect("filled couple of lines above; qed"), + message)), Async::NotReady) + } + }, + HandshakeState::ReceivePublicKey(ref mut future) => { + let (stream, message) = try_ready!(future.poll()); + + let message = match message { + Ok(message) => match message { + Message::Cluster(ClusterMessage::NodePublicKey(message)) => message, + _ => return Ok((stream, Err(Error::InvalidMessage)).into()), + }, + Err(err) => return Ok((stream, Err(err.into())).into()), + }; + + if !self.trusted_nodes.contains(&*message.node_id) { + return Ok((stream, Err(Error::InvalidNodeId)).into()); + } + + self.other_node_id = Some(message.node_id.into()); + self.other_confirmation_plain = Some(message.confirmation_plain.into()); + if self.is_active { + self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.other_node_id.as_ref().expect("filled couple of lines above; qed") + ) { + Ok(shared_key) => Some(shared_key), + Err(err) => return Ok((stream, Err(err)).into()), + }; + + let message = match Handshake::::make_private_key_signature_message( + self.self_key_pair.secret(), + self.other_confirmation_plain.as_ref().expect("filled couple of lines above; qed") + ) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + 
(HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, + self.shared_key.as_ref().expect("filled couple of lines above; qed"), + message)), Async::NotReady) + } else { + let message = match Handshake::::make_public_key_message(self.self_key_pair.public().clone(), self.self_confirmation_plain.clone()) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + (HandshakeState::SendPublicKey(write_message(stream, message)), Async::NotReady) + } + }, + HandshakeState::SendPrivateKeySignature(ref mut future) => { + let (stream, _) = try_ready!(future.poll()); + + (HandshakeState::ReceivePrivateKeySignature( + read_message(stream) + ), Async::NotReady) + }, + HandshakeState::ReceivePrivateKeySignature(ref mut future) => { + let (stream, message) = try_ready!(future.poll()); + + let message = match message { + Ok(message) => match message { + Message::Cluster(ClusterMessage::NodePrivateKeySignature(message)) => message, + _ => return Ok((stream, Err(Error::InvalidMessage)).into()), + }, + Err(err) => return Ok((stream, Err(err.into())).into()), + }; + + let other_node_public = self.other_node_id.as_ref().expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"); + if !verify_public(other_node_public, &*message.confirmation_signed, &self.self_confirmation_plain).unwrap_or(false) { + return Ok((stream, Err(Error::InvalidMessage)).into()); + } + + (HandshakeState::Finished, Async::Ready((stream, Ok(HandshakeResult { + node_id: self.other_node_id.expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"), + shared_key: self.shared_key.clone().expect("shared_key is filled in Send/ReceivePublicKey; ReceivePrivateKeySignature follows Send/ReceivePublicKey; qed"), + })))) + }, + HandshakeState::Finished => panic!("poll Handshake after it's done"), + }; + + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result) + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + use futures::Future; + use ethcrypto::ecdh::agree; + use ethkey::{Random, Generator, sign}; + use util::H256; + use key_server_cluster::io::message::tests::TestIo; + use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; + use super::{handshake_with_plain_confirmation, accept_handshake, HandshakeResult}; + + fn prepare_test_io() -> (H256, TestIo) { + let self_key_pair = Random.generate().unwrap(); + let peer_key_pair = Random.generate().unwrap(); + let mut io = TestIo::new(self_key_pair.clone(), peer_key_pair.public().clone()); + + let self_confirmation_plain = *Random.generate().unwrap().secret().clone(); + let peer_confirmation_plain = *Random.generate().unwrap().secret().clone(); + + let self_confirmation_signed = sign(peer_key_pair.secret(), &self_confirmation_plain).unwrap(); + let peer_confirmation_signed = sign(self_key_pair.secret(), &peer_confirmation_plain).unwrap(); + + io.add_input_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { + node_id: peer_key_pair.public().clone().into(), + confirmation_plain: peer_confirmation_plain.into(), + }))); + io.add_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { + confirmation_signed: self_confirmation_signed.into(), + }))); + + io.add_output_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { + node_id: 
self_key_pair.public().clone().into(), + confirmation_plain: self_confirmation_plain.clone().into(), + }))); + io.add_output_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { + confirmation_signed: peer_confirmation_signed.into(), + }))); + + (self_confirmation_plain, io) + } + + #[test] + fn active_handshake_works() { + let (self_confirmation_plain, io) = prepare_test_io(); + let self_key_pair = io.self_key_pair().clone(); + let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); + let shared_key = agree(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap(); + + let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), self_key_pair, trusted_nodes); + let handshake_result = handshake.wait().unwrap(); + assert_eq!(handshake_result.1, Ok(HandshakeResult { + node_id: handshake_result.0.peer_public().clone(), + shared_key: shared_key, + })); + handshake_result.0.assert_output(); + } + + #[test] + fn passive_handshake_works() { + let (self_confirmation_plain, io) = prepare_test_io(); + let self_key_pair = io.self_key_pair().clone(); + let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); + let shared_key = agree(self_key_pair.secret(), io.peer_public()).unwrap(); + + let mut handshake = accept_handshake(io, self_key_pair, trusted_nodes); + handshake.set_self_confirmation_plain(self_confirmation_plain); + + let handshake_result = handshake.wait().unwrap(); + assert_eq!(handshake_result.1, Ok(HandshakeResult { + node_id: handshake_result.0.peer_public().clone(), + shared_key: shared_key, + })); + handshake_result.0.assert_output(); + } +} diff --git a/secret_store/src/key_server_cluster/io/message.rs b/secret_store/src/key_server_cluster/io/message.rs new file mode 100644 index 000000000..bcabebf76 --- /dev/null +++ b/secret_store/src/key_server_cluster/io/message.rs @@ -0,0 +1,247 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io::Cursor; +use std::u16; +use std::ops::Deref; +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use serde_json; +use ethcrypto::ecdh::agree; +use ethkey::{Public, Secret}; +use key_server_cluster::Error; +use key_server_cluster::message::{Message, ClusterMessage, EncryptionMessage, DecryptionMessage}; + +/// Size of serialized header. +pub const MESSAGE_HEADER_SIZE: usize = 4; + +#[derive(Debug, PartialEq)] +/// Message header. +pub struct MessageHeader { + /// Message/Header version. + pub version: u8, + /// Message kind. + pub kind: u8, + /// Message payload size (without header). + pub size: u16, +} + +#[derive(Debug, Clone, PartialEq)] +/// Serialized message. 
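// The header above is fixed-size (MESSAGE_HEADER_SIZE = 4 bytes on the wire): version (u8),
// kind (u8) and payload size (u16, little-endian); the payload itself is the serde_json
// encoding of the concrete message, as serialize_message below shows.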
+pub struct SerializedMessage(Vec); + +impl Deref for SerializedMessage { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + &self.0 + } +} + +impl Into> for SerializedMessage { + fn into(self) -> Vec { + self.0 + } +} + +/// Serialize message. +pub fn serialize_message(message: Message) -> Result { + let (message_kind, payload) = match message { + Message::Cluster(ClusterMessage::NodePublicKey(payload)) => (1, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::NodePrivateKeySignature(payload)) => (2, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::KeepAlive(payload)) => (3, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::KeepAliveResponse(payload)) => (4, serde_json::to_vec(&payload)), + + Message::Encryption(EncryptionMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::Complaint(payload)) => (54, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::ComplaintResponse(payload)) => (55, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::PublicKeyShare(payload)) => (56, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::SessionError(payload)) => (57, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::SessionCompleted(payload)) => (58, serde_json::to_vec(&payload)), + + Message::Decryption(DecryptionMessage::InitializeDecryptionSession(payload)) => (100, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (102, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (103, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (104, serde_json::to_vec(&payload)), + }; + + let payload = payload.map_err(|err| Error::Serde(err.to_string()))?; + let payload_len = payload.len(); + if payload_len > u16::MAX as usize { + return Err(Error::InvalidMessage); + } + + let header = MessageHeader { + kind: message_kind, + version: 1, + size: payload_len as u16, + }; + + let mut serialized_message = serialize_header(&header)?; + serialized_message.extend(payload); + Ok(SerializedMessage(serialized_message)) +} + +/// Deserialize message. 
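// Message kinds are partitioned by level: 1-4 are cluster messages, 50-58 encryption (DKG)
// session messages and 100-104 decryption session messages; deserialize_message below
// rejects any other kind with Error::Serde.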
+pub fn deserialize_message(header: &MessageHeader, payload: Vec) -> Result { + Ok(match header.kind { + 1 => Message::Cluster(ClusterMessage::NodePublicKey(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 2 => Message::Cluster(ClusterMessage::NodePrivateKeySignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 3 => Message::Cluster(ClusterMessage::KeepAlive(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 4 => Message::Cluster(ClusterMessage::KeepAliveResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 50 => Message::Encryption(EncryptionMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 51 => Message::Encryption(EncryptionMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 52 => Message::Encryption(EncryptionMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 53 => Message::Encryption(EncryptionMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 54 => Message::Encryption(EncryptionMessage::Complaint(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 55 => Message::Encryption(EncryptionMessage::ComplaintResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 56 => Message::Encryption(EncryptionMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 57 => Message::Encryption(EncryptionMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 58 => Message::Encryption(EncryptionMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 100 => Message::Decryption(DecryptionMessage::InitializeDecryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 101 => Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 102 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 103 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 104 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + _ => return Err(Error::Serde(format!("unknown message type {}", header.kind))), + }) +} + +/// Encrypt serialized message. +pub fn encrypt_message(_key: &Secret, message: SerializedMessage) -> Result { + Ok(message) // TODO: implement me +} + +/// Decrypt serialized message. +pub fn decrypt_message(_key: &Secret, payload: Vec) -> Result, Error> { + Ok(payload) // TODO: implement me +} + +/// Compute shared encryption key. +pub fn compute_shared_key(self_secret: &Secret, other_public: &Public) -> Result { + Ok(agree(self_secret, other_public)?) +} + +/// Serialize message header. 
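// Illustrative sketch, not used by this module: the complete on-wire frame that
// serialize_message + serialize_header produce, written as one hypothetical helper (`frame`).
// It relies only on items already imported at the top of this file (MESSAGE_HEADER_SIZE,
// the byteorder write traits and std::u16).
fn frame(version: u8, kind: u8, payload: &[u8]) -> Vec<u8> {
	debug_assert!(payload.len() <= u16::MAX as usize, "payload length must fit the u16 size field");
	let mut out = Vec::with_capacity(MESSAGE_HEADER_SIZE + payload.len());
	out.write_u8(version).expect("writing to a Vec cannot fail");
	out.write_u8(kind).expect("writing to a Vec cannot fail");
	out.write_u16::<LittleEndian>(payload.len() as u16).expect("writing to a Vec cannot fail");
	out.extend_from_slice(payload);
	out
}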
+fn serialize_header(header: &MessageHeader) -> Result, Error> { + let mut buffer = Vec::with_capacity(MESSAGE_HEADER_SIZE); + buffer.write_u8(header.version)?; + buffer.write_u8(header.kind)?; + buffer.write_u16::(header.size)?; + Ok(buffer) +} + +/// Deserialize message header. +pub fn deserialize_header(data: &[u8]) -> Result { + let mut reader = Cursor::new(data); + Ok(MessageHeader { + version: reader.read_u8()?, + kind: reader.read_u8()?, + size: reader.read_u16::()?, + }) +} + +#[cfg(test)] +pub mod tests { + use std::io; + use ethkey::{KeyPair, Public}; + use key_server_cluster::message::Message; + use super::{MESSAGE_HEADER_SIZE, MessageHeader, serialize_message, serialize_header, deserialize_header}; + + pub struct TestIo { + self_key_pair: KeyPair, + peer_public: Public, + input_buffer: io::Cursor>, + output_buffer: Vec, + expected_output_buffer: Vec, + } + + impl TestIo { + pub fn new(self_key_pair: KeyPair, peer_public: Public) -> Self { + TestIo { + self_key_pair: self_key_pair, + peer_public: peer_public, + input_buffer: io::Cursor::new(Vec::new()), + output_buffer: Vec::new(), + expected_output_buffer: Vec::new(), + } + } + + pub fn self_key_pair(&self) -> &KeyPair { + &self.self_key_pair + } + + pub fn peer_public(&self) -> &Public { + &self.peer_public + } + + pub fn add_input_message(&mut self, message: Message) { + let serialized_message = serialize_message(message).unwrap(); + let serialized_message: Vec<_> = serialized_message.into(); + let input_buffer = self.input_buffer.get_mut(); + for b in serialized_message { + input_buffer.push(b); + } + } + + pub fn add_output_message(&mut self, message: Message) { + let serialized_message = serialize_message(message).unwrap(); + let serialized_message: Vec<_> = serialized_message.into(); + self.expected_output_buffer.extend(serialized_message); + } + + pub fn assert_output(&self) { + assert_eq!(self.output_buffer, self.expected_output_buffer); + } + } + + impl io::Read for TestIo { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + io::Read::read(&mut self.input_buffer, buf) + } + } + + impl io::Write for TestIo { + fn write(&mut self, buf: &[u8]) -> io::Result { + io::Write::write(&mut self.output_buffer, buf) + } + + fn flush(&mut self) -> io::Result<()> { + io::Write::flush(&mut self.output_buffer) + } + } + + #[test] + fn header_serialization_works() { + let header = MessageHeader { + kind: 1, + version: 2, + size: 3, + }; + + let serialized_header = serialize_header(&header).unwrap(); + assert_eq!(serialized_header.len(), MESSAGE_HEADER_SIZE); + + let deserialized_header = deserialize_header(&serialized_header).unwrap(); + assert_eq!(deserialized_header, header); + } +} diff --git a/secret_store/src/key_server_cluster/io/mod.rs b/secret_store/src/key_server_cluster/io/mod.rs new file mode 100644 index 000000000..57071038e --- /dev/null +++ b/secret_store/src/key_server_cluster/io/mod.rs @@ -0,0 +1,34 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +mod deadline; +mod handshake; +mod message; +mod read_header; +mod read_payload; +mod read_message; +mod shared_tcp_stream; +mod write_message; + +pub use self::deadline::{deadline, Deadline, DeadlineStatus}; +pub use self::handshake::{handshake, accept_handshake, Handshake, HandshakeResult}; +pub use self::message::{MessageHeader, SerializedMessage, serialize_message, deserialize_message, + encrypt_message, compute_shared_key}; +pub use self::read_header::{read_header, ReadHeader}; +pub use self::read_payload::{read_payload, read_encrypted_payload, ReadPayload}; +pub use self::read_message::{read_message, read_encrypted_message, ReadMessage}; +pub use self::shared_tcp_stream::SharedTcpStream; +pub use self::write_message::{write_message, write_encrypted_message, WriteMessage}; diff --git a/secret_store/src/key_server_cluster/io/read_header.rs b/secret_store/src/key_server_cluster/io/read_header.rs new file mode 100644 index 000000000..ab7ce360e --- /dev/null +++ b/secret_store/src/key_server_cluster/io/read_header.rs @@ -0,0 +1,44 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use futures::{Future, Poll, Async}; +use tokio_core::io::{ReadExact, read_exact}; +use key_server_cluster::Error; +use key_server_cluster::io::message::{MESSAGE_HEADER_SIZE, MessageHeader, deserialize_header}; + +/// Create future for read single message header from the stream. +pub fn read_header(a: A) -> ReadHeader where A: io::Read { + ReadHeader { + reader: read_exact(a, [0; MESSAGE_HEADER_SIZE]), + } +} + +/// Future for read single message header from the stream. +pub struct ReadHeader { + reader: ReadExact, +} + +impl Future for ReadHeader where A: io::Read { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (read, data) = try_ready!(self.reader.poll()); + let header = deserialize_header(&data); + Ok(Async::Ready((read, header))) + } +} diff --git a/secret_store/src/key_server_cluster/io/read_message.rs b/secret_store/src/key_server_cluster/io/read_message.rs new file mode 100644 index 000000000..418e5e31d --- /dev/null +++ b/secret_store/src/key_server_cluster/io/read_message.rs @@ -0,0 +1,86 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use futures::{Poll, Future, Async}; +use ethkey::Secret; +use key_server_cluster::Error; +use key_server_cluster::message::Message; +use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload}; + +/// Create future for read single message from the stream. +pub fn read_message(a: A) -> ReadMessage where A: io::Read { + ReadMessage { + key: None, + state: ReadMessageState::ReadHeader(read_header(a)), + } +} + +/// Create future for read single encrypted message from the stream. +pub fn read_encrypted_message(a: A, key: Secret) -> ReadMessage where A: io::Read { + ReadMessage { + key: Some(key), + state: ReadMessageState::ReadHeader(read_header(a)), + } +} + +enum ReadMessageState { + ReadHeader(ReadHeader), + ReadPayload(ReadPayload), + Finished, +} + +/// Future for read single message from the stream. +pub struct ReadMessage { + key: Option, + state: ReadMessageState, +} + +impl Future for ReadMessage where A: io::Read { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (next, result) = match self.state { + ReadMessageState::ReadHeader(ref mut future) => { + let (read, header) = try_ready!(future.poll()); + let header = match header { + Ok(header) => header, + Err(err) => return Ok((read, Err(err)).into()), + }; + + let future = match self.key.take() { + Some(key) => read_encrypted_payload(read, header, key), + None => read_payload(read, header), + }; + let next = ReadMessageState::ReadPayload(future); + (next, Async::NotReady) + }, + ReadMessageState::ReadPayload(ref mut future) => { + let (read, payload) = try_ready!(future.poll()); + (ReadMessageState::Finished, Async::Ready((read, payload))) + }, + ReadMessageState::Finished => panic!("poll ReadMessage after it's done"), + }; + + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result) + } + } +} diff --git a/secret_store/src/key_server_cluster/io/read_payload.rs b/secret_store/src/key_server_cluster/io/read_payload.rs new file mode 100644 index 000000000..f6df3155e --- /dev/null +++ b/secret_store/src/key_server_cluster/io/read_payload.rs @@ -0,0 +1,64 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use futures::{Poll, Future}; +use tokio_core::io::{read_exact, ReadExact}; +use ethkey::Secret; +use key_server_cluster::Error; +use key_server_cluster::message::Message; +use key_server_cluster::io::message::{MessageHeader, deserialize_message, decrypt_message}; + +/// Create future for read single message payload from the stream. 
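// Blocking equivalent of the ReadHeader -> ReadPayload chain, for reference only
// (`read_frame_blocking` is a hypothetical helper, not part of this module): read exactly
// MESSAGE_HEADER_SIZE (4) header bytes, decode version/kind/size, then read exactly `size`
// payload bytes before handing them to deserialize_message.
fn read_frame_blocking<A: ::std::io::Read>(a: &mut A) -> ::std::io::Result<(u8, u8, Vec<u8>)> {
	let mut header = [0u8; 4];
	a.read_exact(&mut header)?;
	let (version, kind) = (header[0], header[1]);
	let size = header[2] as usize | ((header[3] as usize) << 8); // little-endian u16
	let mut payload = vec![0u8; size];
	a.read_exact(&mut payload)?;
	Ok((version, kind, payload))
}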
+pub fn read_payload(a: A, header: MessageHeader) -> ReadPayload where A: io::Read { + ReadPayload { + reader: read_exact(a, vec![0; header.size as usize]), + header: header, + key: None, + } +} + +/// Create future for read single encrypted message payload from the stream. +pub fn read_encrypted_payload(a: A, header: MessageHeader, key: Secret) -> ReadPayload where A: io::Read { + ReadPayload { + reader: read_exact(a, vec![0; header.size as usize]), + header: header, + key: Some(key), + } +} + +/// Future for read single message payload from the stream. +pub struct ReadPayload { + reader: ReadExact>, + header: MessageHeader, + key: Option, +} + +impl Future for ReadPayload where A: io::Read { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (read, data) = try_ready!(self.reader.poll()); + let payload = if let Some(key) = self.key.take() { + decrypt_message(&key, data) + .and_then(|data| deserialize_message(&self.header, data)) + } else { + deserialize_message(&self.header, data) + }; + Ok((read, payload).into()) + } +} diff --git a/secret_store/src/key_server_cluster/io/shared_tcp_stream.rs b/secret_store/src/key_server_cluster/io/shared_tcp_stream.rs new file mode 100644 index 000000000..82933c8a2 --- /dev/null +++ b/secret_store/src/key_server_cluster/io/shared_tcp_stream.rs @@ -0,0 +1,60 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::io::{Read, Write, Error}; +use tokio_core::net::TcpStream; + +/// Read+Write implementation for Arc. +pub struct SharedTcpStream { + io: Arc, +} + +impl SharedTcpStream { + pub fn new(a: Arc) -> Self { + SharedTcpStream { + io: a, + } + } +} + +impl From for SharedTcpStream { + fn from(a: TcpStream) -> Self { + SharedTcpStream::new(Arc::new(a)) + } +} + +impl Read for SharedTcpStream { + fn read(&mut self, buf: &mut [u8]) -> Result { + Read::read(&mut (&*self.io as &TcpStream), buf) + } +} + +impl Write for SharedTcpStream { + fn write(&mut self, buf: &[u8]) -> Result { + Write::write(&mut (&*self.io as &TcpStream), buf) + } + + fn flush(&mut self) -> Result<(), Error> { + Write::flush(&mut (&*self.io as &TcpStream)) + } +} + +impl Clone for SharedTcpStream { + fn clone(&self) -> Self { + SharedTcpStream::new(self.io.clone()) + } +} diff --git a/secret_store/src/key_server_cluster/io/write_message.rs b/secret_store/src/key_server_cluster/io/write_message.rs new file mode 100644 index 000000000..457673676 --- /dev/null +++ b/secret_store/src/key_server_cluster/io/write_message.rs @@ -0,0 +1,70 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use futures::{Future, Poll}; +use tokio_core::io::{WriteAll, write_all}; +use ethkey::Secret; +use key_server_cluster::message::Message; +use key_server_cluster::io::{serialize_message, encrypt_message}; + +/// Write plain message to the channel. +pub fn write_message(a: A, message: Message) -> WriteMessage where A: io::Write { + let (error, future) = match serialize_message(message) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) { + Ok(message) => (None, write_all(a, message.into())), + Err(error) => (Some(error), write_all(a, Vec::new())), + }; + WriteMessage { + error: error, + future: future, + } +} + +/// Write encrypted message to the channel. +pub fn write_encrypted_message(a: A, key: &Secret, message: Message) -> WriteMessage where A: io::Write { + let (error, future) = match serialize_message(message) + .and_then(|message| encrypt_message(key, message)) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) { + Ok(message) => (None, write_all(a, message.into())), + Err(error) => (Some(error), write_all(a, Vec::new())), + }; + + + WriteMessage { + error: error, + future: future, + } +} + +/// Future message write. +pub struct WriteMessage { + error: Option, + future: WriteAll>, +} + +impl Future for WriteMessage where A: io::Write { + type Item = (A, Vec); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + if let Some(err) = self.error.take() { + return Err(err); + } + + self.future.poll() + } +} diff --git a/secret_store/src/key_server_cluster/math.rs b/secret_store/src/key_server_cluster/math.rs index 4da17ebc7..fdda08746 100644 --- a/secret_store/src/key_server_cluster/math.rs +++ b/secret_store/src/key_server_cluster/math.rs @@ -160,7 +160,7 @@ pub fn compute_joint_secret<'a, I>(mut secret_coeffs: I) -> Result Result { +pub fn encrypt_secret(secret: &Public, joint_public: &Public) -> Result { // this is performed by KS-cluster client (or KS master) let key_pair = Random.generate()?; @@ -171,7 +171,7 @@ pub fn encrypt_secret(secret: Public, joint_public: &Public) -> Result Result(node_number: &Secret, node_secret_share: &Secret, mut other_nodes_numbers: I) -> Result where I: Iterator { - let other_node_number = other_nodes_numbers.next().expect("compute_node_shadow is called when at least two nodes are required to decrypt secret; qed"); + let other_node_number = match other_nodes_numbers.next() { + Some(other_node_number) => other_node_number, + None => return Ok(node_secret_share.clone()), + }; + let mut shadow = node_number.clone(); shadow.sub(other_node_number)?; shadow.inv()?; @@ -231,17 +235,24 @@ pub fn compute_joint_shadow_point_test<'a, I>(access_key: &Secret, common_point: } /// Decrypt data using joint shadow point. 
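// Note on the change below: decrypt_with_joint_shadow now takes the decryption threshold and
// uses its parity to decide whether the inverse-access-key shadow term is added to or
// subtracted from the encrypted point; the widened test matrix at the bottom of this file
// (full_encryption_math_session) exercises both even and odd thresholds up to n = 10.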
-pub fn decrypt_with_joint_shadow(access_key: &Secret, encrypted_point: &Public, joint_shadow_point: &Public) -> Result { +pub fn decrypt_with_joint_shadow(threshold: usize, access_key: &Secret, encrypted_point: &Public, joint_shadow_point: &Public) -> Result { let mut inv_access_key = access_key.clone(); inv_access_key.inv()?; - - let mut decrypted_point = joint_shadow_point.clone(); - math::public_mul_secret(&mut decrypted_point, &inv_access_key)?; - math::public_add(&mut decrypted_point, encrypted_point)?; + + let mut mul = joint_shadow_point.clone(); + math::public_mul_secret(&mut mul, &inv_access_key)?; + + let mut decrypted_point = encrypted_point.clone(); + if threshold % 2 != 0 { + math::public_add(&mut decrypted_point, &mul)?; + } else { + math::public_sub(&mut decrypted_point, &mul)?; + } Ok(decrypted_point) } +#[cfg(test)] /// Decrypt data using joint secret (version for tests). pub fn decrypt_with_joint_secret(encrypted_point: &Public, common_point: &Public, joint_secret: &Secret) -> Result { let mut common_point_mul = common_point.clone(); @@ -262,7 +273,7 @@ pub mod tests { // === PART2: encryption using joint public key === // the next line is executed on KeyServer-client - let encrypted_secret = encrypt_secret(document_secret_plain.clone(), &joint_public).unwrap(); + let encrypted_secret = encrypt_secret(&document_secret_plain, &joint_public).unwrap(); // === PART3: decryption === @@ -285,7 +296,7 @@ pub mod tests { assert_eq!(joint_shadow_point, joint_shadow_point_test); // decrypt encrypted secret using joint shadow point - let document_secret_decrypted = decrypt_with_joint_shadow(&access_key, &encrypted_secret.encrypted_point, &joint_shadow_point).unwrap(); + let document_secret_decrypted = decrypt_with_joint_shadow(t, &access_key, &encrypted_secret.encrypted_point, &joint_shadow_point).unwrap(); // decrypt encrypted secret using joint secret [just for test] let document_secret_decrypted_test = match joint_secret { @@ -298,7 +309,8 @@ pub mod tests { #[test] fn full_encryption_math_session() { - let test_cases = [(1, 3)]; + let test_cases = [(0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), + (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; for &(t, n) in &test_cases { // === PART1: DKG === diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index 800dcf705..9958884a4 100644 --- a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -14,13 +14,42 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::fmt; use std::collections::{BTreeSet, BTreeMap}; -use ethkey::{Public, Secret, Signature}; -use key_server_cluster::{NodeId, SessionId}; +use ethkey::Secret; +use key_server_cluster::SessionId; +use super::{SerializableH256, SerializablePublic, SerializableSecret, SerializableSignature}; + +pub type MessageSessionId = SerializableH256; +pub type MessageNodeId = SerializablePublic; #[derive(Clone, Debug)] -/// All possible messages that can be sent during DKG. +/// All possible messages that can be sent during encryption/decryption sessions. pub enum Message { + /// Cluster message. + Cluster(ClusterMessage), + /// Encryption message. + Encryption(EncryptionMessage), + /// Decryption message. + Decryption(DecryptionMessage), +} + +#[derive(Clone, Debug)] +/// All possible cluster-level messages. 
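// The single DKG-era Message enum is split into three levels: ClusterMessage for
// connection-level traffic (public keys, key signatures, keep-alives), EncryptionMessage for
// the DKG/encryption session and DecryptionMessage for decryption sessions. The io::message
// module assigns each level its own range of wire kinds (1-4, 50-58, 100-104), and the
// session_id()/sub_session_id() helpers further down expose the identifiers used to dispatch
// session messages.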
+pub enum ClusterMessage { + /// Introduce node public key. + NodePublicKey(NodePublicKey), + /// Confirm that node owns its private key. + NodePrivateKeySignature(NodePrivateKeySignature), + /// Keep alive message. + KeepAlive(KeepAlive), + /// Keep alive message response. + KeepAliveResponse(KeepAliveResponse), +} + +#[derive(Clone, Debug)] +/// All possible messages that can be sent during encryption session. +pub enum EncryptionMessage { /// Initialize new DKG session. InitializeSession(InitializeSession), /// Confirm DKG session initialization. @@ -35,7 +64,15 @@ pub enum Message { ComplaintResponse(ComplaintResponse), /// Broadcast self public key portion. PublicKeyShare(PublicKeyShare), + /// When session error has occured. + SessionError(SessionError), + /// When session is completed. + SessionCompleted(SessionCompleted), +} +#[derive(Clone, Debug)] +/// All possible messages that can be sent during decryption session. +pub enum DecryptionMessage { /// Initialize decryption session. InitializeDecryptionSession(InitializeDecryptionSession), /// Confirm/reject decryption session initialization. @@ -44,125 +81,272 @@ pub enum Message { RequestPartialDecryption(RequestPartialDecryption), /// Partial decryption is completed PartialDecryption(PartialDecryption), + /// When decryption session error has occured. + DecryptionSessionError(DecryptionSessionError), } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Introduce node public key. +pub struct NodePublicKey { + /// Node identifier (aka node public key). + pub node_id: MessageNodeId, + /// Data, which must be signed by peer to prove that he owns the corresponding private key. + pub confirmation_plain: SerializableH256, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Confirm that node owns the private key of previously passed public key (aka node id). +pub struct NodePrivateKeySignature { + /// Previously passed `confirmation_plain`, signed with node private key. + pub confirmation_signed: SerializableSignature, +} + + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Ask if the node is still alive. +pub struct KeepAlive { +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Confirm that the node is still alive. +pub struct KeepAliveResponse { +} + +#[derive(Clone, Debug, Serialize, Deserialize)] /// Initialize new DKG session. pub struct InitializeSession { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Derived generation point. Starting from originator, every node must multiply this /// point by random scalar (unknown by other nodes). At the end of initialization /// `point` will be some (k1 * k2 * ... * kn) * G = `point` where `(k1 * k2 * ... * kn)` /// is unknown for every node. - pub derived_point: Public, + pub derived_point: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Confirm DKG session initialization. pub struct ConfirmInitialization { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Derived generation point. - pub derived_point: Public, + pub derived_point: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Broadcast generated point to every other node. pub struct CompleteInitialization { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// All session participants along with their identification numbers. - pub nodes: BTreeMap, + pub nodes: BTreeMap, /// Decryption threshold. 
During decryption threshold-of-route.len() nodes must came to /// consensus to successfully decrypt message. pub threshold: usize, /// Derived generation point. - pub derived_point: Public, + pub derived_point: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Generated keys are sent to every node. pub struct KeysDissemination { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Secret 1. - pub secret1: Secret, + pub secret1: SerializableSecret, /// Secret 2. - pub secret2: Secret, + pub secret2: SerializableSecret, /// Public values. - pub publics: Vec, + pub publics: Vec, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Complaint against node is broadcasted. pub struct Complaint { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Public values. - pub against: NodeId, + pub against: MessageNodeId, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is responding to complaint. pub struct ComplaintResponse { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Secret 1. - pub secret1: Secret, + pub secret1: SerializableSecret, /// Secret 2. - pub secret2: Secret, + pub secret2: SerializableSecret, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is sharing its public key share. pub struct PublicKeyShare { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Public key share. - pub public_share: Public, + pub public_share: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When session error has occured. +pub struct SessionError { + /// Session Id. + pub session: MessageSessionId, + /// Public key share. + pub error: String, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When session is completed. +pub struct SessionCompleted { + /// Session Id. + pub session: MessageSessionId, + /// Common (shared) encryption point. + pub common_point: SerializablePublic, + /// Encrypted point. + pub encrypted_point: SerializablePublic, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is requested to decrypt data, encrypted in given session. pub struct InitializeDecryptionSession { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Requestor signature. - pub requestor_signature: Signature, + pub requestor_signature: SerializableSignature, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is responding to decryption request. pub struct ConfirmDecryptionInitialization { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Is node confirmed to make a decryption?. pub is_confirmed: bool, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is requested to do a partial decryption. pub struct RequestPartialDecryption { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Nodes that are agreed to do a decryption. 
- pub nodes: BTreeSet, + pub nodes: BTreeSet, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node has partially decrypted the secret. pub struct PartialDecryption { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Partially decrypted secret. - pub shadow_point: Public, + pub shadow_point: SerializablePublic, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When decryption session error has occured. +pub struct DecryptionSessionError { + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Public key share. + pub error: String, +} + +impl EncryptionMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + EncryptionMessage::InitializeSession(ref msg) => &msg.session, + EncryptionMessage::ConfirmInitialization(ref msg) => &msg.session, + EncryptionMessage::CompleteInitialization(ref msg) => &msg.session, + EncryptionMessage::KeysDissemination(ref msg) => &msg.session, + EncryptionMessage::Complaint(ref msg) => &msg.session, + EncryptionMessage::ComplaintResponse(ref msg) => &msg.session, + EncryptionMessage::PublicKeyShare(ref msg) => &msg.session, + EncryptionMessage::SessionError(ref msg) => &msg.session, + EncryptionMessage::SessionCompleted(ref msg) => &msg.session, + } + } +} + +impl DecryptionMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.session, + DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.session, + DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.session, + DecryptionMessage::PartialDecryption(ref msg) => &msg.session, + DecryptionMessage::DecryptionSessionError(ref msg) => &msg.session, + } + } + + pub fn sub_session_id(&self) -> &Secret { + match *self { + DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.sub_session, + DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.sub_session, + DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.sub_session, + DecryptionMessage::PartialDecryption(ref msg) => &msg.sub_session, + DecryptionMessage::DecryptionSessionError(ref msg) => &msg.sub_session, + } + } +} + +impl fmt::Display for Message { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Message::Cluster(ref message) => write!(f, "Cluster.{}", message), + Message::Encryption(ref message) => write!(f, "Encryption.{}", message), + Message::Decryption(ref message) => write!(f, "Decryption.{}", message), + } + } +} + +impl fmt::Display for ClusterMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ClusterMessage::NodePublicKey(_) => write!(f, "NodePublicKey"), + ClusterMessage::NodePrivateKeySignature(_) => write!(f, "NodePrivateKeySignature"), + ClusterMessage::KeepAlive(_) => write!(f, "KeepAlive"), + ClusterMessage::KeepAliveResponse(_) => write!(f, "KeepAliveResponse"), + } + } +} + +impl fmt::Display for EncryptionMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + EncryptionMessage::InitializeSession(_) => write!(f, "InitializeSession"), + EncryptionMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"), + EncryptionMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"), + EncryptionMessage::KeysDissemination(_) => write!(f, 
"KeysDissemination"), + EncryptionMessage::Complaint(_) => write!(f, "Complaint"), + EncryptionMessage::ComplaintResponse(_) => write!(f, "ComplaintResponse"), + EncryptionMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"), + EncryptionMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error), + EncryptionMessage::SessionCompleted(_) => write!(f, "SessionCompleted"), + } + } +} + +impl fmt::Display for DecryptionMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + DecryptionMessage::InitializeDecryptionSession(_) => write!(f, "InitializeDecryptionSession"), + DecryptionMessage::ConfirmDecryptionInitialization(_) => write!(f, "ConfirmDecryptionInitialization"), + DecryptionMessage::RequestPartialDecryption(_) => write!(f, "RequestPartialDecryption"), + DecryptionMessage::PartialDecryption(_) => write!(f, "PartialDecryption"), + DecryptionMessage::DecryptionSessionError(_) => write!(f, "DecryptionSessionError"), + } + } } diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 5d0dacd11..8b33e06f7 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -14,21 +14,36 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -#![allow(dead_code)] // TODO: remove me - -use std::collections::BTreeMap; -use ethkey::{self, Public, Secret, Signature}; +use std::fmt; +use std::io::Error as IoError; +use ethkey; +use ethcrypto; use super::types::all::DocumentAddress; -pub use super::acl_storage::AclStorage; +pub use super::types::all::{NodeId, EncryptionConfiguration}; +pub use super::acl_storage::{AclStorage, DummyAclStorage}; +pub use super::key_storage::{KeyStorage, DocumentKeyShare}; +pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic}; +pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; +pub use self::encryption_session::Session as EncryptionSession; +pub use self::decryption_session::Session as DecryptionSession; + +#[cfg(test)] +pub use super::key_storage::tests::DummyKeyStorage; -pub type NodeId = Public; pub type SessionId = DocumentAddress; -pub type SessionIdSignature = Signature; -#[derive(Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq)] /// Errors which can occur during encryption/decryption session pub enum Error { + /// Invalid node address has been passed. + InvalidNodeAddress, + /// Invalid node id has been passed. + InvalidNodeId, + /// Session with the given id already exists. + DuplicateSessionId, + /// Session with the given id is unknown. + InvalidSessionId, /// Invalid number of nodes. /// There must be at least two nodes participating in encryption. /// There must be at least one node participating in decryption. @@ -39,28 +54,24 @@ pub enum Error { /// Threshold value must be in [0; n - 1], where n is a number of nodes participating in the encryption. InvalidThreshold, /// Current state of encryption/decryption session does not allow to proceed request. + /// Reschedule this request for later processing. + TooEarlyForRequest, + /// Current state of encryption/decryption session does not allow to proceed request. /// This means that either there is some comm-failure or node is misbehaving/cheating. InvalidStateForRequest, - /// Some data in passed message was recognized as invalid. + /// Message or some data in the message was recognized as invalid. 
/// This means that node is misbehaving/cheating. InvalidMessage, + /// Connection to node, required for this session is not established. + NodeDisconnected, /// Cryptographic error. EthKey(String), -} - -#[derive(Debug, Clone)] -/// Data, which is stored on every node after DKG && encryption is completed. -pub struct EncryptedData { - /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). - threshold: usize, - /// Nodes ids numbers. - id_numbers: BTreeMap, - /// Node secret share. - secret_share: Secret, - /// Common (shared) encryption point. - common_point: Public, - /// Encrypted point. - encrypted_point: Public, + /// I/O error has occured. + Io(String), + /// Deserialization error has occured. + Serde(String), + /// Key storage error. + KeyStorage(String), } impl From for Error { @@ -69,8 +80,50 @@ impl From for Error { } } +impl From for Error { + fn from(err: ethcrypto::Error) -> Self { + Error::EthKey(err.into()) + } +} + +impl From for Error { + fn from(err: IoError) -> Self { + Error::Io(err.to_string()) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::InvalidNodeAddress => write!(f, "invalid node address has been passed"), + Error::InvalidNodeId => write!(f, "invalid node id has been passed"), + Error::DuplicateSessionId => write!(f, "session with the same id is already registered"), + Error::InvalidSessionId => write!(f, "invalid session id has been passed"), + Error::InvalidNodesCount => write!(f, "invalid nodes count"), + Error::InvalidNodesConfiguration => write!(f, "invalid nodes configuration"), + Error::InvalidThreshold => write!(f, "invalid threshold value has been passed"), + Error::TooEarlyForRequest => write!(f, "session is not yet ready to process this request"), + Error::InvalidStateForRequest => write!(f, "session is in invalid state for processing this request"), + Error::InvalidMessage => write!(f, "invalid message is received"), + Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"), + Error::EthKey(ref e) => write!(f, "cryptographic error {}", e), + Error::Io(ref e) => write!(f, "i/o error {}", e), + Error::Serde(ref e) => write!(f, "serde error {}", e), + Error::KeyStorage(ref e) => write!(f, "key storage error {}", e), + } + } +} + +impl Into for Error { + fn into(self) -> String { + format!("{}", self) + } +} + mod cluster; mod decryption_session; mod encryption_session; +mod io; mod math; mod message; +mod net; diff --git a/secret_store/src/key_server_cluster/net/accept_connection.rs b/secret_store/src/key_server_cluster/net/accept_connection.rs new file mode 100644 index 000000000..0daa8b2da --- /dev/null +++ b/secret_store/src/key_server_cluster/net/accept_connection.rs @@ -0,0 +1,63 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
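// Both connection directions (accept_connection here, connect in connect.rs) wrap the
// handshake in a Deadline, so a peer that stalls mid-handshake is subject to the hard-coded
// Duration::new(5, 0) timeout instead of occupying the acceptor indefinitely.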
+ +use std::io; +use std::net::SocketAddr; +use std::time::Duration; +use std::collections::BTreeSet; +use futures::{Future, Poll}; +use tokio_core::reactor::Handle; +use tokio_core::net::TcpStream; +use ethkey::KeyPair; +use key_server_cluster::{Error, NodeId}; +use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline}; +use key_server_cluster::net::Connection; + +/// Create future for accepting incoming connection. +pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { + let accept = AcceptConnection { + handshake: accept_handshake(stream, self_key_pair, trusted_nodes), + address: address, + }; + + deadline(Duration::new(5, 0), handle, accept).expect("Failed to create timeout") +} + +/// Future for accepting incoming connection. +pub struct AcceptConnection { + handshake: Handshake, + address: SocketAddr, +} + +impl Future for AcceptConnection { + type Item = Result; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (stream, result) = try_ready!(self.handshake.poll()); + let result = match result { + Ok(result) => result, + Err(err) => return Ok(Err(err).into()), + }; + let connection = Connection { + stream: stream.into(), + address: self.address, + node_id: result.node_id, + key: result.shared_key, + }; + Ok(Ok(connection).into()) + } +} diff --git a/secret_store/src/key_server_cluster/net/connect.rs b/secret_store/src/key_server_cluster/net/connect.rs new file mode 100644 index 000000000..449168ab2 --- /dev/null +++ b/secret_store/src/key_server_cluster/net/connect.rs @@ -0,0 +1,90 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::BTreeSet; +use std::io; +use std::time::Duration; +use std::net::SocketAddr; +use futures::{Future, Poll, Async}; +use tokio_core::reactor::Handle; +use tokio_core::net::{TcpStream, TcpStreamNew}; +use ethkey::KeyPair; +use key_server_cluster::{Error, NodeId}; +use key_server_cluster::io::{handshake, Handshake, Deadline, deadline}; +use key_server_cluster::net::Connection; + +/// Create future for connecting to other node. +pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { + let connect = Connect { + state: ConnectState::TcpConnect(TcpStream::connect(address, handle)), + address: address.clone(), + self_key_pair: self_key_pair, + trusted_nodes: trusted_nodes, + }; + + deadline(Duration::new(5, 0), handle, connect).expect("Failed to create timeout") +} + +enum ConnectState { + TcpConnect(TcpStreamNew), + Handshake(Handshake), + Connected, +} + +/// Future for connecting to other node. 
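// Connect mirrors AcceptConnection but drives two stages itself: first the raw
// TcpStream::connect future, then the active-side handshake; on success it yields the same
// Connection { stream, address, node_id, key } value as the accepting side.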
+pub struct Connect { + state: ConnectState, + address: SocketAddr, + self_key_pair: KeyPair, + trusted_nodes: BTreeSet, +} + +impl Future for Connect { + type Item = Result; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (next, result) = match self.state { + ConnectState::TcpConnect(ref mut future) => { + let stream = try_ready!(future.poll()); + let handshake = handshake(stream, self.self_key_pair.clone(), self.trusted_nodes.clone()); + (ConnectState::Handshake(handshake), Async::NotReady) + }, + ConnectState::Handshake(ref mut future) => { + let (stream, result) = try_ready!(future.poll()); + let result = match result { + Ok(result) => result, + Err(err) => return Ok(Async::Ready(Err(err))), + }; + let connection = Connection { + stream: stream.into(), + address: self.address, + node_id: result.node_id, + key: result.shared_key, + }; + (ConnectState::Connected, Async::Ready(Ok(connection))) + }, + ConnectState::Connected => panic!("poll Connect after it's done"), + }; + + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result) + } + } +} diff --git a/secret_store/src/key_server_cluster/net/connection.rs b/secret_store/src/key_server_cluster/net/connection.rs new file mode 100644 index 000000000..8125b81d3 --- /dev/null +++ b/secret_store/src/key_server_cluster/net/connection.rs @@ -0,0 +1,32 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::net; +use ethkey::Secret; +use key_server_cluster::NodeId; +use key_server_cluster::io::SharedTcpStream; + +/// Established connection data +pub struct Connection { + /// Peer address. + pub address: net::SocketAddr, + /// Connection stream. + pub stream: SharedTcpStream, + /// Peer node id. + pub node_id: NodeId, + /// Encryption key. + pub key: Secret, +} diff --git a/secret_store/src/key_server_cluster/net/mod.rs b/secret_store/src/key_server_cluster/net/mod.rs new file mode 100644 index 000000000..6abf83ceb --- /dev/null +++ b/secret_store/src/key_server_cluster/net/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+
+mod accept_connection;
+mod connect;
+mod connection;
+
+pub use self::accept_connection::{AcceptConnection, accept_connection};
+pub use self::connect::{Connect, connect};
+pub use self::connection::Connection;
diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs
index fe7777410..e3106f221 100644
--- a/secret_store/src/key_storage.rs
+++ b/secret_store/src/key_storage.rs
@@ -15,15 +15,34 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::path::PathBuf;
+use std::collections::BTreeMap;
+use serde_json;
+use ethkey::{Secret, Public};
 use util::Database;
-use types::all::{Error, ServiceConfiguration, DocumentAddress, DocumentKey};
+use types::all::{Error, ServiceConfiguration, DocumentAddress, NodeId};
+use serialization::{SerializablePublic, SerializableSecret};
+
+#[derive(Debug, Clone, PartialEq)]
+/// Encrypted key share, stored by key storage on the single key server.
+pub struct DocumentKeyShare {
+	/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
+	pub threshold: usize,
+	/// Nodes ids numbers.
+	pub id_numbers: BTreeMap<NodeId, Secret>,
+	/// Node secret share.
+	pub secret_share: Secret,
+	/// Common (shared) encryption point.
+	pub common_point: Public,
+	/// Encrypted point.
+	pub encrypted_point: Public,
+}
 
 /// Document encryption keys storage
 pub trait KeyStorage: Send + Sync {
 	/// Insert document encryption key
-	fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error>;
+	fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error>;
 	/// Get document encryption key
-	fn get(&self, document: &DocumentAddress) -> Result<DocumentKey, Error>;
+	fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error>;
 }
 
 /// Persistent document encryption keys storage
@@ -31,6 +50,21 @@ pub struct PersistentKeyStorage {
 	db: Database,
 }
 
+#[derive(Serialize, Deserialize)]
+/// Encrypted key share, as it is stored by key storage on the single key server.
+struct SerializableDocumentKeyShare {
+	/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
+	pub threshold: usize,
+	/// Nodes ids numbers.
+	pub id_numbers: BTreeMap<SerializablePublic, SerializableSecret>,
+	/// Node secret share.
+	pub secret_share: SerializableSecret,
+	/// Common (shared) encryption point.
+	pub common_point: SerializablePublic,
+	/// Encrypted point.
+	pub encrypted_point: SerializablePublic,
+}
+
 impl PersistentKeyStorage {
 	/// Create new persistent document encryption keys storage
 	pub fn new(config: &ServiceConfiguration) -> Result<Self, Error> {
@@ -45,41 +79,71 @@ impl PersistentKeyStorage {
 }
 
 impl KeyStorage for PersistentKeyStorage {
-	fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error> {
+	fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> {
+		let key: SerializableDocumentKeyShare = key.into();
+		let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?;
 		let mut batch = self.db.transaction();
 		batch.put(None, &document, &key);
 		self.db.write(batch).map_err(Error::Database)
 	}
 
-	fn get(&self, document: &DocumentAddress) -> Result<DocumentKey, Error> {
+	fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error> {
 		self.db.get(None, document)
 			.map_err(Error::Database)?
 			.ok_or(Error::DocumentNotFound)
 			.map(|key| key.to_vec())
+			.and_then(|key| serde_json::from_slice::<SerializableDocumentKeyShare>(&key).map_err(|e| Error::Database(e.to_string())))
+			.map(Into::into)
+	}
+}
+
+impl From<DocumentKeyShare> for SerializableDocumentKeyShare {
+	fn from(key: DocumentKeyShare) -> Self {
+		SerializableDocumentKeyShare {
+			threshold: key.threshold,
+			id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
+			secret_share: key.secret_share.into(),
+			common_point: key.common_point.into(),
+			encrypted_point: key.encrypted_point.into(),
+		}
+	}
+}
+
+impl From<SerializableDocumentKeyShare> for DocumentKeyShare {
+	fn from(key: SerializableDocumentKeyShare) -> Self {
+		DocumentKeyShare {
+			threshold: key.threshold,
+			id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
+			secret_share: key.secret_share.into(),
+			common_point: key.common_point.into(),
+			encrypted_point: key.encrypted_point.into(),
+		}
 	}
 }
 
 #[cfg(test)]
 pub mod tests {
-	use std::collections::HashMap;
+	use std::collections::{BTreeMap, HashMap};
 	use parking_lot::RwLock;
 	use devtools::RandomTempPath;
-	use super::super::types::all::{Error, ServiceConfiguration, DocumentAddress, DocumentKey};
-	use super::{KeyStorage, PersistentKeyStorage};
+	use ethkey::{Random, Generator};
+	use super::super::types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration,
+		DocumentAddress, EncryptionConfiguration};
+	use super::{KeyStorage, PersistentKeyStorage, DocumentKeyShare};
 
 	#[derive(Default)]
 	/// In-memory document encryption keys storage
 	pub struct DummyKeyStorage {
-		keys: RwLock<HashMap<DocumentAddress, DocumentKey>>,
+		keys: RwLock<HashMap<DocumentAddress, DocumentKeyShare>>,
 	}
 
 	impl KeyStorage for DummyKeyStorage {
-		fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error> {
+		fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> {
 			self.keys.write().insert(document, key);
 			Ok(())
 		}
 
-		fn get(&self, document: &DocumentAddress) -> Result<DocumentKey, Error> {
+		fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error> {
 			self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound)
 		}
 	}
@@ -88,15 +152,46 @@ pub mod tests {
 	fn persistent_key_storage() {
 		let path = RandomTempPath::create_dir();
 		let config = ServiceConfiguration {
-			listener_addr: "0.0.0.0".to_owned(),
-			listener_port: 8082,
+			listener_address: NodeAddress {
+				address: "0.0.0.0".to_owned(),
+				port: 8082,
+			},
 			data_path: path.as_str().to_owned(),
+			cluster_config: ClusterConfiguration {
+				threads: 1,
+				self_private: (**Random.generate().unwrap().secret().clone()).into(),
+				listener_address: NodeAddress {
+					address: "0.0.0.0".to_owned(),
+					port: 8083,
+				},
+				nodes: BTreeMap::new(),
+				allow_connecting_to_higher_nodes: false,
+				encryption_config: EncryptionConfiguration {
+					key_check_timeout_ms: 10,
+				},
+			},
 		};
 
 		let key1 = DocumentAddress::from(1);
-		let value1: DocumentKey = vec![0x77, 0x88];
+		let value1 = DocumentKeyShare {
+			threshold: 100,
+			id_numbers: vec![
+				(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
+			].into_iter().collect(),
+			secret_share: Random.generate().unwrap().secret().clone(),
+			common_point: Random.generate().unwrap().public().clone(),
+			encrypted_point: Random.generate().unwrap().public().clone(),
+		};
 		let key2 = DocumentAddress::from(2);
-		let value2: DocumentKey = vec![0x11, 0x22];
+		let value2 = DocumentKeyShare {
+			threshold: 200,
+			id_numbers: vec![
+				(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
+			].into_iter().collect(),
+			secret_share: Random.generate().unwrap().secret().clone(),
+			common_point: Random.generate().unwrap().public().clone(),
+			encrypted_point: Random.generate().unwrap().public().clone(),
+		};
 		let key3 = DocumentAddress::from(3);
 
 		let key_storage = PersistentKeyStorage::new(&config).unwrap();
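
Not part of the patch: `insert` and `get` now round-trip the share through `SerializableDocumentKeyShare` and serde_json rather than storing raw bytes. A condensed sketch of that round trip, written as if inside key_storage.rs (the serializable struct is private to that module), using only the calls shown above:

    fn roundtrip_share(share: DocumentKeyShare) -> Result<DocumentKeyShare, Error> {
        // insert(): DocumentKeyShare -> SerializableDocumentKeyShare -> JSON bytes.
        let serializable: SerializableDocumentKeyShare = share.into();
        let bytes = serde_json::to_vec(&serializable).map_err(|e| Error::Database(e.to_string()))?;

        // get(): JSON bytes -> SerializableDocumentKeyShare -> DocumentKeyShare.
        serde_json::from_slice::<SerializableDocumentKeyShare>(&bytes)
            .map_err(|e| Error::Database(e.to_string()))
            .map(Into::into)
    }
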
diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs
index 41d658963..bbb8474d4 100644
--- a/secret_store/src/lib.rs
+++ b/secret_store/src/lib.rs
@@ -14,10 +14,22 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
+extern crate byteorder;
 #[macro_use]
 extern crate log;
+#[macro_use]
+extern crate futures;
+extern crate futures_cpupool;
 extern crate hyper;
 extern crate parking_lot;
+extern crate rustc_serialize;
+extern crate serde;
+extern crate serde_json;
+#[macro_use]
+extern crate serde_derive;
+extern crate tokio_core;
+extern crate tokio_service;
+extern crate tokio_proto;
 extern crate url;
 
 extern crate ethcore_devtools as devtools;
@@ -38,16 +50,19 @@ mod acl_storage;
 mod http_listener;
 mod key_server;
 mod key_storage;
+mod serialization;
 
 pub use types::all::{DocumentAddress, DocumentKey, DocumentEncryptedKey, RequestSignature, Public,
-	Error, ServiceConfiguration};
+	Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, EncryptionConfiguration};
 pub use traits::{KeyServer};
 
 /// Start new key server instance
 pub fn start(config: ServiceConfiguration) -> Result<Box<KeyServer>, Error> {
-	let acl_storage = acl_storage::DummyAclStorage::default();
-	let key_storage = key_storage::PersistentKeyStorage::new(&config)?;
-	let key_server = key_server::KeyServerImpl::new(acl_storage, key_storage);
+	use std::sync::Arc;
+
+	let acl_storage = Arc::new(acl_storage::DummyAclStorage::default());
+	let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?);
+	let key_server = key_server::KeyServerImpl::new(&config.cluster_config, acl_storage, key_storage)?;
 	let listener = http_listener::KeyServerHttpListener::start(config, key_server)?;
 	Ok(Box::new(listener))
 }
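
Not part of the patch: the reworked `start` entry point now needs a full cluster configuration. A minimal sketch of assembling a `ServiceConfiguration` and starting a key server, mirroring the fields used in the key_storage tests above; it assumes the crate is linked as `ethcore_secretstore`, and every address, port, path and the generated private key are placeholders:

    extern crate ethkey;
    extern crate ethcore_secretstore as secretstore;

    use std::collections::BTreeMap;
    use ethkey::{Random, Generator};
    use secretstore::{start, ServiceConfiguration, ClusterConfiguration, NodeAddress, EncryptionConfiguration};

    fn main() {
        let config = ServiceConfiguration {
            listener_address: NodeAddress { address: "0.0.0.0".to_owned(), port: 8082 },
            data_path: "./secretstore_db".to_owned(),
            cluster_config: ClusterConfiguration {
                threads: 1,
                self_private: (**Random.generate().unwrap().secret().clone()).into(),
                listener_address: NodeAddress { address: "0.0.0.0".to_owned(), port: 8083 },
                nodes: BTreeMap::new(),
                allow_connecting_to_higher_nodes: false,
                encryption_config: EncryptionConfiguration { key_check_timeout_ms: 10 },
            },
        };

        // `start` builds the cluster-backed key server and the HTTP listener.
        let _key_server = start(config).expect("key server startup");
    }
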
diff --git a/secret_store/src/serialization.rs b/secret_store/src/serialization.rs
new file mode 100644
index 000000000..0d0e904a7
--- /dev/null
+++ b/secret_store/src/serialization.rs
@@ -0,0 +1,260 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::fmt;
+use std::cmp::{Ord, PartialOrd, Ordering};
+use std::ops::Deref;
+use rustc_serialize::hex::ToHex;
+use serde::{Serialize, Deserialize, Serializer, Deserializer};
+use serde::de::{Visitor, Error as SerdeError};
+use ethkey::{Public, Secret, Signature};
+use util::H256;
+
+#[derive(Clone, Debug)]
+/// Serializable Signature.
+pub struct SerializableSignature(Signature);
+
+impl<T> From<T> for SerializableSignature where Signature: From<T> {
+	fn from(s: T) -> SerializableSignature {
+		SerializableSignature(s.into())
+	}
+}
+
+impl Into<Signature> for SerializableSignature {
+	fn into(self) -> Signature {
+		self.0
+	}
+}
+
+impl Deref for SerializableSignature {
+	type Target = Signature;
+
+	fn deref(&self) -> &Signature {
+		&self.0
+	}
+}
+
+impl Serialize for SerializableSignature {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
+		serializer.serialize_str(&(*self.0).to_hex())
+	}
+}
+
+impl Deserialize for SerializableSignature {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
+		struct HashVisitor;
+
+		impl Visitor for HashVisitor {
+			type Value = SerializableSignature;
+
+			fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+				write!(formatter, "a hex-encoded Signature")
+			}
+
+			fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
+				value.parse().map(|s| SerializableSignature(s)).map_err(SerdeError::custom)
+			}
+
+			fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
+				self.visit_str(value.as_ref())
+			}
+		}
+
+		deserializer.deserialize(HashVisitor)
+	}
+}
+
+#[derive(Clone, Debug)]
+/// Serializable H256.
+pub struct SerializableH256(H256);
+
+impl<T> From<T> for SerializableH256 where H256: From<T> {
+	fn from(s: T) -> SerializableH256 {
+		SerializableH256(s.into())
+	}
+}
+
+impl Into<H256> for SerializableH256 {
+	fn into(self) -> H256 {
+		self.0
+	}
+}
+
+impl Deref for SerializableH256 {
+	type Target = H256;
+
+	fn deref(&self) -> &H256 {
+		&self.0
+	}
+}
+
+impl Serialize for SerializableH256 {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
+		serializer.serialize_str(&(*self.0).to_hex())
+	}
+}
+
+impl Deserialize for SerializableH256 {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
+		struct HashVisitor;
+
+		impl Visitor for HashVisitor {
+			type Value = SerializableH256;
+
+			fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+				write!(formatter, "a hex-encoded H256")
+			}
+
+			fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
+				value.parse().map(|s| SerializableH256(s)).map_err(SerdeError::custom)
+			}
+
+			fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
+				self.visit_str(value.as_ref())
+			}
+		}
+
+		deserializer.deserialize(HashVisitor)
+	}
+}
+
+#[derive(Clone, Debug)]
+/// Serializable EC scalar/secret key.
+pub struct SerializableSecret(Secret);
+
+impl<T> From<T> for SerializableSecret where Secret: From<T> {
+	fn from(s: T) -> SerializableSecret {
+		SerializableSecret(s.into())
+	}
+}
+
+impl Into<Secret> for SerializableSecret {
+	fn into(self) -> Secret {
+		self.0
+	}
+}
+
+impl Deref for SerializableSecret {
+	type Target = Secret;
+
+	fn deref(&self) -> &Secret {
+		&self.0
+	}
+}
+
+impl Serialize for SerializableSecret {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
+		serializer.serialize_str(&(*self.0).to_hex())
+	}
+}
+
+impl Deserialize for SerializableSecret {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
+		struct HashVisitor;
+
+		impl Visitor for HashVisitor {
+			type Value = SerializableSecret;
+
+			fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+				write!(formatter, "a hex-encoded EC scalar")
+			}
+
+			fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
+				value.parse().map(|s| SerializableSecret(s)).map_err(SerdeError::custom)
+			}
+
+			fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
+				self.visit_str(value.as_ref())
+			}
+		}
+
+		deserializer.deserialize(HashVisitor)
+	}
+}
+
+#[derive(Clone, Debug)]
+/// Serializable EC point/public key.
+pub struct SerializablePublic(Public);
+
+impl<T> From<T> for SerializablePublic where Public: From<T> {
+	fn from(p: T) -> SerializablePublic {
+		SerializablePublic(p.into())
+	}
+}
+
+impl Into<Public> for SerializablePublic {
+	fn into(self) -> Public {
+		self.0
+	}
+}
+
+impl Deref for SerializablePublic {
+	type Target = Public;
+
+	fn deref(&self) -> &Public {
+		&self.0
+	}
+}
+
+impl Eq for SerializablePublic { }
+
+impl PartialEq for SerializablePublic {
+	fn eq(&self, other: &SerializablePublic) -> bool {
+		self.0.eq(&other.0)
+	}
+}
+
+impl Ord for SerializablePublic {
+	fn cmp(&self, other: &SerializablePublic) -> Ordering {
+		self.0.cmp(&other.0)
+	}
+}
+
+impl PartialOrd for SerializablePublic {
+	fn partial_cmp(&self, other: &SerializablePublic) -> Option<Ordering> {
+		self.0.partial_cmp(&other.0)
+	}
+}
+
+impl Serialize for SerializablePublic {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
+		serializer.serialize_str(&(*self.0).to_hex())
+	}
+}
+
+impl Deserialize for SerializablePublic {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
+		struct HashVisitor;
+
+		impl Visitor for HashVisitor {
+			type Value = SerializablePublic;
+
+			fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+				write!(formatter, "a hex-encoded EC point")
+			}
+
+			fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
+				value.parse().map(|s| SerializablePublic(s)).map_err(SerdeError::custom)
+			}
+
+			fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
+				self.visit_str(value.as_ref())
+			}
+		}
+
+		deserializer.deserialize(HashVisitor)
+	}
+}
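
Not part of the patch: the wrappers above serialize key material as bare hex strings, so a public key should round-trip through serde_json as sketched below. Written as if inside the crate, since the serialization module is private:

    use serde_json;
    use ethkey::{Random, Generator};
    use serialization::SerializablePublic;

    fn roundtrip_public_example() {
        let public = Random.generate().unwrap().public().clone();
        let wrapped: SerializablePublic = public.clone().into();

        // Serializes to a JSON string of plain lowercase hex (no 0x prefix).
        let json = serde_json::to_string(&wrapped).expect("hex encoding");
        let restored: SerializablePublic = serde_json::from_str(&json).expect("same hex parses back");
        assert_eq!(*restored, public);
    }
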
diff --git a/secret_store/src/traits.rs b/secret_store/src/traits.rs
index 9a68e9c4d..1a407e5c7 100644
--- a/secret_store/src/traits.rs
+++ b/secret_store/src/traits.rs
@@ -19,6 +19,8 @@ use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey};
 #[ipc(client_ident="RemoteKeyServer")]
 /// Secret store key server
 pub trait KeyServer: Send + Sync {
+	/// Generate encryption key for given document.
+	fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result<DocumentEncryptedKey, Error>;
 	/// Request encryption key of given document for given requestor
 	fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error>;
 }
diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs
index f318e6543..514b4eb6b 100644
--- a/secret_store/src/types/all.rs
+++ b/secret_store/src/types/all.rs
@@ -15,10 +15,14 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::fmt;
+use std::collections::BTreeMap;
 
 use ethkey;
 use util;
+use key_server_cluster;
 
+/// Node id.
+pub type NodeId = ethkey::Public;
 /// Document address type.
 pub type DocumentAddress = util::H256;
 /// Document key type.
@@ -46,16 +50,53 @@ pub enum Error {
 	Internal(String),
 }
 
+#[derive(Debug)]
+#[binary]
+/// Secret store configuration
+pub struct NodeAddress {
+	/// IP address.
+	pub address: String,
+	/// IP port.
+	pub port: u16,
+}
+
 #[derive(Debug)]
 #[binary]
 /// Secret store configuration
 pub struct ServiceConfiguration {
-	/// Interface to listen to
-	pub listener_addr: String,
-	/// Port to listen to
-	pub listener_port: u16,
+	/// HTTP listener address.
+	pub listener_address: NodeAddress,
 	/// Data directory path for secret store
 	pub data_path: String,
+	/// Cluster configuration.
+	pub cluster_config: ClusterConfiguration,
+}
+
+#[derive(Debug)]
+#[binary]
+/// Key server cluster configuration
+pub struct ClusterConfiguration {
+	/// Number of threads reserved by cluster.
+	pub threads: usize,
+	/// Private key this node holds.
+	pub self_private: Vec<u8>, // holds ethkey::Secret
+	/// This node address.
+	pub listener_address: NodeAddress,
+	/// All cluster nodes addresses.
+	pub nodes: BTreeMap<String, NodeAddress>,
+	/// Allow outbound connections to 'higher' nodes.
+	/// This is useful for tests, but slower a bit for production.
+	pub allow_connecting_to_higher_nodes: bool,
+	/// Encryption session configuration.
+	pub encryption_config: EncryptionConfiguration,
+}
+
+#[derive(Clone, Debug)]
+#[binary]
+/// Encryption parameters.
+pub struct EncryptionConfiguration {
+	/// Key check timeout.
+	pub key_check_timeout_ms: u64,
 }
 
 impl fmt::Display for Error {
@@ -70,6 +111,18 @@ impl fmt::Display for Error {
 	}
 }
 
+impl From<ethkey::Error> for Error {
+	fn from(err: ethkey::Error) -> Self {
+		Error::Internal(err.into())
+	}
+}
+
+impl From<key_server_cluster::Error> for Error {
+	fn from(err: key_server_cluster::Error) -> Self {
+		Error::Internal(err.into())
+	}
+}
+
 impl Into<String> for Error {
 	fn into(self) -> String {
 		format!("{}", self)