commit bbe0eb96f4
Merge branch 'master' into on-demand-priority
.gitlab-ci.yml

@@ -14,13 +14,14 @@ cache:
  untracked: true
linux-stable:
  stage: build
-  image: ethcore/rust:stable
+  image: parity/rust:gitlab-ci
  only:
    - beta
    - tags
    - stable
    - triggers
  script:
+    - rustup default stable
    - cargo build -j $(nproc) --release --features final $CARGOFLAGS
    - cargo build -j $(nproc) --release -p evmbin
    - cargo build -j $(nproc) --release -p ethstore
@@ -105,13 +106,14 @@ linux-stable-debian:
    name: "stable-x86_64-unknown-debian-gnu_parity"
linux-beta:
  stage: build
-  image: ethcore/rust:beta
+  image: parity/rust:gitlab-ci
  only:
    - beta
    - tags
    - stable
    - triggers
  script:
+    - rustup default beta
    - cargo build -j $(nproc) --release $CARGOFLAGS
    - strip target/release/parity
  tags:
@@ -124,13 +126,14 @@ linux-beta:
  allow_failure: true
linux-nightly:
  stage: build
-  image: ethcore/rust:nightly
+  image: parity/rust:gitlab-ci
  only:
    - beta
    - tags
    - stable
    - triggers
  script:
+    - rustup default nightly
    - cargo build -j $(nproc) --release $CARGOFLAGS
    - strip target/release/parity
  tags:
@@ -544,11 +547,12 @@ test-windows:
  allow_failure: true
test-rust-stable:
  stage: test
-  image: ethcore/rust:stable
+  image: parity/rust:gitlab-ci
  before_script:
    - git submodule update --init --recursive
    - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l)
  script:
+    - rustup show
    - export RUST_BACKTRACE=1
    - if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi
  tags:
Cargo.lock (generated, 173 changed lines)
@@ -30,7 +30,7 @@ dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-dapps 1.7.0",
@@ -350,7 +350,7 @@ name = "env_logger"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]

@@ -382,7 +382,7 @@ dependencies = [
name = "ethash"
version = "1.7.0"
dependencies = [
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"sha3 0.1.0",
@@ -420,7 +420,7 @@ dependencies = [
"itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"lru-cache 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"native-contracts 0.1.0",
"num 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -465,7 +465,7 @@ name = "ethcore-io"
version = "1.7.0"
dependencies = [
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.1 (git+https://github.com/paritytech/mio)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -499,7 +499,7 @@ dependencies = [
"ethcore-ipc 1.7.0",
"ethcore-ipc-codegen 1.7.0",
"ethcore-ipc-nano 1.7.0",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"nanomsg 0.5.1 (git+https://github.com/paritytech/nanomsg.rs.git?branch=parity-1.7)",
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -511,7 +511,7 @@ version = "1.7.0"
dependencies = [
"ethcore-ipc 1.7.0",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"nanomsg 0.5.1 (git+https://github.com/paritytech/nanomsg.rs.git?branch=parity-1.7)",
]

@@ -524,7 +524,7 @@ dependencies = [
"ethcore-ipc-codegen 1.7.0",
"ethcore-ipc-nano 1.7.0",
"ethcore-util 1.7.0",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"nanomsg 0.5.1 (git+https://github.com/paritytech/nanomsg.rs.git?branch=parity-1.7)",
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -542,7 +542,7 @@ dependencies = [
"ethcore-util 1.7.0",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0",
"smallvec 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -559,7 +559,7 @@ dependencies = [
"env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -579,7 +579,7 @@ dependencies = [
"ethkey 0.2.0",
"igd 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.1 (git+https://github.com/paritytech/mio)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"path 0.1.0",
@@ -596,6 +596,7 @@ dependencies = [
name = "ethcore-rpc"
version = "1.7.0"
dependencies = [
"cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.7.0",
"ethcore 1.7.0",
@@ -617,11 +618,13 @@ dependencies = [
"jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"multihash 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-reactor 0.1.0",
"parity-updater 1.7.0",
"rlp 0.1.0",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -649,14 +652,15 @@ dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"native-contracts 0.1.0",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -674,7 +678,7 @@ dependencies = [
"ethcore-util 1.7.0",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-ui 1.7.0",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -698,9 +702,9 @@ dependencies = [
"jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -719,7 +723,7 @@ dependencies = [
"heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"lru-cache 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -786,7 +790,7 @@ dependencies = [
"ethkey 0.2.0",
"itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-wordlist 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -817,7 +821,7 @@ dependencies = [
"ethcore-util 1.7.0",
"ethkey 0.2.0",
"heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0",
@@ -857,7 +861,7 @@ version = "0.1.0"
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -922,7 +926,7 @@ dependencies = [
"ethkey 0.2.0",
"hidapi 0.3.1 (git+https://github.com/paritytech/hidapi-rs)",
"libusb 0.3.0 (git+https://github.com/paritytech/libusb-rs)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

@@ -964,7 +968,7 @@ dependencies = [
"cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rotor 0.6.3 (git+https://github.com/paritytech/rotor)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -982,7 +986,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1079,7 +1083,7 @@ version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1093,7 +1097,7 @@ dependencies = [
"hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1105,7 +1109,7 @@ source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c0
dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-tokio-ipc 0.1.0 (git+https://github.com/nikvolf/parity-tokio-ipc)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1127,7 +1131,7 @@ source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c0
dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)",
"tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)",
@@ -1140,7 +1144,7 @@ version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

@@ -1150,8 +1154,8 @@ version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

@@ -1162,7 +1166,7 @@ source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c0
dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1233,7 +1237,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "log"
-version = "0.3.6"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
@@ -1262,7 +1266,7 @@ name = "mime"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -1293,9 +1297,9 @@ dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
+"net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)",
"nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1303,16 +1307,16 @@ dependencies = [

[[package]]
name = "mio"
-version = "0.6.2"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
+"iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
-"nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+"miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1324,8 +1328,8 @@ source = "git+https://github.com/alexcrichton/mio-named-pipes#903dc2f7eac6700c62
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazycell 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+"mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1336,7 +1340,7 @@ version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
-"mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+"mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -1345,7 +1349,7 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
+"net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1356,7 +1360,7 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
+"net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1439,7 +1443,7 @@ dependencies = [

[[package]]
name = "net2"
-version = "0.2.23"
+version = "0.2.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1620,7 +1624,7 @@ dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1660,7 +1664,7 @@ dependencies = [
"ethcore-util 1.7.0",
"fetch 0.1.0",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-reactor 0.1.0",
@@ -1689,7 +1693,7 @@ dependencies = [
"ethcore-io 1.7.0",
"ethcore-util 1.7.0",
"ethkey 0.2.0",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0",
"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1701,7 +1705,7 @@ name = "parity-reactor"
version = "0.1.0"
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -1713,7 +1717,7 @@ dependencies = [
"ethcore-util 1.7.0",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1728,11 +1732,11 @@ version = "0.1.0"
source = "git+https://github.com/nikvolf/parity-tokio-ipc#3d4234de6bdc78688ef803935111003080fd5375"
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)",
"miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)",
"tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)",
"tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1757,7 +1761,7 @@ dependencies = [
[[package]]
name = "parity-ui-precompiled"
version = "1.4.0"
-source = "git+https://github.com/paritytech/js-precompiled.git#9bfc6f3dfca2c337c53084bedcc65c2b526927a1"
+source = "git+https://github.com/paritytech/js-precompiled.git#b4c41885c6e02c64fb773546b2f135f56ea7022f"
dependencies = [
"parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1773,7 +1777,7 @@ dependencies = [
"ethcore-util 1.7.0",
"ethsync 1.7.0",
"ipc-common-types 1.7.0",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-hash-fetch 1.7.0",
"parity-reactor 0.1.0",
"path 0.1.0",
@@ -1992,7 +1996,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper-native-tls 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_urlencoded 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2043,7 +2047,7 @@ name = "rotor"
version = "0.6.3"
source = "git+https://github.com/paritytech/rotor#2a3764a830174aa94405593be550e8fc7ecea25a"
dependencies = [
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.1 (git+https://github.com/paritytech/mio)",
"quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2309,7 +2313,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
name = "stats"
version = "0.1.0"
dependencies = [
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -2361,7 +2365,7 @@ version = "0.58.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex_errors 0.58.0 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex_pos 0.58.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2442,14 +2446,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "tokio-core"
-version = "0.1.4"
+version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+"iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+"mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -2459,7 +2466,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -2468,7 +2475,7 @@ version = "0.1.0"
source = "git+https://github.com/tokio-rs/tokio-line#482614ae0c82daf584727ae65a80d854fe861f81"
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -2480,10 +2487,10 @@ source = "git+https://github.com/tomusdrw/tokio-minihttp#8acbafae3e77e7f7eb516b4
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+"net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -2495,7 +2502,7 @@ source = "git+https://github.com/alexcrichton/tokio-named-pipes#3a22f8fc9a441b54
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -2504,13 +2511,13 @@ version = "0.1.0"
source = "git+https://github.com/tomusdrw/tokio-proto#f6ee08cb594fa2fc1b4178eaaca0855d66e68fd3"
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+"net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

@@ -2520,13 +2527,13 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+"net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

@@ -2544,10 +2551,10 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+"mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-uds 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -2695,7 +2702,7 @@ source = "git+https://github.com/paritytech/ws-rs.git?branch=parity-1.7#30415c17
dependencies = [
"bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.1 (git+https://github.com/paritytech/mio)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2827,7 +2834,7 @@ dependencies = [
"checksum libusb-sys 0.2.3 (git+https://github.com/paritytech/libusb-sys)" = "<none>"
"checksum linked-hash-map 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bda158e0dabeb97ee8a401f4d17e479d6b891a14de0bba79d5cc2d4d325b5e48"
"checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd"
-"checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054"
+"checksum log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5141eca02775a762cc6cd564d8d2c50f67c0ea3a372cbf1c51592b3e029e10ad"
"checksum lru-cache 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "656fa4dfcb02bcf1063c592ba3ff6a5303ee1f2afe98c8a889e8b1a77c6dfdb7"
"checksum matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "15305656809ce5a4805b1ff2946892810992197ce1270ff79baded852187942e"
"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4"
@@ -2835,7 +2842,7 @@ dependencies = [
"checksum mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e50bf542f81754ef69e5cea856946a3819f7c09ea97b4903c8bc8a89f74e7b6"
"checksum miniz-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "9d1f4d337a01c32e1f2122510fed46393d53ca35a7f429cb0450abaedfa3ed54"
"checksum mio 0.6.1 (git+https://github.com/paritytech/mio)" = "<none>"
-"checksum mio 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5b493dc9fd96bd2077f2117f178172b0765db4dfda3ea4d8000401e6d65d3e80"
+"checksum mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f27d38f824a0d267d55b29b171e9e99269a53812e385fa75c1fe700ae254a6a4"
"checksum mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)" = "<none>"
"checksum mio-uds 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "78437f00d9615c366932cbfe79790b5c2945706ba67cf78378ffacc0069ed9de"
"checksum miow 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3e690c5df6b2f60acd45d56378981e827ff8295562fc8d34f573deb267a59cd1"
@@ -2846,7 +2853,7 @@ dependencies = [
"checksum nanomsg 0.5.1 (git+https://github.com/paritytech/nanomsg.rs.git?branch=parity-1.7)" = "<none>"
"checksum nanomsg-sys 0.5.0 (git+https://github.com/paritytech/nanomsg.rs.git?branch=parity-1.7)" = "<none>"
"checksum native-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aa4e52995154bb6f0b41e4379a279482c9387c1632e3798ba4e511ef8c54ee09"
-"checksum net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)" = "6a816012ca11cb47009693c1e0c6130e26d39e4d97ee2a13c50e868ec83e3204"
+"checksum net2 0.2.27 (registry+https://github.com/rust-lang/crates.io-index)" = "18b9642ad6222faf5ce46f6966f59b71b9775ad5758c9e09fcf0a6c8061972b4"
"checksum nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a0d95c5fa8b641c10ad0b8887454ebaafa3c92b5cd5350f8fc693adafd178e7b"
"checksum nodrop 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "52cd74cd09beba596430cc6e3091b74007169a56246e1262f0ba451ea95117b2"
"checksum nom 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6caab12c5f97aa316cb249725aa32115118e1522b445e26c257dd77cad5ffd4e"
@@ -2944,7 +2951,7 @@ dependencies = [
"checksum thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c85048c6260d17cf486ceae3282d9fb6b90be220bf5b28c400f5485ffc29f0c7"
"checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af"
"checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270"
-"checksum tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3d1be481b55126f02ef88ff86748086473cb537a949fc4a8f4be403a530ae54b"
+"checksum tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "99e958104a67877907c1454386d5482fe8e965a55d60be834a15a44328e7dc76"
"checksum tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a278fde45f1be68e44995227d426aaa4841e0980bb0a21b981092f28c3c8473"
"checksum tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)" = "<none>"
"checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "<none>"
@@ -14,6 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

+use std::sync::Arc;
+
use unicase::UniCase;
use hyper::{server, net, Decoder, Encoder, Next, Control};
use hyper::header;
@@ -28,16 +30,16 @@ use endpoint::{Endpoint, Endpoints, Handler, EndpointPath};
use jsonrpc_http_server::{self, AccessControlAllowOrigin};

#[derive(Clone)]
-pub struct RestApi<F> {
+pub struct RestApi {
    // TODO [ToDr] cors_domains should be handled by the server to avoid duplicated logic.
    // RequestMiddleware should be able to tell that cors headers should be included.
    cors_domains: Option<Vec<AccessControlAllowOrigin>>,
    apps: Vec<App>,
-    fetcher: F,
+    fetcher: Arc<Fetcher>,
}

-impl<F: Fetcher + Clone> RestApi<F> {
-    pub fn new(cors_domains: Vec<AccessControlAllowOrigin>, endpoints: &Endpoints, fetcher: F) -> Box<Endpoint> {
+impl RestApi {
+    pub fn new(cors_domains: Vec<AccessControlAllowOrigin>, endpoints: &Endpoints, fetcher: Arc<Fetcher>) -> Box<Endpoint> {
        Box::new(RestApi {
            cors_domains: Some(cors_domains),
            apps: Self::list_apps(endpoints),
@@ -52,22 +54,22 @@ impl<F: Fetcher + Clone> RestApi<F> {
    }
}

-impl<F: Fetcher + Clone> Endpoint for RestApi<F> {
+impl Endpoint for RestApi {
    fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box<Handler> {
        Box::new(RestApiRouter::new((*self).clone(), path, control))
    }
}

-struct RestApiRouter<F> {
-    api: RestApi<F>,
+struct RestApiRouter {
+    api: RestApi,
    cors_header: Option<header::AccessControlAllowOrigin>,
    path: Option<EndpointPath>,
    control: Option<Control>,
    handler: Box<Handler>,
}

-impl<F: Fetcher> RestApiRouter<F> {
-    fn new(api: RestApi<F>, path: EndpointPath, control: Control) -> Self {
+impl RestApiRouter {
+    fn new(api: RestApi, path: EndpointPath, control: Control) -> Self {
        RestApiRouter {
            path: Some(path),
            cors_header: None,
@@ -82,6 +84,7 @@ impl<F: Fetcher> RestApiRouter<F> {
    }

    fn resolve_content(&self, hash: Option<&str>, path: EndpointPath, control: Control) -> Option<Box<Handler>> {
+        trace!(target: "dapps", "Resolving content: {:?} from path: {:?}", hash, path);
        match hash {
            Some(hash) if self.api.fetcher.contains(hash) => {
                Some(self.api.fetcher.to_async_handler(path, control))
@@ -114,8 +117,7 @@ impl<F: Fetcher> RestApiRouter<F> {
        }
    }
}

-impl<F: Fetcher> server::Handler<net::HttpStream> for RestApiRouter<F> {
+impl server::Handler<net::HttpStream> for RestApiRouter {
    fn on_request(&mut self, request: server::Request<net::HttpStream>) -> Next {
        self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains).into();

@@ -168,5 +170,4 @@ impl<F: Fetcher> server::Handler<net::HttpStream> for RestApiRouter<F> {
    fn on_response_writable(&mut self, encoder: &mut Encoder<net::HttpStream>) -> Next {
        self.handler.on_response_writable(encoder)
    }

}
@@ -47,8 +47,7 @@ pub trait Fetcher: Send + Sync + 'static {
    fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box<Handler>;
}

-#[derive(Clone)]
-pub struct ContentFetcher<F: Fetch + Clone = FetchClient, R: URLHint + Clone + 'static = URLHintContract> {
+pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + 'static = URLHintContract> {
    dapps_path: PathBuf,
    resolver: R,
    cache: Arc<Mutex<ContentCache>>,
@@ -58,14 +57,14 @@ pub struct ContentFetcher<F: Fetch + Clone = FetchClient, R: URLHint + Clone + '
    fetch: F,
}

-impl<R: URLHint + Clone + 'static, F: Fetch + Clone> Drop for ContentFetcher<F, R> {
+impl<R: URLHint + 'static, F: Fetch> Drop for ContentFetcher<F, R> {
    fn drop(&mut self) {
        // Clear cache path
        let _ = fs::remove_dir_all(&self.dapps_path);
    }
}

-impl<R: URLHint + Clone + 'static, F: Fetch + Clone> ContentFetcher<F, R> {
+impl<R: URLHint + 'static, F: Fetch> ContentFetcher<F, R> {

    pub fn new(resolver: R, sync_status: Arc<SyncStatus>, embeddable_on: Option<(String, u16)>, remote: Remote, fetch: F) -> Self {
        let mut dapps_path = env::temp_dir();
@@ -98,7 +97,7 @@ impl<R: URLHint + Clone + 'static, F: Fetch + Clone> ContentFetcher<F, R> {
    }
}

-impl<R: URLHint + Clone + 'static, F: Fetch + Clone> Fetcher for ContentFetcher<F, R> {
+impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
    fn contains(&self, content_id: &str) -> bool {
        {
            let mut cache = self.cache.lock();
@@ -98,13 +98,13 @@ impl<F> WebProxyTokens for F where F: Fn(String) -> bool + Send + Sync {
}

/// Dapps server as `jsonrpc-http-server` request middleware.
-pub struct Middleware<F: Fetch + Clone> {
-    router: router::Router<apps::fetcher::ContentFetcher<F>>,
+pub struct Middleware {
+    router: router::Router,
}

-impl<F: Fetch + Clone> Middleware<F> {
+impl Middleware {
    /// Creates new Dapps server middleware.
-    pub fn new(
+    pub fn new<F: Fetch + Clone>(
        remote: Remote,
        signer_address: Option<(String, u16)>,
        dapps_path: PathBuf,
@@ -114,13 +114,13 @@ impl<F: Fetch + Clone> Middleware<F> {
        web_proxy_tokens: Arc<WebProxyTokens>,
        fetch: F,
    ) -> Self {
-        let content_fetcher = apps::fetcher::ContentFetcher::new(
+        let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(
            hash_fetch::urlhint::URLHintContract::new(registrar),
            sync_status,
            signer_address.clone(),
            remote.clone(),
            fetch.clone(),
-        );
+        ));
        let endpoints = apps::all_endpoints(
            dapps_path,
            extra_dapps,
@@ -138,7 +138,11 @@ impl<F: Fetch + Clone> Middleware<F> {
        special.insert(router::SpecialEndpoint::Utils, Some(apps::utils()));
        special.insert(
            router::SpecialEndpoint::Api,
-            Some(api::RestApi::new(cors_domains.clone(), &endpoints, content_fetcher.clone())),
+            Some(api::RestApi::new(
+                cors_domains.clone(),
+                &endpoints,
+                content_fetcher.clone()
+            )),
        );
        special
    };
@@ -156,7 +160,7 @@ impl<F: Fetch + Clone> Middleware<F> {
    }
}

-impl<F: Fetch + Clone> http::RequestMiddleware for Middleware<F> {
+impl http::RequestMiddleware for Middleware {
    fn on_request(&self, req: &hyper::server::Request<hyper::net::HttpStream>, control: &hyper::Control) -> http::RequestMiddlewareAction {
        self.router.on_request(req, control)
    }
@@ -19,6 +19,7 @@

use address;
use std::cmp;
+use std::sync::Arc;
use std::collections::HashMap;

use url::{Url, Host};
@@ -40,14 +41,14 @@ pub enum SpecialEndpoint {
    None,
}

-pub struct Router<F> {
+pub struct Router {
    signer_address: Option<(String, u16)>,
    endpoints: Endpoints,
-    fetch: F,
+    fetch: Arc<Fetcher>,
    special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
}

-impl<F: Fetcher + 'static> http::RequestMiddleware for Router<F> {
+impl http::RequestMiddleware for Router {
    fn on_request(&self, req: &server::Request<HttpStream>, control: &Control) -> http::RequestMiddlewareAction {
        // Choose proper handler depending on path / domain
        let url = handlers::extract_url(req);
@@ -146,10 +147,10 @@ impl<F: Fetcher + 'static> http::RequestMiddleware for Router<F> {
    }
}

-impl<F> Router<F> {
+impl Router {
    pub fn new(
        signer_address: Option<(String, u16)>,
-        content_fetcher: F,
+        content_fetcher: Arc<Fetcher>,
        endpoints: Endpoints,
        special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
    ) -> Self {
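Taken together, the dapps changes above follow one pattern: the generic fetcher parameter on RestApi, RestApiRouter, Router and Middleware is replaced by a shared Arc<Fetcher> trait object, so only Middleware::new keeps a type parameter. A minimal, self-contained sketch of that pattern is below; the trait and type names are simplified stand-ins, not parity's actual API, and modern Rust would write the trait object as Arc<dyn Fetcher>.

use std::sync::Arc;

// Stand-in for the dapps `Fetcher` trait; the real one has more methods.
trait Fetcher: Send + Sync {
    fn contains(&self, content_id: &str) -> bool;
}

// The handler stores the fetcher as a shared trait object instead of a
// type parameter, so it no longer needs to be generic over `F: Fetcher`.
#[derive(Clone)]
struct RestApi {
    fetcher: Arc<dyn Fetcher>, // written `Arc<Fetcher>` in pre-`dyn` Rust
}

impl RestApi {
    fn new(fetcher: Arc<dyn Fetcher>) -> Self {
        RestApi { fetcher }
    }

    fn resolve(&self, hash: &str) -> bool {
        self.fetcher.contains(hash)
    }
}

struct DummyFetcher;

impl Fetcher for DummyFetcher {
    fn contains(&self, content_id: &str) -> bool {
        !content_id.is_empty()
    }
}

fn main() {
    // Only the constructor needs to know the concrete fetcher type.
    let api = RestApi::new(Arc::new(DummyFetcher));
    assert!(api.resolve("0x01"));
}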
@@ -23,11 +23,10 @@ use snapshot::Error;
use util::{U256, H256, Bytes, HashDB, SHA3_EMPTY, SHA3_NULL_RLP};
use util::trie::{TrieDB, Trie};
use rlp::{RlpStream, UntrustedRlp};
-use itertools::Itertools;

use std::collections::HashSet;

-// An empty account -- these are replaced with RLP null data for a space optimization.
+// An empty account -- these were replaced with RLP null data for a space optimization in v1.
const ACC_EMPTY: BasicAccount = BasicAccount {
    nonce: U256([0, 0, 0, 0]),
    balance: U256([0, 0, 0, 0]),
@@ -62,28 +61,19 @@ impl CodeState {
}

// walk the account's storage trie, returning a vector of RLP items containing the
-// account properties and the storage. Each item contains at most `max_storage_items`
+// account address hash, account properties and the storage. Each item contains at most `max_storage_items`
// storage records split according to snapshot format definition.
-pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, max_storage_items: usize) -> Result<Vec<Bytes>, Error> {
-    if acc == &ACC_EMPTY {
-        return Ok(vec![::rlp::NULL_RLP.to_vec()]);
-    }
-
+pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, first_chunk_size: usize, max_chunk_size: usize) -> Result<Vec<Bytes>, Error> {
    let db = TrieDB::new(acct_db, &acc.storage_root)?;
+    let mut chunks = Vec::new();
+    let mut db_iter = db.iter()?;
+    let mut target_chunk_size = first_chunk_size;
+    let mut account_stream = RlpStream::new_list(2);
+    let mut leftover: Option<Vec<u8>> = None;
+    loop {
+        account_stream.append(account_hash);
+        account_stream.begin_list(5);

-    let chunks = db.iter()?.chunks(max_storage_items);
-    let pair_chunks = chunks.into_iter().map(|chunk| chunk.collect());
-    pair_chunks.pad_using(1, |_| Vec::new(), ).map(|pairs| {
-        let mut stream = RlpStream::new_list(pairs.len());
-
-        for r in pairs {
-            let (k, v) = r?;
-            stream.begin_list(2).append(&k).append(&&*v);
-        }
-
-        let pairs_rlp = stream.out();
-
-        let mut account_stream = RlpStream::new_list(5);
        account_stream.append(&acc.nonce)
            .append(&acc.balance);

@@ -105,9 +95,49 @@ pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut Hash
        }
    }

-    account_stream.append_raw(&pairs_rlp, 1);
-    Ok(account_stream.out())
-    }).collect()
+        account_stream.begin_unbounded_list();
+        if account_stream.len() > target_chunk_size {
+            // account does not fit, push an empty record to mark a new chunk
+            target_chunk_size = max_chunk_size;
+            chunks.push(Vec::new());
+        }

+        if let Some(pair) = leftover.take() {
+            if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) {
+                return Err(Error::ChunkTooSmall);
+            }
+        }

+        loop {
+            match db_iter.next() {
+                Some(Ok((k, v))) => {
+                    let pair = {
+                        let mut stream = RlpStream::new_list(2);
+                        stream.append(&k).append(&&*v);
+                        stream.drain()
+                    };
+                    if !account_stream.append_raw_checked(&pair, 1, target_chunk_size) {
+                        account_stream.complete_unbounded_list();
+                        let stream = ::std::mem::replace(&mut account_stream, RlpStream::new_list(2));
+                        chunks.push(stream.out());
+                        target_chunk_size = max_chunk_size;
+                        leftover = Some(pair.to_vec());
+                        break;
+                    }
+                },
+                Some(Err(e)) => {
+                    return Err(e.into());
+                },
+                None => {
+                    account_stream.complete_unbounded_list();
+                    let stream = ::std::mem::replace(&mut account_stream, RlpStream::new_list(2));
+                    chunks.push(stream.out());
+                    return Ok(chunks);
+                }
+            }

+        }
+    }
}

// decode a fat rlp, and rebuild the storage trie as we go.
@@ -181,7 +211,7 @@ mod tests {
    use snapshot::tests::helpers::fill_storage;

    use util::sha3::{SHA3_EMPTY, SHA3_NULL_RLP};
-    use util::{Address, H256, HashDB, DBValue};
+    use util::{Address, H256, HashDB, DBValue, Hashable};
    use rlp::UntrustedRlp;

    use std::collections::HashSet;
@@ -203,8 +233,8 @@ mod tests {
        let thin_rlp = ::rlp::encode(&account);
        assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account);

-        let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value()).unwrap();
-        let fat_rlp = UntrustedRlp::new(&fat_rlps[0]);
+        let fat_rlps = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap();
+        let fat_rlp = UntrustedRlp::new(&fat_rlps[0]).at(1).unwrap();
        assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account);
    }

@@ -228,8 +258,8 @@ mod tests {
        let thin_rlp = ::rlp::encode(&account);
        assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account);

-        let fat_rlp = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value()).unwrap();
-        let fat_rlp = UntrustedRlp::new(&fat_rlp[0]);
+        let fat_rlp = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap();
+        let fat_rlp = UntrustedRlp::new(&fat_rlp[0]).at(1).unwrap();
        assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account);
    }

@@ -253,11 +283,11 @@ mod tests {
        let thin_rlp = ::rlp::encode(&account);
        assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account);

-        let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 100).unwrap();
+        let fat_rlps = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 500, 1000).unwrap();
|
||||
let mut root = SHA3_NULL_RLP;
|
||||
let mut restored_account = None;
|
||||
for rlp in fat_rlps {
|
||||
let fat_rlp = UntrustedRlp::new(&rlp);
|
||||
let fat_rlp = UntrustedRlp::new(&rlp).at(1).unwrap();
|
||||
restored_account = Some(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, root).unwrap().0);
|
||||
root = restored_account.as_ref().unwrap().storage_root.clone();
|
||||
}
|
||||
@ -297,12 +327,12 @@ mod tests {
|
||||
|
||||
let mut used_code = HashSet::new();
|
||||
|
||||
let fat_rlp1 = to_fat_rlps(&account1, &AccountDB::new(db.as_hashdb(), &addr1), &mut used_code, usize::max_value()).unwrap();
|
||||
let fat_rlp2 = to_fat_rlps(&account2, &AccountDB::new(db.as_hashdb(), &addr2), &mut used_code, usize::max_value()).unwrap();
|
||||
let fat_rlp1 = to_fat_rlps(&addr1.sha3(), &account1, &AccountDB::new(db.as_hashdb(), &addr1), &mut used_code, usize::max_value(), usize::max_value()).unwrap();
|
||||
let fat_rlp2 = to_fat_rlps(&addr2.sha3(), &account2, &AccountDB::new(db.as_hashdb(), &addr2), &mut used_code, usize::max_value(), usize::max_value()).unwrap();
|
||||
assert_eq!(used_code.len(), 1);
|
||||
|
||||
let fat_rlp1 = UntrustedRlp::new(&fat_rlp1[0]);
|
||||
let fat_rlp2 = UntrustedRlp::new(&fat_rlp2[0]);
|
||||
let fat_rlp1 = UntrustedRlp::new(&fat_rlp1[0]).at(1).unwrap();
|
||||
let fat_rlp2 = UntrustedRlp::new(&fat_rlp2[0]).at(1).unwrap();
|
||||
|
||||
let (acc, maybe_code) = from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr2), fat_rlp2, H256::zero()).unwrap();
|
||||
assert!(maybe_code.is_none());
|
||||
@ -316,9 +346,6 @@ mod tests {
|
||||
#[test]
|
||||
fn encoding_empty_acc() {
|
||||
let mut db = get_temp_state_db();
|
||||
let mut used_code = HashSet::new();
|
||||
|
||||
assert_eq!(to_fat_rlps(&ACC_EMPTY, &AccountDB::new(db.as_hashdb(), &Address::default()), &mut used_code, usize::max_value()).unwrap(), vec![::rlp::NULL_RLP.to_vec()]);
|
||||
assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &Address::default()), UntrustedRlp::new(&::rlp::NULL_RLP), H256::zero()).unwrap(), (ACC_EMPTY, None));
|
||||
}
|
||||
}
|
||||
|
@ -55,6 +55,8 @@ pub enum Error {
|
||||
Io(::std::io::Error),
|
||||
/// Snapshot version is not supported.
|
||||
VersionNotSupported(u64),
|
||||
/// Max chunk size is too small to fit basic account data.
|
||||
ChunkTooSmall,
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
@ -76,6 +78,7 @@ impl fmt::Display for Error {
|
||||
Error::Decoder(ref err) => err.fmt(f),
|
||||
Error::Trie(ref err) => err.fmt(f),
|
||||
Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supported.", ver),
|
||||
Error::ChunkTooSmall => write!(f, "Chunk size is too small."),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -83,9 +83,6 @@ mod traits {
|
||||
// Try to have chunks be around 4MB (before compression)
|
||||
const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;
|
||||
|
||||
// Try to have chunks be around 4MB (before compression)
|
||||
const MAX_STORAGE_ENTRIES_PER_ACCOUNT_RECORD: usize = 80_000;
|
||||
|
||||
// How many blocks to include in a snapshot, starting from the head of the chain.
|
||||
const SNAPSHOT_BLOCKS: u64 = 30000;
|
||||
|
||||
@ -305,20 +302,9 @@ impl<'a> StateChunker<'a> {
|
||||
//
|
||||
// If the buffer is greater than the desired chunk size,
|
||||
// this will write out the data to disk.
|
||||
fn push(&mut self, account_hash: Bytes, data: Bytes, force_chunk: bool) -> Result<(), Error> {
|
||||
let pair = {
|
||||
let mut stream = RlpStream::new_list(2);
|
||||
stream.append(&account_hash).append_raw(&data, 1);
|
||||
stream.out()
|
||||
};
|
||||
|
||||
if force_chunk || self.cur_size + pair.len() >= PREFERRED_CHUNK_SIZE {
|
||||
self.write_chunk()?;
|
||||
}
|
||||
|
||||
self.cur_size += pair.len();
|
||||
self.rlps.push(pair);
|
||||
|
||||
fn push(&mut self, data: Bytes) -> Result<(), Error> {
|
||||
self.cur_size += data.len();
|
||||
self.rlps.push(data);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -348,6 +334,11 @@ impl<'a> StateChunker<'a> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Get current chunk size.
|
||||
fn chunk_size(&self) -> usize {
|
||||
self.cur_size
|
||||
}
|
||||
}
|
||||
|
||||
/// Walk the given state database starting from the given root,
|
||||
@ -377,9 +368,12 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter +
|
||||
|
||||
let account_db = AccountDB::from_hash(db, account_key_hash);
|
||||
|
||||
let fat_rlps = account::to_fat_rlps(&account, &account_db, &mut used_code, MAX_STORAGE_ENTRIES_PER_ACCOUNT_RECORD)?;
|
||||
let fat_rlps = account::to_fat_rlps(&account_key_hash, &account, &account_db, &mut used_code, PREFERRED_CHUNK_SIZE - chunker.chunk_size(), PREFERRED_CHUNK_SIZE)?;
|
||||
for (i, fat_rlp) in fat_rlps.into_iter().enumerate() {
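// `to_fat_rlps` may have split this account across several records; the first one
// shares the chunk currently being filled, while every later record is forced
// into a fresh chunk on disk before being pushed.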
|
||||
chunker.push(account_key.clone(), fat_rlp, i > 0)?;
|
||||
if i > 0 {
|
||||
chunker.write_chunk()?;
|
||||
}
|
||||
chunker.push(fat_rlp)?;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -122,10 +122,9 @@ fn get_code_from_prev_chunk() {
|
||||
let mut db = MemoryDB::new();
|
||||
AccountDBMut::from_hash(&mut db, hash).insert(&code[..]);
|
||||
|
||||
let fat_rlp = account::to_fat_rlps(&acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value()).unwrap();
|
||||
|
||||
let fat_rlp = account::to_fat_rlps(&hash, &acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value(), usize::max_value()).unwrap();
|
||||
let mut stream = RlpStream::new_list(1);
|
||||
stream.begin_list(2).append(&hash).append_raw(&fat_rlp[0], 1);
|
||||
stream.append_raw(&fat_rlp[0], 1);
|
||||
stream.out()
|
||||
};
|
||||
|
||||
|
@ -214,10 +214,10 @@ mod derivation {
|
||||
use rcrypto::sha2::Sha512;
|
||||
use bigint::hash::{H512, H256};
|
||||
use bigint::prelude::{U256, U512, Uint};
|
||||
use secp256k1;
|
||||
use secp256k1::key::{SecretKey, PublicKey};
|
||||
use SECP256K1;
|
||||
use keccak;
|
||||
use math::curve_order;
|
||||
use super::{Label, Derivation};
|
||||
|
||||
#[derive(Debug)]
|
||||
@ -233,7 +233,7 @@ mod derivation {
|
||||
// For hardened derivation, pass u32 index at least 2^31 or custom Derivation::Hard(T) enum
|
||||
//
|
||||
// Can panic if passed `private_key` is not a valid secp256k1 private key
|
||||
// (outside of (0..curve_n()]) field
|
||||
// (outside of (0..curve_order()]) field
|
||||
pub fn private<T>(private_key: H256, chain_code: H256, index: Derivation<T>) -> (H256, H256) where T: Label {
|
||||
match index {
|
||||
Derivation::Soft(index) => private_soft(private_key, chain_code, index),
|
||||
@ -260,7 +260,7 @@ mod derivation {
|
||||
}
|
||||
|
||||
// Can panic if passed `private_key` is not a valid secp256k1 private key
|
||||
// (outside of (0..curve_n()]) field
|
||||
// (outside of (0..curve_order()]) field
|
||||
fn private_soft<T>(private_key: H256, chain_code: H256, index: T) -> (H256, H256) where T: Label {
|
||||
let mut data = vec![0u8; 33 + T::len()];
|
||||
|
||||
@ -295,7 +295,7 @@ mod derivation {
|
||||
|
||||
fn private_add(k1: U256, k2: U256) -> U256 {
|
||||
let sum = U512::from(k1) + U512::from(k2);
|
||||
modulo(sum, curve_n())
|
||||
modulo(sum, curve_order())
|
||||
}
|
||||
|
||||
// todo: surely can be optimized
|
||||
@ -305,12 +305,6 @@ mod derivation {
|
||||
md.into()
|
||||
}
|
||||
|
||||
// returns n (for mod(n)) for the secp256k1 elliptic curve
|
||||
// todo: maybe lazy static
|
||||
fn curve_n() -> U256 {
|
||||
H256::from_slice(&secp256k1::constants::CURVE_ORDER).into()
|
||||
}
|
||||
|
||||
pub fn public<T>(public_key: H512, chain_code: H256, derivation: Derivation<T>) -> Result<(H512, H256), Error> where T: Label {
|
||||
let index = match derivation {
|
||||
Derivation::Soft(index) => index,
|
||||
@ -339,7 +333,7 @@ mod derivation {
|
||||
let new_chain_code = H256::from(&i_512[32..64]);
|
||||
|
||||
// Generated private key can (extremely rarely) be out of secp256k1 key field
|
||||
if curve_n() <= new_private.clone().into() { return Err(Error::MissingIndex); }
|
||||
if curve_order() <= new_private.clone().into() { return Err(Error::MissingIndex); }
|
||||
let new_private_sec = SecretKey::from_slice(&SECP256K1, &*new_private)
|
||||
.expect("Private key belongs to the field [0..CURVE_ORDER) (checked above); So initializing can never fail; qed");
|
||||
let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec)
|
||||
|
@ -27,7 +27,7 @@ pub fn public_to_address(public: &Public) -> Address {
|
||||
result
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
/// secp256k1 key pair
|
||||
pub struct KeyPair {
|
||||
secret: Secret,
|
||||
|
@ -16,7 +16,9 @@
|
||||
|
||||
use super::{SECP256K1, Public, Secret, Error};
|
||||
use secp256k1::key;
|
||||
use secp256k1::constants::{GENERATOR_X, GENERATOR_Y};
|
||||
use secp256k1::constants::{GENERATOR_X, GENERATOR_Y, CURVE_ORDER};
|
||||
use bigint::prelude::U256;
|
||||
use bigint::hash::H256;
|
||||
|
||||
/// Inplace multiply public key by secret key (EC point * scalar)
|
||||
pub fn public_mul_secret(public: &mut Public, secret: &Secret) -> Result<(), Error> {
|
||||
@ -47,6 +49,14 @@ pub fn public_sub(public: &mut Public, other: &Public) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Replace public key with its negation (EC point = - EC point)
|
||||
pub fn public_negate(public: &mut Public) -> Result<(), Error> {
|
||||
let mut key_public = to_secp256k1_public(public)?;
|
||||
key_public.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
|
||||
set_public(public, &key_public);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Return base point of secp256k1
|
||||
pub fn generation_point() -> Public {
|
||||
let mut public_sec_raw = [0u8; 65];
|
||||
@ -61,6 +71,11 @@ pub fn generation_point() -> Public {
|
||||
public
|
||||
}
|
||||
|
||||
/// Return secp256k1 elliptic curve order
|
||||
pub fn curve_order() -> U256 {
|
||||
H256::from_slice(&CURVE_ORDER).into()
|
||||
}
|
||||
|
||||
fn to_secp256k1_public(public: &Public) -> Result<key::PublicKey, Error> {
|
||||
let public_data = {
|
||||
let mut temp = [4u8; 65];
|
||||
|
@ -67,6 +67,15 @@ impl Secret {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Inplace decrease secret key (scalar - 1)
|
||||
pub fn dec(&mut self) -> Result<(), Error> {
|
||||
let mut key_secret = self.to_secp256k1_secret()?;
|
||||
key_secret.add_assign(&SECP256K1, &key::MINUS_ONE_KEY)?;
|
||||
|
||||
*self = key_secret.into();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Inplace multiply one secret key to another (scalar * scalar)
|
||||
pub fn mul(&mut self, other: &Secret) -> Result<(), Error> {
|
||||
let mut key_secret = self.to_secp256k1_secret()?;
|
||||
|
@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "parity.js",
|
||||
"version": "1.7.46",
|
||||
"version": "1.7.49",
|
||||
"main": "release/index.js",
|
||||
"jsnext:main": "src/index.js",
|
||||
"author": "Parity Team <admin@parity.io>",
|
||||
|
@ -37,7 +37,11 @@ Object.keys(rustMethods).forEach((group) => {
|
||||
});
|
||||
});
|
||||
|
||||
function printType (type) {
|
||||
function printType (type, obj) {
|
||||
if (!type) {
|
||||
throw new Error(`Invalid type in ${JSON.stringify(obj)}`);
|
||||
}
|
||||
|
||||
return type.print || `\`${type.name}\``;
|
||||
}
|
||||
|
||||
@ -45,7 +49,7 @@ function formatDescription (obj, prefix = '', indent = '') {
|
||||
const optional = obj.optional ? '(optional) ' : '';
|
||||
const defaults = obj.default ? `(default: \`${obj.default}\`) ` : '';
|
||||
|
||||
return `${indent}${prefix}${printType(obj.type)} - ${optional}${defaults}${obj.desc}`;
|
||||
return `${indent}${prefix}${printType(obj.type, obj)} - ${optional}${defaults}${obj.desc}`;
|
||||
}
|
||||
|
||||
function formatType (obj) {
|
||||
|
@ -165,10 +165,6 @@ export function inOptions (_options = {}) {
|
||||
options[key] = inNumber16((new BigNumber(options[key])).round());
|
||||
break;
|
||||
|
||||
case 'minBlock':
|
||||
options[key] = options[key] ? inNumber16(options[key]) : null;
|
||||
break;
|
||||
|
||||
case 'value':
|
||||
case 'nonce':
|
||||
options[key] = inNumber16(options[key]);
|
||||
@ -211,3 +207,36 @@ export function inTraceType (whatTrace) {
|
||||
|
||||
return whatTrace;
|
||||
}
|
||||
|
||||
function inDeriveType (derive) {
|
||||
return derive && derive.type === 'hard' ? 'hard' : 'soft';
|
||||
}
|
||||
|
||||
export function inDeriveHash (derive) {
|
||||
const hash = derive && derive.hash ? derive.hash : derive;
|
||||
const type = inDeriveType(derive);
|
||||
|
||||
return {
|
||||
hash: inHex(hash),
|
||||
type
|
||||
};
|
||||
}
|
||||
|
||||
export function inDeriveIndex (derive) {
|
||||
if (!derive) {
|
||||
return [];
|
||||
}
|
||||
|
||||
if (!isArray(derive)) {
|
||||
derive = [derive];
|
||||
}
|
||||
|
||||
return derive.map(item => {
|
||||
const index = inNumber10(item && item.index ? item.index : item);
|
||||
|
||||
return {
|
||||
index,
|
||||
type: inDeriveType(item)
|
||||
};
|
||||
});
|
||||
}
|
||||
|
@ -16,7 +16,11 @@
|
||||
|
||||
import BigNumber from 'bignumber.js';
|
||||
|
||||
import { inAddress, inBlockNumber, inData, inFilter, inHex, inNumber10, inNumber16, inOptions, inTraceType } from './input';
|
||||
import {
|
||||
inAddress, inBlockNumber, inData, inFilter, inHex,
|
||||
inNumber10, inNumber16, inOptions, inTraceType,
|
||||
inDeriveHash, inDeriveIndex
|
||||
} from './input';
|
||||
import { isAddress } from '../../../test/types';
|
||||
|
||||
describe('api/format/input', () => {
|
||||
@ -215,7 +219,7 @@ describe('api/format/input', () => {
|
||||
expect(formatted.to).to.equal('');
|
||||
});
|
||||
|
||||
['gas', 'gasPrice', 'value', 'minBlock', 'nonce'].forEach((input) => {
|
||||
['gas', 'gasPrice', 'value', 'nonce'].forEach((input) => {
|
||||
it(`formats ${input} number as hexnumber`, () => {
|
||||
const block = {};
|
||||
|
||||
@ -226,8 +230,8 @@ describe('api/format/input', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('passes minBlock as null when specified as such', () => {
|
||||
expect(inOptions({ minBlock: null })).to.deep.equal({ minBlock: null });
|
||||
it('passes condition as null when specified as such', () => {
|
||||
expect(inOptions({ condition: null })).to.deep.equal({ condition: null });
|
||||
});
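For context, a minimal sketch of how a caller would pass the new `condition` option when composing a transaction; the addresses and amounts below are placeholder values, not fixtures from this suite:

// schedule propagation on either a block number or a UTC timestamp (seconds)
const options = {
  from: '0x407d73d8a49eeb85d32cf465507dd71d507100c1',  // placeholder sender
  to: '0x0042e5d2a662eeaca8a7e828c174f98f35d8925b',    // placeholder recipient
  value: '0xde0b6b3a7640000',                          // 1 ether in wei
  condition: { block: 1000000 }                        // or { time: 1491290692 }
};
// inOptions() leaves `condition` untouched (it is not a special-cased key),
// so the formatted object reaches the node exactly as written above.
console.log(inOptions(options));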
|
||||
|
||||
it('ignores and passes through unknown keys', () => {
|
||||
@ -272,4 +276,66 @@ describe('api/format/input', () => {
|
||||
expect(inTraceType(type)).to.deep.equal([type]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('inDeriveHash', () => {
|
||||
it('returns derive hash', () => {
|
||||
expect(inDeriveHash(1)).to.deep.equal({
|
||||
hash: '0x1',
|
||||
type: 'soft'
|
||||
});
|
||||
|
||||
expect(inDeriveHash(null)).to.deep.equal({
|
||||
hash: '0x',
|
||||
type: 'soft'
|
||||
});
|
||||
|
||||
expect(inDeriveHash({
|
||||
hash: 5
|
||||
})).to.deep.equal({
|
||||
hash: '0x5',
|
||||
type: 'soft'
|
||||
});
|
||||
|
||||
expect(inDeriveHash({
|
||||
hash: 5,
|
||||
type: 'hard'
|
||||
})).to.deep.equal({
|
||||
hash: '0x5',
|
||||
type: 'hard'
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('inDeriveIndex', () => {
|
||||
it('returns derive hash', () => {
|
||||
expect(inDeriveIndex(null)).to.deep.equal([]);
|
||||
expect(inDeriveIndex([])).to.deep.equal([]);
|
||||
|
||||
expect(inDeriveIndex([1])).to.deep.equal([{
|
||||
index: 1,
|
||||
type: 'soft'
|
||||
}]);
|
||||
|
||||
expect(inDeriveIndex({
|
||||
index: 1
|
||||
})).to.deep.equal([{
|
||||
index: 1,
|
||||
type: 'soft'
|
||||
}]);
|
||||
|
||||
expect(inDeriveIndex([{
|
||||
index: 1,
|
||||
type: 'hard'
|
||||
}, 5])).to.deep.equal([
|
||||
{
|
||||
index: 1,
|
||||
type: 'hard'
|
||||
},
|
||||
{
|
||||
index: 5,
|
||||
type: 'soft'
|
||||
}
|
||||
]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
@ -216,6 +216,8 @@ export function outSignerRequest (request) {
|
||||
break;
|
||||
|
||||
case 'payload':
|
||||
request[key].decrypt = outSigningPayload(request[key].decrypt);
|
||||
request[key].sign = outSigningPayload(request[key].sign);
|
||||
request[key].signTransaction = outTransaction(request[key].signTransaction);
|
||||
request[key].sendTransaction = outTransaction(request[key].sendTransaction);
|
||||
break;
|
||||
@ -284,12 +286,6 @@ export function outTransaction (tx) {
|
||||
tx[key] = outTransactionCondition(tx[key]);
|
||||
break;
|
||||
|
||||
case 'minBlock':
|
||||
tx[key] = tx[key]
|
||||
? outNumber(tx[key])
|
||||
: null;
|
||||
break;
|
||||
|
||||
case 'creates':
|
||||
case 'from':
|
||||
case 'to':
|
||||
@ -302,6 +298,20 @@ export function outTransaction (tx) {
|
||||
return tx;
|
||||
}
|
||||
|
||||
export function outSigningPayload (payload) {
|
||||
if (payload) {
|
||||
Object.keys(payload).forEach((key) => {
|
||||
switch (key) {
|
||||
case 'address':
|
||||
payload[key] = outAddress(payload[key]);
|
||||
break;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return payload;
|
||||
}
|
||||
|
||||
export function outTrace (trace) {
|
||||
if (trace) {
|
||||
if (trace.action) {
|
||||
|
@ -392,7 +392,7 @@ describe('api/format/output', () => {
|
||||
});
|
||||
});
|
||||
|
||||
['blockNumber', 'gasPrice', 'gas', 'minBlock', 'nonce', 'transactionIndex', 'value'].forEach((input) => {
|
||||
['blockNumber', 'gasPrice', 'gas', 'nonce', 'transactionIndex', 'value'].forEach((input) => {
|
||||
it(`formats ${input} number as hexnumber`, () => {
|
||||
const block = {};
|
||||
|
||||
@ -404,8 +404,8 @@ describe('api/format/output', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('passes minBlock as null when null', () => {
|
||||
expect(outTransaction({ minBlock: null })).to.deep.equal({ minBlock: null });
|
||||
it('passes condition as null when null', () => {
|
||||
expect(outTransaction({ condition: null })).to.deep.equal({ condition: null });
|
||||
});
|
||||
|
||||
it('ignores and passes through unknown keys', () => {
|
||||
|
@ -14,7 +14,9 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import { inAddress, inAddresses, inData, inHex, inNumber16, inOptions, inBlockNumber } from '../../format/input';
|
||||
import {
|
||||
inAddress, inAddresses, inData, inHex, inNumber16, inOptions, inBlockNumber, inDeriveHash, inDeriveIndex
|
||||
} from '../../format/input';
|
||||
import { outAccountInfo, outAddress, outAddresses, outChainStatus, outHistogram, outHwAccountInfo, outNodeKind, outNumber, outPeers, outRecentDapps, outTransaction, outVaultMeta } from '../../format/output';
|
||||
|
||||
export default class Parity {
|
||||
@ -117,6 +119,18 @@ export default class Parity {
|
||||
.execute('parity_devLogsLevels');
|
||||
}
|
||||
|
||||
deriveAddressHash (address, password, hash, shouldSave) {
|
||||
return this._transport
|
||||
.execute('parity_deriveAddressHash', inAddress(address), password, inDeriveHash(hash), !!shouldSave)
|
||||
.then(outAddress);
|
||||
}
|
||||
|
||||
deriveAddressIndex (address, password, index, shouldSave) {
|
||||
return this._transport
|
||||
.execute('parity_deriveAddressIndex', inAddress(address), password, inDeriveIndex(index), !!shouldSave)
|
||||
.then(outAddress);
|
||||
}
|
||||
|
||||
dropNonReservedPeers () {
|
||||
return this._transport
|
||||
.execute('parity_dropNonReservedPeers');
|
||||
@ -137,6 +151,11 @@ export default class Parity {
|
||||
.execute('parity_executeUpgrade');
|
||||
}
|
||||
|
||||
exportAccount (address, password) {
|
||||
return this._transport
|
||||
.execute('parity_exportAccount', inAddress(address), password);
|
||||
}
|
||||
|
||||
extraData () {
|
||||
return this._transport
|
||||
.execute('parity_extraData');
|
||||
@ -401,6 +420,12 @@ export default class Parity {
|
||||
.execute('parity_removeReservedPeer', encode);
|
||||
}
|
||||
|
||||
removeTransaction (hash) {
|
||||
return this._transport
|
||||
.execute('parity_removeTransaction', inHex(hash))
|
||||
.then(outTransaction);
|
||||
}
|
||||
|
||||
rpcSettings () {
|
||||
return this._transport
|
||||
.execute('parity_rpcSettings');
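A short, hedged usage sketch of the methods added above as they would be called from dapp code; the account, password and hashes are placeholder values:

// assumes an initialised `api` instance connected to a local node
const address = '0x407d73d8a49eeb85d32cf465507dd71d507100c1'; // placeholder account
const password = 'hunter2';                                   // placeholder password

api.parity.deriveAddressHash(address, password, { hash: '0x5', type: 'hard' }, false)
  .then((derived) => console.log('derived via hash:', derived));

api.parity.deriveAddressIndex(address, password, [{ index: 1, type: 'hard' }, 5], false)
  .then((derived) => console.log('derived via index:', derived));

api.parity.exportAccount(address, password)
  .then((wallet) => console.log('wallet JSON:', wallet));

api.parity.removeTransaction('0x2547ea3382099c7c76d33dd468063b32d41016aacb02cbd51ebc14ff5d2b6a43')
  .then((removed) => console.log('removed (or null):', removed));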
|
||||
|
@ -23,7 +23,7 @@ const parityNode = (
|
||||
process.env.PARITY_URL && `http://${process.env.PARITY_URL}`
|
||||
) || (
|
||||
process.env.NODE_ENV === 'production'
|
||||
? 'http://127.0.0.1:8080'
|
||||
? 'http://127.0.0.1:8545'
|
||||
: ''
|
||||
);
|
||||
|
||||
|
@ -379,7 +379,9 @@ export default {
|
||||
gasPrice: '0x2d20cff33',
|
||||
hash: '0x09e64eb1ae32bb9ac415ce4ddb3dbad860af72d9377bb5f073c9628ab413c532',
|
||||
input: '0x',
|
||||
minBlock: null,
|
||||
condition: {
|
||||
block: 1
|
||||
},
|
||||
networkId: null,
|
||||
nonce: '0x0',
|
||||
publicKey: '0x3fa8c08c65a83f6b4ea3e04e1cc70cbe3cd391499e3e05ab7dedf28aff9afc538200ff93e3f2b2cb5029f03c7ebee820d63a4c5a9541c83acebe293f54cacf0e',
|
||||
@ -435,17 +437,17 @@ export default {
|
||||
type: String,
|
||||
desc: 'Capability, either `full` or `light`.'
|
||||
}
|
||||
},
|
||||
example: {
|
||||
availability: 'personal',
|
||||
capability: 'light'
|
||||
}
|
||||
},
|
||||
example: {
|
||||
availability: 'personal',
|
||||
capability: 'light'
|
||||
}
|
||||
},
|
||||
|
||||
netChain: {
|
||||
section: SECTION_NET,
|
||||
desc: 'Returns the name of the connected chain.',
|
||||
desc: 'Returns the name of the connected chain. DEPRECATED use `parity_chain` instead.',
|
||||
params: [],
|
||||
returns: {
|
||||
type: String,
|
||||
@ -454,6 +456,17 @@ export default {
|
||||
}
|
||||
},
|
||||
|
||||
chain: {
|
||||
section: SECTION_NET,
|
||||
desc: 'Returns the name of the connected chain.',
|
||||
params: [],
|
||||
returns: {
|
||||
type: String,
|
||||
desc: 'chain name, one of: "foundation", "kovan", &c., or a filename.',
|
||||
example: 'homestead'
|
||||
}
|
||||
},
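As a hedged illustration, the raw call behind this entry uses the standard JSON-RPC 2.0 envelope; the result depends on the chain the node is running:

// request:  {"method":"parity_chain","params":[],"id":1,"jsonrpc":"2.0"}
// response: {"jsonrpc":"2.0","result":"homestead","id":1}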
|
||||
|
||||
netPeers: {
|
||||
section: SECTION_NET,
|
||||
desc: 'Returns number of peers.',
|
||||
@ -589,7 +602,9 @@ export default {
|
||||
gasPrice: '0xba43b7400',
|
||||
hash: '0x160b3c30ab1cf5871083f97ee1cee3901cfba3b0a2258eb337dd20a7e816b36e',
|
||||
input: '0x095ea7b3000000000000000000000000bf4ed7b27f1d666546e30d74d50d173d20bca75400000000000000000000000000002643c948210b4bd99244ccd64d5555555555',
|
||||
minBlock: null,
|
||||
condition: {
|
||||
block: 1
|
||||
},
|
||||
networkId: 1,
|
||||
nonce: '0x5',
|
||||
publicKey: '0x96157302dade55a1178581333e57d60ffe6fdf5a99607890456a578b4e6b60e335037d61ed58aa4180f9fd747dc50d44a7924aa026acbfb988b5062b629d6c36',
|
||||
@ -609,6 +624,7 @@ export default {
|
||||
},
|
||||
|
||||
pendingTransactionsStats: {
|
||||
section: SECTION_NET,
|
||||
desc: 'Returns propagation stats for transactions in the queue.',
|
||||
params: [],
|
||||
returns: {
|
||||
@ -626,6 +642,49 @@ export default {
|
||||
}
|
||||
},
|
||||
|
||||
removeTransaction: {
|
||||
section: SECTION_NET,
|
||||
desc: 'Removes a transaction from the local transaction pool. Scheduled and not-yet-propagated transactions are safe to remove; removing other transactions may have no effect.',
|
||||
params: [{
|
||||
type: Hash,
|
||||
desc: 'Hash of transaction to remove.',
|
||||
example: '0x2547ea3382099c7c76d33dd468063b32d41016aacb02cbd51ebc14ff5d2b6a43'
|
||||
}],
|
||||
returns: {
|
||||
type: Object,
|
||||
desc: 'Removed transaction or `null`.',
|
||||
details: TransactionResponse.details,
|
||||
example: [
|
||||
{
|
||||
blockHash: null,
|
||||
blockNumber: null,
|
||||
creates: null,
|
||||
from: '0xee3ea02840129123d5397f91be0391283a25bc7d',
|
||||
gas: '0x23b58',
|
||||
gasPrice: '0xba43b7400',
|
||||
hash: '0x160b3c30ab1cf5871083f97ee1cee3901cfba3b0a2258eb337dd20a7e816b36e',
|
||||
input: '0x095ea7b3000000000000000000000000bf4ed7b27f1d666546e30d74d50d173d20bca75400000000000000000000000000002643c948210b4bd99244ccd64d5555555555',
|
||||
condition: {
|
||||
block: 1
|
||||
},
|
||||
networkId: 1,
|
||||
nonce: '0x5',
|
||||
publicKey: '0x96157302dade55a1178581333e57d60ffe6fdf5a99607890456a578b4e6b60e335037d61ed58aa4180f9fd747dc50d44a7924aa026acbfb988b5062b629d6c36',
|
||||
r: '0x92e8beb19af2bad0511d516a86e77fa73004c0811b2173657a55797bdf8558e1',
|
||||
raw: '0xf8aa05850ba43b740083023b5894bb9bc244d798123fde783fcc1c72d3bb8c18941380b844095ea7b3000000000000000000000000bf4ed7b27f1d666546e30d74d50d173d20bca75400000000000000000000000000002643c948210b4bd99244ccd64d555555555526a092e8beb19af2bad0511d516a86e77fa73004c0811b2173657a55797bdf8558e1a062b4d4d125bbcb9c162453bc36ca156537543bb4414d59d1805d37fb63b351b8',
|
||||
s: '0x62b4d4d125bbcb9c162453bc36ca156537543bb4414d59d1805d37fb63b351b8',
|
||||
standardV: '0x1',
|
||||
to: '0xbb9bc244d798123fde783fcc1c72d3bb8c189413',
|
||||
transactionIndex: null,
|
||||
v: '0x26',
|
||||
value: '0x0'
|
||||
},
|
||||
new Dummy('{ ... }'),
|
||||
new Dummy('{ ... }')
|
||||
]
|
||||
}
|
||||
},
|
||||
|
||||
phraseToAddress: {
|
||||
section: SECTION_ACCOUNTS,
|
||||
desc: 'Converts a secret phrase into the corresponding address.',
|
||||
@ -916,7 +975,9 @@ export default {
|
||||
v: '0x25',
|
||||
r: '0xb40c6967a7e8bbdfd99a25fd306b9ef23b80e719514aeb7ddd19e2303d6fc139',
|
||||
s: '0x6bf770ab08119e67dc29817e1412a0e3086f43da308c314db1b3bca9fb6d32bd',
|
||||
minBlock: null
|
||||
condition: {
|
||||
block: 1
|
||||
}
|
||||
},
|
||||
new Dummy('{ ... }, { ... }, ...')
|
||||
]
|
||||
@ -1307,12 +1368,14 @@ export default {
|
||||
params: [
|
||||
{
|
||||
type: Array,
|
||||
desc: 'List of the Geth addresses to import.'
|
||||
desc: 'List of the Geth addresses to import.',
|
||||
example: ['0x407d73d8a49eeb85d32cf465507dd71d507100c1']
|
||||
}
|
||||
],
|
||||
returns: {
|
||||
type: Array,
|
||||
desc: 'Array of the imported addresses.'
|
||||
desc: 'Array of the imported addresses.',
|
||||
example: ['0x407d73d8a49eeb85d32cf465507dd71d507100c1']
|
||||
}
|
||||
},
|
||||
|
||||
@ -1322,7 +1385,114 @@ export default {
|
||||
params: [],
|
||||
returns: {
|
||||
type: Array,
|
||||
desc: '20 Bytes addresses owned by the client.'
|
||||
desc: '20 Bytes addresses owned by the client.',
|
||||
example: ['0x407d73d8a49eeb85d32cf465507dd71d507100c1']
|
||||
}
|
||||
},
|
||||
|
||||
deriveAddressHash: {
|
||||
subdoc: SUBDOC_ACCOUNTS,
|
||||
desc: 'Derives a new address from the given account address using a specific hash.',
|
||||
params: [
|
||||
{
|
||||
type: Address,
|
||||
desc: 'Account address to derive from.',
|
||||
example: '0x407d73d8a49eeb85d32cf465507dd71d507100c1'
|
||||
},
|
||||
{
|
||||
type: String,
|
||||
desc: 'Password to the account.',
|
||||
example: 'hunter2'
|
||||
},
|
||||
{
|
||||
type: Object,
|
||||
desc: 'Derivation hash and type (`soft` or `hard`). E.g. `{ hash: "0x123..123", type: "hard" }`.',
|
||||
example: {
|
||||
hash: '0x2547ea3382099c7c76d33dd468063b32d41016aacb02cbd51ebc14ff5d2b6a43',
|
||||
type: 'hard'
|
||||
}
|
||||
},
|
||||
{
|
||||
type: Boolean,
|
||||
desc: 'Flag indicating if the account should be saved.',
|
||||
example: false
|
||||
}
|
||||
],
|
||||
returns: {
|
||||
type: Address,
|
||||
desc: '20 Bytes new derived address.',
|
||||
example: '0x407d73d8a49eeb85d32cf465507dd71d507100c1'
|
||||
}
|
||||
},
|
||||
|
||||
deriveAddressIndex: {
|
||||
subdoc: SUBDOC_ACCOUNTS,
|
||||
desc: 'Derives a new address from the given account address using hierarchical derivation (a sequence of 32-bit integer indices).',
|
||||
params: [
|
||||
{
|
||||
type: Address,
|
||||
desc: 'Account address to derive from.',
|
||||
example: '0x407d73d8a49eeb85d32cf465507dd71d507100c1'
|
||||
},
|
||||
{
|
||||
type: String,
|
||||
desc: 'Password to the account.',
|
||||
example: 'hunter2'
|
||||
},
|
||||
{
|
||||
type: Array,
|
||||
desc: 'Hierarchical derivation sequence of index and type (`soft` or `hard`). E.g. `[{index:1,type:"hard"},{index:2,type:"soft"}]`.',
|
||||
example: [
|
||||
{ index: 1, type: 'hard' },
|
||||
{ index: 2, type: 'soft' }
|
||||
]
|
||||
},
|
||||
{
|
||||
type: Boolean,
|
||||
desc: 'Flag indicating if the account should be saved.',
|
||||
example: false
|
||||
}
|
||||
],
|
||||
returns: {
|
||||
type: Address,
|
||||
desc: '20 Bytes new derived address.',
|
||||
example: '0x407d73d8a49eeb85d32cf465507dd71d507100c1'
|
||||
}
|
||||
},
|
||||
|
||||
exportAccount: {
|
||||
subdoc: SUBDOC_ACCOUNTS,
|
||||
desc: 'Returns a standard wallet file for given account if password matches.',
|
||||
params: [
|
||||
{
|
||||
type: Address,
|
||||
desc: 'Account address to export.',
|
||||
example: '0x407d73d8a49eeb85d32cf465507dd71d507100c1'
|
||||
},
|
||||
{
|
||||
type: String,
|
||||
desc: 'Password to the account.',
|
||||
example: 'hunter2'
|
||||
}
|
||||
],
|
||||
returns: {
|
||||
type: Object,
|
||||
desc: 'Standard wallet JSON.',
|
||||
example: {
|
||||
'address': '0042e5d2a662eeaca8a7e828c174f98f35d8925b',
|
||||
'crypto': {
|
||||
'cipher': 'aes-128-ctr',
|
||||
'cipherparams': { 'iv': 'a1c6ff99070f8032ca1c4e8add006373' },
|
||||
'ciphertext': 'df27e3db64aa18d984b6439443f73660643c2d119a6f0fa2fa9a6456fc802d75',
|
||||
'kdf': 'pbkdf2',
|
||||
'kdfparams': { 'c': 10240, 'dklen': 32, 'prf': 'hmac-sha256', 'salt': 'ddc325335cda5567a1719313e73b4842511f3e4a837c9658eeb78e51ebe8c815' },
|
||||
'mac': '3dc888ae79cbb226ff9c455669f6cf2d79be72120f2298f6cb0d444fddc0aa3d'
|
||||
},
|
||||
'id': '6a186c80-7797-cff2-bc2e-7c1d6a6cc76e',
|
||||
'meta': '{"passwordHint":"parity-export-test","timestamp":1490017814987}',
|
||||
'name': 'parity-export-test',
|
||||
'version': 3
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@ -1529,6 +1699,23 @@ export default {
|
||||
}
|
||||
},
|
||||
|
||||
setChain: {
|
||||
subdoc: SUBDOC_SET,
|
||||
desc: 'Sets the network spec file Parity is using.',
|
||||
params: [
|
||||
{
|
||||
type: String,
|
||||
desc: 'Chain spec name, one of: "foundation", "ropsten", "morden", "kovan", "olympic", "classic", "dev", "expanse" or a filename.',
|
||||
example: 'foundation'
|
||||
}
|
||||
],
|
||||
returns: {
|
||||
type: Boolean,
|
||||
desc: '`true` if the call succeeded.',
|
||||
example: true
|
||||
}
|
||||
},
|
||||
|
||||
setMode: {
|
||||
subdoc: SUBDOC_SET,
|
||||
desc: 'Changes the operating mode of Parity.',
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import { Quantity, Data, BlockNumber } from '../types';
|
||||
import { Quantity, Data } from '../types';
|
||||
import { fromDecimal, Dummy } from '../helpers';
|
||||
|
||||
export default {
|
||||
@ -71,9 +71,9 @@ export default {
|
||||
desc: 'Gas provided by the sender in Wei.',
|
||||
optional: true
|
||||
},
|
||||
minBlock: {
|
||||
type: BlockNumber,
|
||||
desc: 'Integer block number, or the string `\'latest\'`, `\'earliest\'` or `\'pending\'`. Request will not be propagated till the given block is reached.',
|
||||
condition: {
|
||||
type: Object,
|
||||
desc: 'Condition for scheduled transaction. Can be either an integer block number `{ block: 1 }` or UTC timestamp (in seconds) `{ timestamp: 1491290692 }`.',
|
||||
optional: true
|
||||
}
|
||||
},
|
||||
@ -114,7 +114,7 @@ export default {
|
||||
},
|
||||
|
||||
confirmRequestWithToken: {
|
||||
desc: 'Confirm specific request with token.',
|
||||
desc: 'Confirm specific request with rolling token.',
|
||||
params: [
|
||||
{
|
||||
type: Quantity,
|
||||
@ -135,9 +135,9 @@ export default {
|
||||
desc: 'Gas provided by the sender in Wei.',
|
||||
optional: true
|
||||
},
|
||||
minBlock: {
|
||||
type: BlockNumber,
|
||||
desc: 'Integer block number, or the string `\'latest\'`, `\'earliest\'` or `\'pending\'`. Request will not be propagated till the given block is reached.',
|
||||
condition: {
|
||||
type: Object,
|
||||
desc: 'Conditional submission of the transaction. Can be either an integer block number `{ block: 1 }` or UTC timestamp (in seconds) `{ time: 1491290692 }` or `null`.',
|
||||
optional: true
|
||||
}
|
||||
},
|
||||
@ -145,7 +145,7 @@ export default {
|
||||
},
|
||||
{
|
||||
type: String,
|
||||
desc: 'Password.',
|
||||
desc: 'Password (initially) or a token returned by the previous call.',
|
||||
example: 'hunter2'
|
||||
}
|
||||
],
|
||||
@ -159,7 +159,7 @@ export default {
|
||||
},
|
||||
token: {
|
||||
type: String,
|
||||
desc: 'Token used to authenticate the request.'
|
||||
desc: 'Token used to authenticate the next request.'
|
||||
}
|
||||
},
|
||||
example: {
|
||||
|
@ -102,9 +102,9 @@ export class TransactionRequest {
|
||||
desc: 'Integer of a nonce. This allows you to overwrite your own pending transactions that use the same nonce.',
|
||||
optional: true
|
||||
},
|
||||
minBlock: {
|
||||
type: BlockNumber,
|
||||
desc: 'Delay until this block if specified.',
|
||||
condition: {
|
||||
type: Object,
|
||||
desc: 'Conditional submission of the transaction. Can be either an integer block number `{ block: 1 }` or UTC timestamp (in seconds) `{ time: 1491290692 }` or `null`.',
|
||||
optional: true
|
||||
}
|
||||
}
|
||||
|
@ -28,7 +28,7 @@ export default class SecureApi extends Api {
|
||||
_tokens = [];
|
||||
|
||||
_dappsInterface = null;
|
||||
_dappsPort = 8080;
|
||||
_dappsPort = 8545;
|
||||
_signerPort = 8180;
|
||||
|
||||
static getTransport (url, sysuiToken) {
|
||||
|
183
js/src/views/Signer/components/DecryptRequest/decryptRequest.js
Normal file
183
js/src/views/Signer/components/DecryptRequest/decryptRequest.js
Normal file
@ -0,0 +1,183 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import { observer } from 'mobx-react';
|
||||
import React, { Component, PropTypes } from 'react';
|
||||
import { FormattedMessage } from 'react-intl';
|
||||
import { connect } from 'react-redux';
|
||||
|
||||
import Account from '../Account';
|
||||
import TransactionPendingForm from '../TransactionPendingForm';
|
||||
import RequestOrigin from '../RequestOrigin';
|
||||
|
||||
import styles from '../SignRequest/signRequest.css';
|
||||
|
||||
@observer
|
||||
class DecryptRequest extends Component {
|
||||
static contextTypes = {
|
||||
api: PropTypes.object
|
||||
};
|
||||
|
||||
static propTypes = {
|
||||
accounts: PropTypes.object.isRequired,
|
||||
address: PropTypes.string.isRequired,
|
||||
data: PropTypes.string.isRequired,
|
||||
id: PropTypes.object.isRequired,
|
||||
isFinished: PropTypes.bool.isRequired,
|
||||
netVersion: PropTypes.string.isRequired,
|
||||
signerStore: PropTypes.object.isRequired,
|
||||
|
||||
className: PropTypes.string,
|
||||
focus: PropTypes.bool,
|
||||
isSending: PropTypes.bool,
|
||||
onConfirm: PropTypes.func,
|
||||
onReject: PropTypes.func,
|
||||
origin: PropTypes.any,
|
||||
status: PropTypes.string
|
||||
};
|
||||
|
||||
static defaultProps = {
|
||||
focus: false,
|
||||
origin: {
|
||||
type: 'unknown',
|
||||
details: ''
|
||||
}
|
||||
};
|
||||
|
||||
componentWillMount () {
|
||||
const { address, signerStore } = this.props;
|
||||
|
||||
signerStore.fetchBalance(address);
|
||||
}
|
||||
|
||||
render () {
|
||||
const { className } = this.props;
|
||||
|
||||
return (
|
||||
<div className={ `${styles.container} ${className}` }>
|
||||
{ this.renderDetails() }
|
||||
{ this.renderActions() }
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
renderDetails () {
|
||||
const { api } = this.context;
|
||||
const { address, data, netVersion, origin, signerStore } = this.props;
|
||||
const { balances, externalLink } = signerStore;
|
||||
|
||||
const balance = balances[address];
|
||||
|
||||
if (!balance) {
|
||||
return <div />;
|
||||
}
|
||||
|
||||
return (
|
||||
<div className={ styles.signDetails }>
|
||||
<div className={ styles.address }>
|
||||
<Account
|
||||
address={ address }
|
||||
balance={ balance }
|
||||
className={ styles.account }
|
||||
externalLink={ externalLink }
|
||||
netVersion={ netVersion }
|
||||
/>
|
||||
<RequestOrigin origin={ origin } />
|
||||
</div>
|
||||
<div className={ styles.info } title={ api.util.sha3(data) }>
|
||||
<p>
|
||||
<FormattedMessage
|
||||
id='signer.decryptRequest.request'
|
||||
defaultMessage='A request to decrypt data using your account:'
|
||||
/>
|
||||
</p>
|
||||
|
||||
<div className={ styles.signData }>
|
||||
<p>{ data }</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
renderActions () {
|
||||
const { accounts, address, focus, isFinished, status } = this.props;
|
||||
const account = accounts[address];
|
||||
|
||||
if (isFinished) {
|
||||
if (status === 'confirmed') {
|
||||
return (
|
||||
<div className={ styles.actions }>
|
||||
<span className={ styles.isConfirmed }>
|
||||
<FormattedMessage
|
||||
id='signer.decryptRequest.state.confirmed'
|
||||
defaultMessage='Confirmed'
|
||||
/>
|
||||
</span>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className={ styles.actions }>
|
||||
<span className={ styles.isRejected }>
|
||||
<FormattedMessage
|
||||
id='signer.decryptRequest.state.rejected'
|
||||
defaultMessage='Rejected'
|
||||
/>
|
||||
</span>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<TransactionPendingForm
|
||||
account={ account }
|
||||
address={ address }
|
||||
focus={ focus }
|
||||
isSending={ this.props.isSending }
|
||||
netVersion={ this.props.netVersion }
|
||||
onConfirm={ this.onConfirm }
|
||||
onReject={ this.onReject }
|
||||
className={ styles.actions }
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
onConfirm = (data) => {
|
||||
const { id } = this.props;
|
||||
const { password } = data;
|
||||
|
||||
this.props.onConfirm({ id, password });
|
||||
}
|
||||
|
||||
onReject = () => {
|
||||
this.props.onReject(this.props.id);
|
||||
}
|
||||
}
|
||||
|
||||
function mapStateToProps (state) {
|
||||
const { accounts } = state.personal;
|
||||
|
||||
return {
|
||||
accounts
|
||||
};
|
||||
}
|
||||
|
||||
export default connect(
|
||||
mapStateToProps,
|
||||
null
|
||||
)(DecryptRequest);
|
17
js/src/views/Signer/components/DecryptRequest/index.js
Normal file
17
js/src/views/Signer/components/DecryptRequest/index.js
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
export default from './decryptRequest';
|
@ -33,7 +33,7 @@
|
||||
}
|
||||
|
||||
.hash {
|
||||
margin-left: .2em;
|
||||
margin-left: .5em;
|
||||
}
|
||||
|
||||
.hash, .url {
|
||||
|
@ -16,8 +16,9 @@
|
||||
|
||||
import React, { Component, PropTypes } from 'react';
|
||||
|
||||
import TransactionPending from '../TransactionPending';
|
||||
import DecryptRequest from '../DecryptRequest';
|
||||
import SignRequest from '../SignRequest';
|
||||
import TransactionPending from '../TransactionPending';
|
||||
|
||||
export default class RequestPending extends Component {
|
||||
static propTypes = {
|
||||
@ -32,6 +33,7 @@ export default class RequestPending extends Component {
|
||||
onReject: PropTypes.func.isRequired,
|
||||
origin: PropTypes.object.isRequired,
|
||||
payload: PropTypes.oneOfType([
|
||||
PropTypes.shape({ decrypt: PropTypes.object.isRequired }),
|
||||
PropTypes.shape({ sendTransaction: PropTypes.object.isRequired }),
|
||||
PropTypes.shape({ sign: PropTypes.object.isRequired }),
|
||||
PropTypes.shape({ signTransaction: PropTypes.object.isRequired })
|
||||
@ -68,6 +70,27 @@ export default class RequestPending extends Component {
|
||||
);
|
||||
}
|
||||
|
||||
if (payload.decrypt) {
|
||||
const { decrypt } = payload;
|
||||
|
||||
return (
|
||||
<DecryptRequest
|
||||
address={ decrypt.address }
|
||||
className={ className }
|
||||
focus={ focus }
|
||||
data={ decrypt.msg }
|
||||
id={ id }
|
||||
isFinished={ false }
|
||||
isSending={ isSending }
|
||||
netVersion={ netVersion }
|
||||
onConfirm={ this.onConfirm }
|
||||
onReject={ onReject }
|
||||
origin={ origin }
|
||||
signerStore={ signerStore }
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
const transaction = payload.sendTransaction || payload.signTransaction;
|
||||
|
||||
if (transaction) {
|
||||
|
@ -19,7 +19,7 @@
|
||||
|
||||
.container {
|
||||
display: flex;
|
||||
padding: 1.5em 0 1em;
|
||||
padding: 1.5em 1em 1.5em 0;
|
||||
}
|
||||
|
||||
.actions, .signDetails {
|
||||
@ -28,35 +28,43 @@
|
||||
}
|
||||
|
||||
.signData {
|
||||
border: 0.25em solid red;
|
||||
padding: 0.5em;
|
||||
margin-left: -2em;
|
||||
overflow: auto;
|
||||
max-height: 6em;
|
||||
border: 0.25em solid red;
|
||||
margin-left: 2em;
|
||||
padding: 0.5em;
|
||||
overflow: auto;
|
||||
max-height: 6em;
|
||||
max-width: calc(100% - 2em);
|
||||
}
|
||||
|
||||
.signData > p {
|
||||
color: white;
|
||||
color: white;
|
||||
}
|
||||
|
||||
.signDetails {
|
||||
flex: 10;
|
||||
flex: 1;
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
.account img {
|
||||
display: inline-block;
|
||||
height: 50px;
|
||||
margin: 5px;
|
||||
width: 50px;
|
||||
}
|
||||
|
||||
.address, .info {
|
||||
box-sizing: border-box;
|
||||
display: inline-block;
|
||||
width: 50%;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
.address {
|
||||
padding-right: $accountPadding;
|
||||
width: 40%;
|
||||
}
|
||||
|
||||
.info {
|
||||
padding: 0 30px;
|
||||
color: #E53935;
|
||||
vertical-align: top;
|
||||
width: 60%;
|
||||
}
|
||||
|
||||
.info p:first-child {
|
||||
@ -81,10 +89,3 @@
|
||||
display: inline-block;
|
||||
min-height: $finishedHeight;
|
||||
}
|
||||
|
||||
.signDetails img {
|
||||
display: inline-block;
|
||||
width: 50px;
|
||||
height: 50px;
|
||||
margin: 5px;
|
||||
}
|
||||
|
@ -123,6 +123,7 @@ class SignRequest extends Component {
|
||||
<Account
|
||||
address={ address }
|
||||
balance={ balance }
|
||||
className={ styles.account }
|
||||
externalLink={ externalLink }
|
||||
netVersion={ netVersion }
|
||||
/>
|
||||
@ -155,9 +156,7 @@ class SignRequest extends Component {
|
||||
|
||||
renderActions () {
|
||||
const { accounts, address, focus, isFinished, status } = this.props;
|
||||
const account = Object
|
||||
.values(accounts)
|
||||
.find((account) => address === account.address.toLowerCase());
|
||||
const account = accounts[address];
|
||||
|
||||
if (isFinished) {
|
||||
if (status === 'confirmed') {
|
||||
@ -191,6 +190,7 @@ class SignRequest extends Component {
|
||||
address={ address }
|
||||
focus={ focus }
|
||||
isSending={ this.props.isSending }
|
||||
netVersion={ this.props.netVersion }
|
||||
onConfirm={ this.onConfirm }
|
||||
onReject={ this.onReject }
|
||||
className={ styles.actions }
|
||||
|
@ -171,13 +171,13 @@ function addProxies (app) {
|
||||
}));
|
||||
|
||||
app.use('/api', proxy({
|
||||
target: 'http://127.0.0.1:8080',
|
||||
target: 'http://127.0.0.1:8545',
|
||||
changeOrigin: true,
|
||||
autoRewrite: true
|
||||
}));
|
||||
|
||||
app.use('/app', proxy({
|
||||
target: 'http://127.0.0.1:8080',
|
||||
target: 'http://127.0.0.1:8545',
|
||||
changeOrigin: true,
|
||||
pathRewrite: {
|
||||
'^/app': ''
|
||||
@ -193,7 +193,7 @@ function addProxies (app) {
|
||||
}));
|
||||
|
||||
app.use('/rpc', proxy({
|
||||
target: 'http://127.0.0.1:8080',
|
||||
target: 'http://127.0.0.1:8545',
|
||||
changeOrigin: true
|
||||
}));
|
||||
}
|
||||
|
@ -69,8 +69,11 @@ pass = "test_pass"
|
||||
|
||||
[secretstore]
|
||||
disable = false
|
||||
port = 8082
|
||||
nodes = []
|
||||
http_interface = "local"
|
||||
http_port = 8082
|
||||
interface = "local"
|
||||
port = 8083
|
||||
path = "$HOME/.parity/secretstore"
|
||||
|
||||
[ipfs]
|
||||
|
@ -38,7 +38,8 @@ user = "username"
|
||||
pass = "password"
|
||||
|
||||
[secretstore]
|
||||
port = 8082
|
||||
http_port = 8082
|
||||
port = 8083
|
||||
|
||||
[ipfs]
|
||||
enable = false
|
||||
|
@ -187,10 +187,18 @@ usage! {
|
||||
// Secret Store
|
||||
flag_no_secretstore: bool = false,
|
||||
or |c: &Config| otry!(c.secretstore).disable.clone(),
|
||||
flag_secretstore_port: u16 = 8082u16,
|
||||
or |c: &Config| otry!(c.secretstore).port.clone(),
|
||||
flag_secretstore_secret: Option<String> = None,
|
||||
or |c: &Config| otry!(c.secretstore).self_secret.clone().map(Some),
|
||||
flag_secretstore_nodes: String = "",
|
||||
or |c: &Config| otry!(c.secretstore).nodes.as_ref().map(|vec| vec.join(",")),
|
||||
flag_secretstore_interface: String = "local",
|
||||
or |c: &Config| otry!(c.secretstore).interface.clone(),
|
||||
flag_secretstore_port: u16 = 8083u16,
|
||||
or |c: &Config| otry!(c.secretstore).port.clone(),
|
||||
flag_secretstore_http_interface: String = "local",
|
||||
or |c: &Config| otry!(c.secretstore).http_interface.clone(),
|
||||
flag_secretstore_http_port: u16 = 8082u16,
|
||||
or |c: &Config| otry!(c.secretstore).http_port.clone(),
|
||||
flag_secretstore_path: String = "$BASE/secretstore",
|
||||
or |c: &Config| otry!(c.secretstore).path.clone(),
|
||||
|
||||
@ -454,8 +462,12 @@ struct Dapps {
|
||||
#[derive(Default, Debug, PartialEq, RustcDecodable)]
|
||||
struct SecretStore {
|
||||
disable: Option<bool>,
|
||||
port: Option<u16>,
|
||||
self_secret: Option<String>,
|
||||
nodes: Option<Vec<String>>,
|
||||
interface: Option<String>,
|
||||
port: Option<u16>,
|
||||
http_interface: Option<String>,
|
||||
http_port: Option<u16>,
|
||||
path: Option<String>,
|
||||
}
|
||||
|
||||
@ -697,8 +709,12 @@ mod tests {
|
||||
flag_no_dapps: false,
|
||||
|
||||
flag_no_secretstore: false,
|
||||
flag_secretstore_port: 8082u16,
|
||||
flag_secretstore_secret: None,
|
||||
flag_secretstore_nodes: "".into(),
|
||||
flag_secretstore_interface: "local".into(),
|
||||
flag_secretstore_port: 8083u16,
|
||||
flag_secretstore_http_interface: "local".into(),
|
||||
flag_secretstore_http_port: 8082u16,
|
||||
flag_secretstore_path: "$HOME/.parity/secretstore".into(),
|
||||
|
||||
// IPFS
|
||||
@ -909,8 +925,12 @@ mod tests {
|
||||
}),
|
||||
secretstore: Some(SecretStore {
|
||||
disable: None,
|
||||
port: Some(8082),
|
||||
self_secret: None,
|
||||
nodes: None,
|
||||
interface: None,
|
||||
port: Some(8083),
|
||||
http_interface: None,
|
||||
http_port: Some(8082),
|
||||
path: None,
|
||||
}),
|
||||
ipfs: Some(Ipfs {
|
||||
|
@ -22,398 +22,408 @@ Usage:
|
||||
parity db kill [options]
|
||||
|
||||
Operating Options:
|
||||
--mode MODE Set the operating mode. MODE can be one of:
|
||||
last - Uses the last-used mode, active if none.
|
||||
active - Parity continuously syncs the chain.
|
||||
passive - Parity syncs initially, then sleeps and
|
||||
wakes regularly to resync.
|
||||
dark - Parity syncs only when the RPC is active.
|
||||
offline - Parity doesn't sync. (default: {flag_mode}).
|
||||
--mode-timeout SECS Specify the number of seconds before inactivity
|
||||
timeout occurs when mode is dark or passive
|
||||
(default: {flag_mode_timeout}).
|
||||
--mode-alarm SECS Specify the number of seconds before auto sleep
|
||||
reawake timeout occurs when mode is passive
|
||||
(default: {flag_mode_alarm}).
|
||||
--auto-update SET Set a releases set to automatically update and
|
||||
install.
|
||||
all - All updates in the our release track.
|
||||
critical - Only consensus/security updates.
|
||||
none - No updates will be auto-installed.
|
||||
(default: {flag_auto_update}).
|
||||
--release-track TRACK Set which release track we should use for updates.
|
||||
stable - Stable releases.
|
||||
beta - Beta releases.
|
||||
nightly - Nightly releases (unstable).
|
||||
testing - Testing releases (do not use).
|
||||
current - Whatever track this executable was
|
||||
released on (default: {flag_release_track}).
|
||||
--public-node Start Parity as a public web server. Account storage
|
||||
and transaction signing will be delegated to the UI.
|
||||
(default: {flag_public_node}).
|
||||
--no-download Normally new releases will be downloaded ready for
|
||||
updating. This disables it. Not recommended.
|
||||
(default: {flag_no_download}).
|
||||
--no-consensus Force the binary to run even if there are known
|
||||
issues regarding consensus. Not recommended.
|
||||
(default: {flag_no_consensus}).
|
||||
--force-direct Run the originally installed version of Parity,
|
||||
ignoring any updates that have since been installed.
|
||||
--chain CHAIN Specify the blockchain type. CHAIN may be either a
|
||||
JSON chain specification file or olympic, frontier,
|
||||
homestead, mainnet, morden, ropsten, classic, expanse,
|
||||
testnet, kovan or dev (default: {flag_chain}).
|
||||
-d --base-path PATH Specify the base data storage path.
|
||||
(default: {flag_base_path}).
|
||||
--db-path PATH Specify the database directory path
|
||||
(default: {flag_db_path}).
|
||||
--keys-path PATH Specify the path for JSON key files to be found
|
||||
(default: {flag_keys_path}).
|
||||
--identity NAME Specify your node's name. (default: {flag_identity})
|
||||
--light Experimental: run in light client mode. Light clients
|
||||
synchronize a bare minimum of data and fetch necessary
|
||||
data on-demand from the network. Much lower in storage,
|
||||
potentially higher in bandwidth. Has no effect with
|
||||
subcommands (default: {flag_light}).
|
||||
--mode MODE Set the operating mode. MODE can be one of:
|
||||
last - Uses the last-used mode, active if none.
|
||||
active - Parity continuously syncs the chain.
|
||||
passive - Parity syncs initially, then sleeps and
|
||||
wakes regularly to resync.
|
||||
dark - Parity syncs only when the RPC is active.
|
||||
offline - Parity doesn't sync. (default: {flag_mode}).
|
||||
--mode-timeout SECS Specify the number of seconds before inactivity
|
||||
timeout occurs when mode is dark or passive
|
||||
(default: {flag_mode_timeout}).
|
||||
--mode-alarm SECS Specify the number of seconds before auto sleep
|
||||
reawake timeout occurs when mode is passive
|
||||
(default: {flag_mode_alarm}).
|
||||
--auto-update SET Set a releases set to automatically update and
|
||||
install.
|
||||
all - All updates in the our release track.
|
||||
critical - Only consensus/security updates.
|
||||
none - No updates will be auto-installed.
|
||||
(default: {flag_auto_update}).
|
||||
--release-track TRACK Set which release track we should use for updates.
|
||||
stable - Stable releases.
|
||||
beta - Beta releases.
|
||||
nightly - Nightly releases (unstable).
|
||||
testing - Testing releases (do not use).
|
||||
current - Whatever track this executable was
|
||||
released on (default: {flag_release_track}).
|
||||
--public-node Start Parity as a public web server. Account storage
|
||||
and transaction signing will be delegated to the UI.
|
||||
(default: {flag_public_node}).
|
||||
--no-download Normally new releases will be downloaded ready for
|
||||
updating. This disables it. Not recommended.
|
||||
(default: {flag_no_download}).
|
||||
--no-consensus Force the binary to run even if there are known
|
||||
issues regarding consensus. Not recommended.
|
||||
(default: {flag_no_consensus}).
|
||||
--force-direct Run the originally installed version of Parity,
|
||||
ignoring any updates that have since been installed.
|
||||
--chain CHAIN Specify the blockchain type. CHAIN may be either a
|
||||
JSON chain specification file or olympic, frontier,
|
||||
homestead, mainnet, morden, ropsten, classic, expanse,
|
||||
testnet, kovan or dev (default: {flag_chain}).
|
||||
-d --base-path PATH Specify the base data storage path.
|
||||
(default: {flag_base_path}).
|
||||
--db-path PATH Specify the database directory path
|
||||
(default: {flag_db_path}).
|
||||
--keys-path PATH Specify the path for JSON key files to be found
|
||||
(default: {flag_keys_path}).
|
||||
--identity NAME Specify your node's name. (default: {flag_identity})
|
||||
--light Experimental: run in light client mode. Light clients
|
||||
synchronize a bare minimum of data and fetch necessary
|
||||
data on-demand from the network. Much lower in storage,
|
||||
potentially higher in bandwidth. Has no effect with
|
||||
subcommands (default: {flag_light}).
|
||||
|
||||
Account Options:
  --unlock ACCOUNTS            Unlock ACCOUNTS for the duration of the execution.
                               ACCOUNTS is a comma-delimited list of addresses.
                               Implies --no-ui. (default: {flag_unlock:?})
  --password FILE              Provide a file containing a password for unlocking
                               an account. Leading and trailing whitespace is trimmed.
                               (default: {flag_password:?})
  --keys-iterations NUM        Specify the number of iterations to use when
                               deriving key from the password (bigger is more
                               secure) (default: {flag_keys_iterations}).
  --no-hardware-wallets        Disables hardware wallet support. (default: {flag_no_hardware_wallets})

UI Options:
  --force-ui                   Enable Trusted UI WebSocket endpoint,
                               even when --unlock is in use. (default: ${flag_force_ui})
  --no-ui                      Disable Trusted UI WebSocket endpoint.
                               (default: ${flag_no_ui})
  --ui-port PORT               Specify the port of Trusted UI server
                               (default: {flag_ui_port}).
  --ui-interface IP            Specify the hostname portion of the Trusted UI
                               server, IP should be an interface's IP address,
                               or local (default: {flag_ui_interface}).
  --ui-path PATH               Specify directory where Trusted UIs tokens should
                               be stored. (default: {flag_ui_path})
  --ui-no-validation           Disable Origin and Host headers validation for
                               Trusted UI. WARNING: INSECURE. Used only for
                               development. (default: {flag_ui_no_validation})

Networking Options:
  --no-warp                    Disable syncing from the snapshot over the network. (default: {flag_no_warp})
  --port PORT                  Override the port on which the node should listen
                               (default: {flag_port}).
  --min-peers NUM              Try to maintain at least NUM peers (default: {flag_min_peers}).
  --max-peers NUM              Allow up to NUM peers (default: {flag_max_peers}).
  --snapshot-peers NUM         Allow additional NUM peers for a snapshot sync
                               (default: {flag_snapshot_peers}).
  --nat METHOD                 Specify method to use for determining public
                               address. Must be one of: any, none, upnp,
                               extip:<IP> (default: {flag_nat}).
  --network-id INDEX           Override the network identifier from the chain we
                               are on. (default: {flag_network_id:?})
  --bootnodes NODES            Override the bootnodes from our chain. NODES should
                               be comma-delimited enodes. (default: {flag_bootnodes:?})
  --no-discovery               Disable new peer discovery. (default: {flag_no_discovery})
  --node-key KEY               Specify node secret key, either as 64-character hex
                               string or input to SHA3 operation. (default: {flag_node_key:?})
  --reserved-peers FILE        Provide a file containing enodes, one per line.
                               These nodes will always have a reserved slot on top
                               of the normal maximum peers. (default: {flag_reserved_peers:?})
  --reserved-only              Connect only to reserved nodes. (default: {flag_reserved_only})
  --allow-ips FILTER           Filter outbound connections. Must be one of:
                               private - connect to private network IP addresses only;
                               public - connect to public network IP addresses only;
                               all - connect to any IP address.
                               (default: {flag_allow_ips})
  --max-pending-peers NUM      Allow up to NUM pending connections. (default: {flag_max_pending_peers})
  --no-ancient-blocks          Disable downloading old blocks after snapshot restoration
                               or warp sync. (default: {flag_no_ancient_blocks})
  --no-serve-light             Disable serving of light peers. (default: {flag_no_serve_light})

API and Console Options:
  --no-jsonrpc                 Disable the JSON-RPC API server. (default: {flag_no_jsonrpc})
  --jsonrpc-port PORT          Specify the port portion of the JSONRPC API server
                               (default: {flag_jsonrpc_port}).
  --jsonrpc-interface IP       Specify the hostname portion of the JSONRPC API
                               server, IP should be an interface's IP address, or
                               all (all interfaces) or local (default: {flag_jsonrpc_interface}).
  --jsonrpc-cors URL           Specify CORS header for JSON-RPC API responses.
                               (default: {flag_jsonrpc_cors:?})
  --jsonrpc-apis APIS          Specify the APIs available through the JSONRPC
                               interface. APIS is a comma-delimited list of API
                               names. Possible names are all, safe, web3, eth, net, personal,
                               parity, parity_set, traces, rpc, parity_accounts.
                               You can also disable a specific API by putting '-' in front: all,-personal
                               (default: {flag_jsonrpc_apis}).
  --jsonrpc-hosts HOSTS        List of allowed Host header values. This option will
                               validate the Host header sent by the browser; it
                               is additional security against some attack
                               vectors. Special options: "all", "none"
                               (default: {flag_jsonrpc_hosts}).
  --jsonrpc-threads THREADS    Enables experimental faster implementation of JSON-RPC server.
                               Requires Dapps server to be disabled using --no-dapps. (default: {flag_jsonrpc_threads:?})

  --no-ipc                     Disable JSON-RPC over IPC service. (default: {flag_no_ipc})
  --ipc-path PATH              Specify custom path for JSON-RPC over IPC service
                               (default: {flag_ipc_path}).
  --ipc-apis APIS              Specify custom API set available via JSON-RPC over
                               IPC (default: {flag_ipc_apis}).

  --no-dapps                   Disable the Dapps server (e.g. status page). (default: {flag_no_dapps})
  --dapps-path PATH            Specify directory where dapps should be installed.
                               (default: {flag_dapps_path})
  --ipfs-api                   Enable IPFS-compatible HTTP API. (default: {flag_ipfs_api})
  --ipfs-api-port PORT         Configure on which port the IPFS HTTP API should listen.
                               (default: {flag_ipfs_api_port})
  --ipfs-api-interface IP      Specify the hostname portion of the IPFS API server,
                               IP should be an interface's IP address or local.
                               (default: {flag_ipfs_api_interface})
  --ipfs-api-cors URL          Specify CORS header for IPFS API responses.
                               (default: {flag_ipfs_api_cors:?})
  --ipfs-api-hosts HOSTS       List of allowed Host header values. This option will
                               validate the Host header sent by the browser, it
                               is additional security against some attack
                               vectors. Special options: "all", "none"
                               (default: {flag_ipfs_api_hosts}).

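For example, with the extended --jsonrpc-apis syntax documented above, a node can expose every API over HTTP except the personal module (illustrative invocation only):

    parity --jsonrpc-apis all,-personal
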
Secret Store Options:
  --no-secretstore             Disable Secret Store functionality. (default: {flag_no_secretstore})
  --secretstore-secret SECRET  Hex-encoded secret key of this node.
                               (required, default: {flag_secretstore_secret:?}).
  --secretstore-nodes NODES    Comma-separated list of other secret store cluster nodes in form
                               NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT.
                               (required, default: {flag_secretstore_nodes}).
  --secretstore-interface IP   Specify the hostname portion for listening to Secret Store Key Server
                               internal requests, IP should be an interface's IP address, or local
                               (default: {flag_secretstore_interface}).
  --secretstore-port PORT      Specify the port portion for listening to Secret Store Key Server
                               internal requests (default: {flag_secretstore_port}).
  --secretstore-http-interface IP  Specify the hostname portion for listening to Secret Store Key Server
                               HTTP requests, IP should be an interface's IP address, or local
                               (default: {flag_secretstore_http_interface}).
  --secretstore-http-port PORT Specify the port portion for listening to Secret Store Key Server
                               HTTP requests (default: {flag_secretstore_http_port}).
  --secretstore-path PATH      Specify directory where Secret Store should save its data.
                               (default: {flag_secretstore_path}).

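For illustration, the new cluster flags might be combined as follows; the secret, public keys and addresses are placeholders rather than values taken from this change:

    parity --secretstore-secret <this_node_secret_hex> \
           --secretstore-nodes <node1_public_hex>@10.0.0.1:8083,<node2_public_hex>@10.0.0.2:8083
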
Sealing/Mining Options:
  --author ADDRESS             Specify the block author (aka "coinbase") address
                               for sending block rewards from sealed blocks.
                               NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION.
                               (default: {flag_author:?})
  --engine-signer ADDRESS      Specify the address which should be used to
                               sign consensus messages and issue blocks.
                               Relevant only to non-PoW chains.
                               (default: {flag_engine_signer:?})
  --force-sealing              Force the node to author new blocks as if it were
                               always sealing/mining.
                               (default: {flag_force_sealing})
  --reseal-on-txs SET          Specify which transactions should force the node
                               to reseal a block. SET is one of:
                               none - never reseal on new transactions;
                               own - reseal only on a new local transaction;
                               ext - reseal only on a new external transaction;
                               all - reseal on all new transactions
                               (default: {flag_reseal_on_txs}).
  --reseal-min-period MS       Specify the minimum time between reseals from
                               incoming transactions. MS is time measured in
                               milliseconds (default: {flag_reseal_min_period}).
  --reseal-max-period MS       Specify the maximum time since last block to enable
                               force-sealing. MS is time measured in
                               milliseconds (default: {flag_reseal_max_period}).
  --work-queue-size ITEMS      Specify the number of historical work packages
                               which are kept cached lest a solution is found for
                               them later. High values take more memory but result
                               in fewer unusable solutions (default: {flag_work_queue_size}).
  --tx-gas-limit GAS           Apply a limit of GAS as the maximum amount of gas
                               a single transaction may have for it to be mined.
                               (default: {flag_tx_gas_limit:?})
  --tx-time-limit MS           Maximal time for processing single transaction.
                               If enabled senders/recipients/code of transactions
                               offending the limit will be banned from being included
                               in transaction queue for 180 seconds.
                               (default: {flag_tx_time_limit:?})
  --relay-set SET              Set of transactions to relay. SET may be:
                               cheap - Relay any transaction in the queue (this
                               may include invalid transactions);
                               strict - Relay only executed transactions (this
                               guarantees we don't relay invalid transactions, but
                               means we relay nothing if not mining);
                               lenient - Same as strict when mining, and cheap
                               when not (default: {flag_relay_set}).
  --usd-per-tx USD             Amount of USD to be paid for a basic transaction
                               (default: {flag_usd_per_tx}). The minimum gas price is set
                               accordingly.
  --usd-per-eth SOURCE         USD value of a single ETH. SOURCE may be either an
                               amount in USD, a web service or 'auto' to use each
                               web service in turn and fallback on the last known
                               good value (default: {flag_usd_per_eth}).
  --price-update-period T      T will be allowed to pass between each gas price
                               update. T may be daily, hourly, a number of seconds,
                               or a time string of the form "2 days", "30 minutes"
                               etc. (default: {flag_price_update_period}).
  --gas-floor-target GAS       Amount of gas per block to target when sealing a new
                               block (default: {flag_gas_floor_target}).
  --gas-cap GAS                A cap on how large we will raise the gas limit per
                               block due to transaction volume (default: {flag_gas_cap}).
  --extra-data STRING          Specify a custom extra-data for authored blocks, no
                               more than 32 characters. (default: {flag_extra_data:?})
  --tx-queue-size LIMIT        Maximum amount of transactions in the queue (waiting
                               to be included in next block) (default: {flag_tx_queue_size}).
  --tx-queue-gas LIMIT         Maximum amount of total gas for external transactions in
                               the queue. LIMIT can be either an amount of gas or
                               'auto' or 'off'. 'auto' sets the limit to be 20x
                               the current block gas limit. (default: {flag_tx_queue_gas}).
  --tx-queue-strategy S        Prioritization strategy used to order transactions
                               in the queue. S may be:
                               gas - Prioritize txs with low gas limit;
                               gas_price - Prioritize txs with high gas price;
                               gas_factor - Prioritize txs using gas price
                               and gas limit ratio (default: {flag_tx_queue_strategy}).
  --tx-queue-ban-count C       Number of times maximal time for execution (--tx-time-limit)
                               can be exceeded before banning sender/recipient/code.
                               (default: {flag_tx_queue_ban_count})
  --tx-queue-ban-time SEC      Banning time (in seconds) for offenders of specified
                               execution time limit. Also number of offending actions
                               have to reach the threshold within that time.
                               (default: {flag_tx_queue_ban_time} seconds)
  --remove-solved              Move solved blocks from the work package queue
                               instead of cloning them. This gives a slightly
                               faster import speed, but means that extra solutions
                               submitted for the same work package will go unused.
                               (default: {flag_remove_solved})
  --notify-work URLS           URLs to which work package notifications are pushed.
                               URLS should be a comma-delimited list of HTTP URLs.
                               (default: {flag_notify_work:?})
  --refuse-service-transactions  Always refuse service transactions.
                               (default: {flag_refuse_service_transactions}).
  --stratum                    Run Stratum server for miner push notification. (default: {flag_stratum})
  --stratum-interface IP       Interface address for Stratum server. (default: {flag_stratum_interface})
  --stratum-port PORT          Port for Stratum server to listen on. (default: {flag_stratum_port})
  --stratum-secret STRING      Secret for authorizing Stratum server for peers.
                               (default: {flag_stratum_secret:?})

Footprint Options:
  --tracing BOOL               Indicates if full transaction tracing should be
                               enabled. Works only if client had been fully synced
                               with tracing enabled. BOOL may be one of auto, on,
                               off. auto uses last used value of this option (off
                               if it does not exist) (default: {flag_tracing}).
  --pruning METHOD             Configure pruning of the state/storage trie. METHOD
                               may be one of auto, archive, fast:
                               archive - keep all state trie data. No pruning.
                               fast - maintain journal overlay. Fast but 50MB used.
                               auto - use the method most recently synced or
                               default to fast if none synced (default: {flag_pruning}).
  --pruning-history NUM        Set a minimum number of recent states to keep when pruning
                               is active. (default: {flag_pruning_history}).
  --pruning-memory MB          The ideal amount of memory in megabytes to use to store
                               recent states. As many states as possible will be kept
                               within this limit, and at least --pruning-history states
                               will always be kept. (default: {flag_pruning_memory})
  --cache-size-db MB           Override database cache size (default: {flag_cache_size_db}).
  --cache-size-blocks MB       Specify the preferred size of the blockchain cache in
                               megabytes (default: {flag_cache_size_blocks}).
  --cache-size-queue MB        Specify the maximum size of memory to use for block
                               queue (default: {flag_cache_size_queue}).
  --cache-size-state MB        Specify the maximum size of memory to use for
                               the state cache (default: {flag_cache_size_state}).
  --cache-size MB              Set total amount of discretionary memory to use for
                               the entire system, overrides other cache and queue
                               options. (default: {flag_cache_size:?})
  --fast-and-loose             Disables DB WAL, which gives a significant speed up
                               but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose})
  --db-compaction TYPE         Database compaction type. TYPE may be one of:
                               ssd - suitable for SSDs and fast HDDs;
                               hdd - suitable for slow HDDs;
                               auto - determine automatically (default: {flag_db_compaction}).
  --fat-db BOOL                Build appropriate information to allow enumeration
                               of all accounts and storage keys. Doubles the size
                               of the state database. BOOL may be one of on, off
                               or auto. (default: {flag_fat_db})
  --scale-verifiers            Automatically scale amount of verifier threads based on
                               workload. Not guaranteed to be faster.
                               (default: {flag_scale_verifiers})
  --num-verifiers INT          Amount of verifier threads to use or to begin with, if verifier
                               auto-scaling is enabled. (default: {flag_num_verifiers:?})

Import/Export Options:
  --from BLOCK                 Export from block BLOCK, which may be an index or
                               hash (default: {flag_from}).
  --to BLOCK                   Export to (including) block BLOCK, which may be an
                               index, hash or 'latest' (default: {flag_to}).
  --format FORMAT              For import/export in given format. FORMAT must be
                               one of 'hex' and 'binary'.
                               (default: {flag_format:?} = Import: auto, Export: binary)
  --no-seal-check              Skip block seal check. (default: {flag_no_seal_check})
  --at BLOCK                   Export state at the given block, which may be an
                               index, hash, or 'latest'. (default: {flag_at})
  --no-storage                 Don't export account storage. (default: {flag_no_storage})
  --no-code                    Don't export account code. (default: {flag_no_code})
  --min-balance WEI            Don't export accounts with balance less than specified.
                               (default: {flag_min_balance:?})
  --max-balance WEI            Don't export accounts with balance greater than specified.
                               (default: {flag_max_balance:?})

Snapshot Options:
  --at BLOCK                   Take a snapshot at the given block, which may be an
                               index, hash, or 'latest'. Note that taking snapshots at
                               non-recent blocks will only work with --pruning archive
                               (default: {flag_at})
  --no-periodic-snapshot       Disable automated snapshots which usually occur once
                               every 10000 blocks. (default: {flag_no_periodic_snapshot})

Virtual Machine Options:
  --jitvm                      Enable the JIT VM. (default: {flag_jitvm})

Legacy Options:
  --geth                       Run in Geth-compatibility mode. Sets the IPC path
                               to be the same as Geth's. Overrides the --ipc-path
                               and --ipcpath options. Alters RPCs to reflect Geth
                               bugs. Includes the personal_ RPC by default.
  --testnet                    Testnet mode. Equivalent to --chain testnet.
                               Overrides the --keys-path option.
  --import-geth-keys           Attempt to import keys from Geth client.
  --datadir PATH               Equivalent to --base-path PATH.
  --networkid INDEX            Equivalent to --network-id INDEX.
  --peers NUM                  Equivalent to --min-peers NUM.
  --nodekey KEY                Equivalent to --node-key KEY.
  --nodiscover                 Equivalent to --no-discovery.
  -j --jsonrpc                 Does nothing; JSON-RPC is on by default now.
  --jsonrpc-off                Equivalent to --no-jsonrpc.
  -w --webapp                  Does nothing; dapps server is on by default now.
  --dapps-off                  Equivalent to --no-dapps.
  --dapps-user USERNAME        Dapps server authentication has been removed. (default: {flag_dapps_user:?})
  --dapps-pass PASSWORD        Dapps server authentication has been removed. (default: {flag_dapps_pass:?})
  --dapps-apis-all             Dapps server is merged with RPC server. Use --jsonrpc-apis. (default: {flag_dapps_apis_all:?})
  --dapps-cors URL             Dapps server is merged with RPC server. Use --jsonrpc-cors. (default: {flag_dapps_cors:?})
  --dapps-hosts HOSTS          Dapps server is merged with RPC server. Use --jsonrpc-hosts. (default: {flag_dapps_hosts:?})
  --dapps-interface IP         Dapps server is merged with RPC server. Use --jsonrpc-interface. (default: {flag_dapps_interface:?})
  --dapps-port PORT            Dapps server is merged with RPC server. Use --jsonrpc-port. (default: {flag_dapps_port:?})
  --rpc                        Does nothing; JSON-RPC is on by default now.
  --warp                       Does nothing; Warp sync is on by default. (default: {flag_warp})
  --rpcaddr IP                 Equivalent to --jsonrpc-interface IP.
  --rpcport PORT               Equivalent to --jsonrpc-port PORT.
  --rpcapi APIS                Equivalent to --jsonrpc-apis APIS.
  --rpccorsdomain URL          Equivalent to --jsonrpc-cors URL.
  --ipcdisable                 Equivalent to --no-ipc.
  --ipc-off                    Equivalent to --no-ipc.
  --ipcapi APIS                Equivalent to --ipc-apis APIS.
  --ipcpath PATH               Equivalent to --ipc-path PATH.
  --gasprice WEI               Minimum amount of Wei per GAS to be paid for a
                               transaction to be accepted for mining. Overrides
                               --basic-tx-usd.
  --etherbase ADDRESS          Equivalent to --author ADDRESS.
  --extradata STRING           Equivalent to --extra-data STRING.
  --cache MB                   Equivalent to --cache-size MB.

Internal Options:
  --can-restart                Executable will auto-restart if exiting with 69.

Miscellaneous Options:
  -c --config CONFIG           Specify a filename containing a configuration file.
                               (default: {flag_config})
  -l --logging LOGGING         Specify the logging level. Must conform to the same
                               format as RUST_LOG. (default: {flag_logging:?})
  --log-file FILENAME          Specify a filename into which logging should be
                               appended. (default: {flag_log_file:?})
  --no-config                  Don't load a configuration file.
  --no-color                   Don't use terminal color codes in output. (default: {flag_no_color})
  -v --version                 Show information about version.
  -h --help                    Show this screen.

@ -18,18 +18,20 @@ use std::time::Duration;
use std::io::{Read, Write, stderr};
use std::net::SocketAddr;
use std::path::{Path, PathBuf};
use std::collections::BTreeMap;
use std::cmp::max;
use cli::{Args, ArgsError};
use util::{Hashable, H256, U256, Uint, Bytes, version_data, Address};
use util::journaldb::Algorithm;
use util::Colour;
use ethsync::{NetworkConfiguration, is_valid_node_url, AllowIP};
use ethcore::ethstore::ethkey::Secret;
use ethcore::ethstore::ethkey::{Secret, Public};
use ethcore::client::{VMType};
use ethcore::miner::{MinerOptions, Banning, StratumOptions};
use ethcore::verification::queue::VerifierSettings;

use rpc::{IpcConfiguration, HttpConfiguration};
use rpc_apis::ApiSet;
use ethcore_rpc::NetworkSettings;
use cache::CacheConfig;
use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, replace_home_for_db,
@ -135,7 +137,7 @@ impl Configuration {
        let mut dapps_conf = self.dapps_config();
        let ipfs_conf = self.ipfs_config();
        let signer_conf = self.signer_config();
        let secretstore_conf = self.secretstore_config();
        let secretstore_conf = self.secretstore_config()?;
        let format = self.format()?;

        if self.args.flag_jsonrpc_threads.is_some() && dapps_conf.enabled {
@ -570,13 +572,17 @@ impl Configuration {
        }
    }

    fn secretstore_config(&self) -> SecretStoreConfiguration {
        SecretStoreConfiguration {
    fn secretstore_config(&self) -> Result<SecretStoreConfiguration, String> {
        Ok(SecretStoreConfiguration {
            enabled: self.secretstore_enabled(),
            self_secret: self.secretstore_self_secret()?,
            nodes: self.secretstore_nodes()?,
            interface: self.secretstore_interface(),
            port: self.args.flag_secretstore_port,
            http_interface: self.secretstore_http_interface(),
            http_port: self.args.flag_secretstore_http_port,
            data_path: self.directories().secretstore,
        }
        })
    }

    fn ipfs_config(&self) -> IpfsConfiguration {
@ -718,16 +724,7 @@ impl Configuration {
            .collect();

        if self.args.flag_geth {
            apis.push("personal");
        }

        if self.args.flag_public_node {
            apis.retain(|api| {
                match *api {
                    "eth" | "net" | "parity" | "rpc" | "web3" => true,
                    _ => false
                }
            });
            apis.insert(0, "personal");
        }

        apis.join(",")
@ -788,7 +785,10 @@ impl Configuration {
            enabled: self.rpc_enabled(),
            interface: self.rpc_interface(),
            port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port),
            apis: self.rpc_apis().parse()?,
            apis: match self.args.flag_public_node {
                false => self.rpc_apis().parse()?,
                true => self.rpc_apis().parse::<ApiSet>()?.retain(ApiSet::PublicContext),
            },
            hosts: self.rpc_hosts(),
            cors: self.rpc_cors(),
            threads: match self.args.flag_jsonrpc_threads {
@ -918,10 +918,43 @@ impl Configuration {
    }

    fn secretstore_interface(&self) -> String {
        match self.args.flag_secretstore_interface.as_str() {
            "local" => "127.0.0.1",
            x => x,
        }.into()
        Self::interface(&self.args.flag_secretstore_interface)
    }

    fn secretstore_http_interface(&self) -> String {
        Self::interface(&self.args.flag_secretstore_http_interface)
    }

    fn secretstore_self_secret(&self) -> Result<Option<Secret>, String> {
        match self.args.flag_secretstore_secret {
            Some(ref s) => Ok(Some(s.parse()
                .map_err(|e| format!("Invalid secret store secret: {}. Error: {:?}", s, e))?)),
            None => Ok(None),
        }
    }

    fn secretstore_nodes(&self) -> Result<BTreeMap<Public, (String, u16)>, String> {
        let mut nodes = BTreeMap::new();
        for node in self.args.flag_secretstore_nodes.split(',').filter(|n| n != &"") {
            let public_and_addr: Vec<_> = node.split('@').collect();
            if public_and_addr.len() != 2 {
                return Err(format!("Invalid secret store node: {}", node));
            }

            let ip_and_port: Vec<_> = public_and_addr[1].split(':').collect();
            if ip_and_port.len() != 2 {
                return Err(format!("Invalid secret store node: {}", node));
            }

            let public = public_and_addr[0].parse()
                .map_err(|e| format!("Invalid public key in secret store node: {}. Error: {:?}", public_and_addr[0], e))?;
            let port = ip_and_port[1].parse()
                .map_err(|e| format!("Invalid port in secret store node: {}. Error: {:?}", ip_and_port[1], e))?;

            nodes.insert(public, (ip_and_port[0].into(), port));
        }

        Ok(nodes)
    }

    fn stratum_interface(&self) -> String {

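As a rough sketch of the NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT format accepted by secretstore_nodes() above, the same splitting logic can be written standalone (the function name and example shape are illustrative, not part of this change):

    // Splits one "pubkey@ip:port" entry the same way secretstore_nodes() does,
    // returning (public key hex, ip, port) or an error string.
    fn split_node_entry(node: &str) -> Result<(String, String, u16), String> {
        let public_and_addr: Vec<_> = node.split('@').collect();
        if public_and_addr.len() != 2 {
            return Err(format!("Invalid secret store node: {}", node));
        }
        let ip_and_port: Vec<_> = public_and_addr[1].split(':').collect();
        if ip_and_port.len() != 2 {
            return Err(format!("Invalid secret store node: {}", node));
        }
        let port = ip_and_port[1].parse::<u16>()
            .map_err(|e| format!("Invalid port in secret store node: {}. Error: {:?}", ip_and_port[1], e))?;
        Ok((public_and_addr[0].to_owned(), ip_and_port[0].to_owned(), port))
    }
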
@ -82,7 +82,7 @@ impl ContractClient for FullRegistrar {
// TODO: light client implementation forwarding to OnDemand and waiting for future
// to resolve.
pub struct Dependencies {
    pub sync_status: Arc<::parity_dapps::SyncStatus>,
    pub sync_status: Arc<SyncStatus>,
    pub contract_client: Arc<ContractClient>,
    pub remote: parity_reactor::TokioRemote,
    pub fetch: FetchClient,
@ -103,8 +103,7 @@ pub fn new(configuration: Configuration, deps: Dependencies)
    ).map(Some)
}

pub use self::server::Middleware;
pub use self::server::dapps_middleware;
pub use self::server::{SyncStatus, Middleware, dapps_middleware};

#[cfg(not(feature = "dapps"))]
mod server {
@ -112,11 +111,12 @@ mod server {
    use std::path::PathBuf;
    use ethcore_rpc::{hyper, RequestMiddleware, RequestMiddlewareAction};

    pub struct Middleware;
    pub type SyncStatus = Fn() -> bool;

    pub struct Middleware;
    impl RequestMiddleware for Middleware {
        fn on_request(
            &self, req: &hyper::server::Request<hyper::net::HttpStream>, control: &hyper::Control
            &self, _req: &hyper::server::Request<hyper::net::HttpStream>, _control: &hyper::Control
        ) -> RequestMiddlewareAction {
            unreachable!()
        }
@ -137,11 +137,11 @@ mod server {
    use std::path::PathBuf;
    use std::sync::Arc;

    use hash_fetch::fetch::Client as FetchClient;
    use parity_dapps;
    use parity_reactor;

    pub type Middleware = parity_dapps::Middleware<FetchClient>;
    pub use parity_dapps::Middleware;
    pub use parity_dapps::SyncStatus;

    pub fn dapps_middleware(
        deps: Dependencies,

@ -85,9 +85,17 @@ impl FromStr for Api {

#[derive(Debug, Clone)]
pub enum ApiSet {
    // Safe context (like token-protected WS interface)
    SafeContext,
    // Unsafe context (like jsonrpc over http)
    UnsafeContext,
    // Public context (like public jsonrpc over http)
    PublicContext,
    // All possible APIs
    All,
    // Local "unsafe" context and accounts access
    IpcContext,
    // Fixed list of APis
    List(HashSet<Api>),
}

@ -107,10 +115,30 @@ impl FromStr for ApiSet {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        s.split(',')
            .map(Api::from_str)
            .collect::<Result<_, _>>()
            .map(ApiSet::List)
        let mut apis = HashSet::new();

        for api in s.split(',') {
            match api {
                "all" => {
                    apis.extend(ApiSet::All.list_apis());
                },
                "safe" => {
                    // Safe APIs are those that are safe even in UnsafeContext.
                    apis.extend(ApiSet::UnsafeContext.list_apis());
                },
                // Remove the API
                api if api.starts_with("-") => {
                    let api = api[1..].parse()?;
                    apis.remove(&api);
                },
                api => {
                    let api = api.parse()?;
                    apis.insert(api);
                },
            }
        }

        Ok(ApiSet::List(apis))
    }
}

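A small usage sketch of the parser added above (ApiSet and Api as defined in this file; it mirrors the tests added further down):

    // "all" expands to the full API list, then "-personal" removes one entry.
    fn example_parse() -> Result<(), String> {
        use std::str::FromStr;
        let set = ApiSet::from_str("all,-personal")?;
        assert!(set.list_apis().contains(&Api::Eth));
        assert!(!set.list_apis().contains(&Api::Personal));
        Ok(())
    }
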
@ -402,21 +430,41 @@ impl Dependencies for LightDependencies {
}

impl ApiSet {
    /// Retains only APIs in given set.
    pub fn retain(self, set: Self) -> Self {
        ApiSet::List(&self.list_apis() & &set.list_apis())
    }

    pub fn list_apis(&self) -> HashSet<Api> {
        let mut safe_list = vec![Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc]
            .into_iter().collect();
        let mut public_list = vec![
            Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Rpc,
        ].into_iter().collect();
        match *self {
            ApiSet::List(ref apis) => apis.clone(),
            ApiSet::UnsafeContext => safe_list,
            ApiSet::PublicContext => public_list,
            ApiSet::UnsafeContext => {
                public_list.insert(Api::Traces);
                public_list
            },
            ApiSet::IpcContext => {
                safe_list.insert(Api::ParityAccounts);
                safe_list
                public_list.insert(Api::Traces);
                public_list.insert(Api::ParityAccounts);
                public_list
            },
            ApiSet::SafeContext => {
                safe_list.insert(Api::ParityAccounts);
                safe_list.insert(Api::ParitySet);
                safe_list.insert(Api::Signer);
                safe_list
                public_list.insert(Api::Traces);
                public_list.insert(Api::ParityAccounts);
                public_list.insert(Api::ParitySet);
                public_list.insert(Api::Signer);
                public_list
            },
            ApiSet::All => {
                public_list.insert(Api::Traces);
                public_list.insert(Api::ParityAccounts);
                public_list.insert(Api::ParitySet);
                public_list.insert(Api::Signer);
                public_list.insert(Api::Personal);
                public_list
            },
        }
    }

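As a usage sketch of retain() above: this is the same call configuration.rs makes for --public-node, narrowing a user-supplied set down to the public context (the wrapper function name is illustrative):

    // Keep only the APIs that are also allowed in the public context.
    fn public_subset(user_set: ApiSet) -> ApiSet {
        user_set.retain(ApiSet::PublicContext)
    }
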
@ -492,4 +540,30 @@ mod test {
        ].into_iter().collect();
        assert_eq!(ApiSet::SafeContext.list_apis(), expected);
    }

    #[test]
    fn test_all_apis() {
        assert_eq!("all".parse::<ApiSet>().unwrap(), ApiSet::List(vec![
            Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc,
            Api::ParityAccounts,
            Api::ParitySet, Api::Signer,
            Api::Personal
        ].into_iter().collect()));
    }

    #[test]
    fn test_all_without_personal_apis() {
        assert_eq!("personal,all,-personal".parse::<ApiSet>().unwrap(), ApiSet::List(vec![
            Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc,
            Api::ParityAccounts,
            Api::ParitySet, Api::Signer,
        ].into_iter().collect()));
    }

    #[test]
    fn test_safe_parsing() {
        assert_eq!("safe".parse::<ApiSet>().unwrap(), ApiSet::List(vec![
            Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc,
        ].into_iter().collect()));
    }
}

@ -651,7 +651,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
    let secretstore_deps = secretstore::Dependencies {
        client: client.clone(),
    };
    let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps);
    let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps)?;

    // the ipfs server
    let ipfs_server = ipfs::start_server(cmd.ipfs_conf.clone(), client.clone())?;

@ -14,9 +14,11 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::BTreeMap;
use std::sync::Arc;
use dir::default_data_path;
use ethcore::client::Client;
use ethkey::{Secret, Public};
use helpers::replace_home;

#[derive(Debug, PartialEq, Clone)]
@ -24,10 +26,18 @@ use helpers::replace_home;
pub struct Configuration {
    /// Is secret store functionality enabled?
    pub enabled: bool,
    /// This node secret.
    pub self_secret: Option<Secret>,
    /// Other nodes IDs + addresses.
    pub nodes: BTreeMap<Public, (String, u16)>,
    /// Interface to listen to
    pub interface: String,
    /// Port to listen to
    pub port: u16,
    /// Interface to listen to
    pub http_interface: String,
    /// Port to listen to
    pub http_port: u16,
    /// Data directory path for secret store
    pub data_path: String,
}
@ -55,8 +65,8 @@ mod server {

#[cfg(feature="secretstore")]
mod server {
    use ethkey;
    use ethcore_secretstore;
    use ethkey::KeyPair;
    use super::{Configuration, Dependencies};

    /// Key server
@ -67,37 +77,35 @@ mod server {
    impl KeyServer {
        /// Create new key server
        pub fn new(conf: Configuration, deps: Dependencies) -> Result<Self, String> {
            let key_pairs = vec![
                ethkey::KeyPair::from_secret("6c26a76e9b31048d170873a791401c7e799a11f0cefc0171cc31a49800967509".parse().unwrap()).unwrap(),
                ethkey::KeyPair::from_secret("7e94018b3731afdb3b4e6f4c3e179475640166da12e1d1b0c7d80729b1a5b452".parse().unwrap()).unwrap(),
                ethkey::KeyPair::from_secret("5ab6ed2a52c33142380032c39a03a86b12eacb3fa4b53bc16d84f51318156f8c".parse().unwrap()).unwrap(),
            ];
            let conf = ethcore_secretstore::ServiceConfiguration {
            let self_secret = conf.self_secret.ok_or("self secret is required when using secretstore")?;
            let mut conf = ethcore_secretstore::ServiceConfiguration {
                listener_address: ethcore_secretstore::NodeAddress {
                    address: conf.interface.clone(),
                    port: conf.port,
                    address: conf.http_interface.clone(),
                    port: conf.http_port,
                },
                data_path: conf.data_path.clone(),
                // TODO: this is test configuration. how it will be configured in production?
                cluster_config: ethcore_secretstore::ClusterConfiguration {
                    threads: 4,
                    self_private: (***key_pairs[(conf.port - 8082) as usize].secret()).into(),
                    self_private: (**self_secret).into(),
                    listener_address: ethcore_secretstore::NodeAddress {
                        address: conf.interface.clone(),
                        port: conf.port + 10,
                        port: conf.port,
                    },
                    nodes: key_pairs.iter().enumerate().map(|(i, kp)| (kp.public().clone(),
                        ethcore_secretstore::NodeAddress {
                            address: conf.interface.clone(),
                            port: 8082 + 10 + (i as u16),
                        })).collect(),
                    nodes: conf.nodes.into_iter().map(|(p, (ip, port))| (p, ethcore_secretstore::NodeAddress {
                        address: ip,
                        port: port,
                    })).collect(),
                    allow_connecting_to_higher_nodes: true,
                    encryption_config: ethcore_secretstore::EncryptionConfiguration {
                        key_check_timeout_ms: 1000,
                    },
                }
                },
            };

            let self_key_pair = KeyPair::from_secret(self_secret.clone())
                .map_err(|e| format!("valid secret is required when using secretstore. Error: {}", e))?;
            conf.cluster_config.nodes.insert(self_key_pair.public().clone(), conf.cluster_config.listener_address.clone());

            let key_server = ethcore_secretstore::start(deps.client, conf)
                .map_err(Into::<String>::into)?;

@ -115,8 +123,12 @@ impl Default for Configuration {
        let data_dir = default_data_path();
        Configuration {
            enabled: true,
            self_secret: None,
            nodes: BTreeMap::new(),
            interface: "127.0.0.1".to_owned(),
            port: 8082,
            port: 8083,
            http_interface: "127.0.0.1".to_owned(),
            http_port: 8082,
            data_path: replace_home(&data_dir, "$BASE/secretstore"),
        }
    }

@ -18,6 +18,9 @@ serde_derive = "0.9"
serde_json = "0.9"
time = "0.1"
transient-hashmap = "0.4"
cid = "0.2.1"
multihash = "0.5"
rust-crypto = "0.2.36"

jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }

@ -27,6 +27,9 @@ extern crate serde;
extern crate serde_json;
extern crate time;
extern crate transient_hashmap;
extern crate cid;
extern crate multihash;
extern crate crypto as rust_crypto;

extern crate jsonrpc_core;
extern crate jsonrpc_http_server as http;

@ -44,6 +44,7 @@ mod codes {
    pub const REQUEST_REJECTED_LIMIT: i64 = -32041;
    pub const REQUEST_NOT_FOUND: i64 = -32042;
    pub const ENCRYPTION_ERROR: i64 = -32055;
    pub const ENCODING_ERROR: i64 = -32058;
    pub const FETCH_ERROR: i64 = -32060;
    pub const NO_LIGHT_PEERS: i64 = -32065;
    pub const DEPRECATED: i64 = -32070;
@ -224,6 +225,14 @@ pub fn encryption_error<T: fmt::Debug>(error: T) -> Error {
    }
}

pub fn encoding_error<T: fmt::Debug>(error: T) -> Error {
    Error {
        code: ErrorCode::ServerError(codes::ENCODING_ERROR),
        message: "Encoding error.".into(),
        data: Some(Value::String(format!("{:?}", error))),
    }
}

pub fn database_error<T: fmt::Debug>(error: T) -> Error {
    Error {
        code: ErrorCode::ServerError(codes::DATABASE_ERROR),

38
rpc/src/v1/helpers/ipfs.rs
Normal file
38
rpc/src/v1/helpers/ipfs.rs
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! IPFS utility functions
|
||||
|
||||
use multihash;
|
||||
use cid::{Cid, Codec, Version};
|
||||
use rust_crypto::sha2::Sha256;
|
||||
use rust_crypto::digest::Digest;
|
||||
use jsonrpc_core::Error;
|
||||
use v1::types::Bytes;
|
||||
use super::errors;
|
||||
|
||||
/// Compute CIDv0 from protobuf encoded bytes.
|
||||
pub fn cid(content: Bytes) -> Result<String, Error> {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.input(&content.0);
|
||||
let len = hasher.output_bytes();
|
||||
let mut buf = Vec::with_capacity(len);
|
||||
buf.resize(len, 0);
|
||||
hasher.result(&mut buf);
|
||||
let mh = multihash::encode(multihash::Hash::SHA2256, &buf).map_err(errors::encoding_error)?;
|
||||
let cid = Cid::new(Codec::DagProtobuf, Version::V0, &mh);
|
||||
Ok(cid.to_string().into())
|
||||
}
|
@ -24,6 +24,7 @@ pub mod fake_sign;
|
||||
pub mod light_fetch;
|
||||
pub mod informant;
|
||||
pub mod oneshot;
|
||||
pub mod ipfs;
|
||||
|
||||
mod network_settings;
|
||||
mod poll_manager;
|
||||
|
@ -30,7 +30,7 @@ use ethcore::account_provider::AccountProvider;
|
||||
|
||||
use jsonrpc_core::Error;
|
||||
use jsonrpc_macros::Trailing;
|
||||
use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings};
|
||||
use v1::helpers::{errors, ipfs, SigningQueue, SignerService, NetworkSettings};
|
||||
use v1::helpers::dispatch::{LightDispatcher, DEFAULT_MAC};
|
||||
use v1::helpers::light_fetch::LightFetch;
|
||||
use v1::metadata::Metadata;
|
||||
@ -387,4 +387,8 @@ impl Parity for ParityClient {
|
||||
|
||||
self.fetcher().header(number.0.into()).map(from_encoded).boxed()
|
||||
}
|
||||
|
||||
fn ipfs_cid(&self, content: Bytes) -> Result<String, Error> {
|
||||
ipfs::cid(content)
|
||||
}
|
||||
}
|
||||
|
@ -37,7 +37,7 @@ use updater::{Service as UpdateService};
|
||||
|
||||
use jsonrpc_core::Error;
|
||||
use jsonrpc_macros::Trailing;
|
||||
use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings};
|
||||
use v1::helpers::{errors, ipfs, SigningQueue, SignerService, NetworkSettings};
|
||||
use v1::helpers::accounts::unwrap_provider;
|
||||
use v1::helpers::dispatch::DEFAULT_MAC;
|
||||
use v1::metadata::Metadata;
|
||||
@ -428,4 +428,8 @@ impl<C, M, S: ?Sized, U> Parity for ParityClient<C, M, S, U> where
|
||||
extra_info: client.block_extra_info(id).expect(EXTRA_INFO_PROOF),
|
||||
}).boxed()
|
||||
}
|
||||
|
||||
fn ipfs_cid(&self, content: Bytes) -> Result<String, Error> {
|
||||
ipfs::cid(content)
|
||||
}
|
||||
}
|
||||
|
@ -510,3 +510,14 @@ fn rpc_parity_node_kind() {
|
||||
|
||||
assert_eq!(io.handle_request_sync(request), Some(response.to_owned()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rpc_parity_cid() {
|
||||
let deps = Dependencies::new();
|
||||
let io = deps.default_client();
|
||||
|
||||
let request = r#"{"jsonrpc": "2.0", "method": "parity_cidV0", "params":["0x414243"], "id": 1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":"QmSF59MAENc8ZhM4aM1thuAE8w5gDmyfzkAvNoyPea7aDz","id":1}"#;
|
||||
|
||||
assert_eq!(io.handle_request_sync(request), Some(response.to_owned()));
|
||||
}
|
||||
|
@ -203,5 +203,9 @@ build_rpc_trait! {
|
||||
/// Same as `eth_getBlockByNumber` but without uncles and transactions.
|
||||
#[rpc(async, name = "parity_getBlockHeaderByNumber")]
|
||||
fn block_header(&self, Trailing<BlockNumber>) -> BoxFuture<RichHeader, Error>;
|
||||
|
||||
/// Get IPFS CIDv0 given protobuf encoded bytes.
|
||||
#[rpc(name = "parity_cidV0")]
|
||||
fn ipfs_cid(&self, Bytes) -> Result<String, Error>;
|
||||
}
|
||||
}
|
||||
|
@ -20,7 +20,8 @@ serde_derive = "0.9"
|
||||
futures = "0.1"
|
||||
futures-cpupool = "0.1"
|
||||
rustc-serialize = "0.3"
|
||||
tokio-core = "0.1"
|
||||
tokio-core = "0.1.6"
|
||||
tokio-io = "0.1.0"
|
||||
tokio-service = "0.1"
|
||||
tokio-proto = "0.1"
|
||||
url = "1.0"
|
||||
|
@ -21,11 +21,13 @@ use hyper::method::Method as HttpMethod;
|
||||
use hyper::status::StatusCode as HttpStatusCode;
|
||||
use hyper::server::{Server as HttpServer, Request as HttpRequest, Response as HttpResponse, Handler as HttpHandler,
|
||||
Listening as HttpListening};
|
||||
use serde_json;
|
||||
use url::percent_encoding::percent_decode;
|
||||
|
||||
use util::ToPretty;
|
||||
use traits::KeyServer;
|
||||
use types::all::{Error, ServiceConfiguration, RequestSignature, DocumentAddress, DocumentEncryptedKey};
|
||||
use serialization::SerializableDocumentEncryptedKeyShadow;
|
||||
use types::all::{Error, ServiceConfiguration, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow};
|
||||
|
||||
/// Key server http-requests listener
|
||||
pub struct KeyServerHttpListener<T: KeyServer + 'static> {
|
||||
@ -42,6 +44,8 @@ enum Request {
|
||||
GenerateDocumentKey(DocumentAddress, RequestSignature, usize),
|
||||
/// Request encryption key of given document for given requestor.
|
||||
GetDocumentKey(DocumentAddress, RequestSignature),
|
||||
/// Request shadow of encryption key of given document for given requestor.
|
||||
GetDocumentKeyShadow(DocumentAddress, RequestSignature),
|
||||
}
|
||||
|
||||
/// Cloneable http handler
|
||||
@ -83,6 +87,10 @@ impl<T> KeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {
|
||||
fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error> {
|
||||
self.handler.key_server.document_key(signature, document)
|
||||
}
|
||||
|
||||
fn document_key_shadow(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKeyShadow, Error> {
|
||||
self.handler.key_server.document_key_shadow(signature, document)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> HttpHandler for KeyServerHttpHandler<T> where T: KeyServer + 'static {
|
||||
@ -111,6 +119,34 @@ impl<T> HttpHandler for KeyServerHttpHandler<T> where T: KeyServer + 'static {
|
||||
err
|
||||
}));
|
||||
},
|
||||
Request::GetDocumentKeyShadow(document, signature) => {
|
||||
match self.handler.key_server.document_key_shadow(&signature, &document)
|
||||
.map_err(|err| {
|
||||
warn!(target: "secretstore", "GetDocumentKeyShadow request {} has failed with: {}", req_uri, err);
|
||||
err
|
||||
}) {
|
||||
Ok(document_key_shadow) => {
|
||||
let document_key_shadow = SerializableDocumentEncryptedKeyShadow {
|
||||
decrypted_secret: document_key_shadow.decrypted_secret.into(),
|
||||
common_point: document_key_shadow.common_point.expect("always filled when requesting document_key_shadow; qed").into(),
|
||||
decrypt_shadows: document_key_shadow.decrypt_shadows.expect("always filled when requesting document_key_shadow; qed").into_iter().map(Into::into).collect(),
|
||||
};
|
||||
match serde_json::to_vec(&document_key_shadow) {
|
||||
Ok(document_key) => {
|
||||
res.headers_mut().set(header::ContentType::json());
|
||||
if let Err(err) = res.send(&document_key) {
|
||||
// nothing to do, but to log an error
|
||||
warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err);
|
||||
}
|
||||
},
|
||||
Err(err) => {
|
||||
warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err);
|
||||
}
|
||||
}
|
||||
},
|
||||
Err(err) => return_error(res, err),
|
||||
}
|
||||
},
|
||||
Request::Invalid => {
|
||||
warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri);
|
||||
*res.status_mut() = HttpStatusCode::BadRequest;
|
||||
@ -134,11 +170,17 @@ fn return_document_key(req: HttpRequest, mut res: HttpResponse, document_key: Re
|
||||
warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err);
|
||||
}
|
||||
},
|
||||
Err(Error::BadSignature) => *res.status_mut() = HttpStatusCode::BadRequest,
|
||||
Err(Error::AccessDenied) => *res.status_mut() = HttpStatusCode::Forbidden,
|
||||
Err(Error::DocumentNotFound) => *res.status_mut() = HttpStatusCode::NotFound,
|
||||
Err(Error::Database(_)) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
||||
Err(Error::Internal(_)) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
||||
Err(err) => return_error(res, err),
|
||||
}
|
||||
}
|
||||
|
||||
fn return_error(mut res: HttpResponse, err: Error) {
|
||||
match err {
|
||||
Error::BadSignature => *res.status_mut() = HttpStatusCode::BadRequest,
|
||||
Error::AccessDenied => *res.status_mut() = HttpStatusCode::Forbidden,
|
||||
Error::DocumentNotFound => *res.status_mut() = HttpStatusCode::NotFound,
|
||||
Error::Database(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
||||
Error::Internal(_) => *res.status_mut() = HttpStatusCode::InternalServerError,
|
||||
}
|
||||
}
|
||||
|
||||
@ -149,17 +191,27 @@ fn parse_request(method: &HttpMethod, uri_path: &str) -> Request {
|
||||
};
|
||||
|
||||
let path: Vec<String> = uri_path.trim_left_matches('/').split('/').map(Into::into).collect();
|
||||
if path.len() < 2 || path[0].is_empty() || path[1].is_empty() {
|
||||
if path.len() == 0 {
|
||||
return Request::Invalid;
|
||||
}
|
||||
let (args_prefix, args_offset) = if &path[0] == "shadow" {
|
||||
("shadow", 1)
|
||||
} else {
|
||||
("", 0)
|
||||
};
|
||||
|
||||
if path.len() < 2 + args_offset || path[args_offset].is_empty() || path[args_offset + 1].is_empty() {
|
||||
return Request::Invalid;
|
||||
}
|
||||
|
||||
let args_len = path.len();
|
||||
let document = path[0].parse();
|
||||
let signature = path[1].parse();
|
||||
let threshold = (if args_len > 2 { &path[2] } else { "" }).parse();
|
||||
match (args_len, method, document, signature, threshold) {
|
||||
(3, &HttpMethod::Post, Ok(document), Ok(signature), Ok(threshold)) => Request::GenerateDocumentKey(document, signature, threshold),
|
||||
(2, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKey(document, signature),
|
||||
let document = path[args_offset].parse();
|
||||
let signature = path[args_offset + 1].parse();
|
||||
let threshold = (if args_len > args_offset + 2 { &path[args_offset + 2] } else { "" }).parse();
|
||||
match (args_prefix, args_len, method, document, signature, threshold) {
|
||||
("", 3, &HttpMethod::Post, Ok(document), Ok(signature), Ok(threshold)) => Request::GenerateDocumentKey(document, signature, threshold),
|
||||
("", 2, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKey(document, signature),
|
||||
("shadow", 3, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKeyShadow(document, signature),
|
||||
_ => Request::Invalid,
|
||||
}
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ use super::acl_storage::AclStorage;
|
||||
use super::key_storage::KeyStorage;
|
||||
use key_server_cluster::ClusterCore;
|
||||
use traits::KeyServer;
|
||||
use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, ClusterConfiguration};
|
||||
use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow, ClusterConfiguration};
|
||||
use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration};
|
||||
|
||||
/// Secret store key server implementation
|
||||
@ -38,7 +38,7 @@ pub struct KeyServerImpl {
|
||||
pub struct KeyServerCore {
|
||||
close: Option<futures::Complete<()>>,
|
||||
handle: Option<thread::JoinHandle<()>>,
|
||||
cluster: Option<Arc<ClusterClient>>,
|
||||
cluster: Arc<ClusterClient>,
|
||||
}
|
||||
|
||||
impl KeyServerImpl {
|
||||
@ -53,7 +53,6 @@ impl KeyServerImpl {
|
||||
/// Get cluster client reference.
|
||||
pub fn cluster(&self) -> Arc<ClusterClient> {
|
||||
self.data.lock().cluster.clone()
|
||||
.expect("cluster can be None in test cfg only; test cfg is for correct tests; qed")
|
||||
}
|
||||
}
|
||||
|
||||
@ -64,9 +63,7 @@ impl KeyServer for KeyServerImpl {
|
||||
.map_err(|_| Error::BadSignature)?;
|
||||
|
||||
// generate document key
|
||||
let data = self.data.lock();
|
||||
let encryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed")
|
||||
.new_encryption_session(document.clone(), threshold)?;
|
||||
let encryption_session = self.data.lock().cluster.new_encryption_session(document.clone(), threshold)?;
|
||||
let document_key = encryption_session.wait()?;
|
||||
|
||||
// encrypt document key with requestor public key
|
||||
@ -80,17 +77,21 @@ impl KeyServer for KeyServerImpl {
|
||||
let public = ethkey::recover(signature, document)
|
||||
.map_err(|_| Error::BadSignature)?;
|
||||
|
||||
|
||||
// decrypt document key
|
||||
let data = self.data.lock();
|
||||
let decryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed")
|
||||
.new_decryption_session(document.clone(), signature.clone())?;
|
||||
let document_key = decryption_session.wait()?;
|
||||
let decryption_session = self.data.lock().cluster.new_decryption_session(document.clone(), signature.clone(), false)?;
|
||||
let document_key = decryption_session.wait()?.decrypted_secret;
|
||||
|
||||
// encrypt document key with requestor public key
|
||||
let document_key = ethcrypto::ecies::encrypt_single_message(&public, &document_key)
|
||||
.map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?;
|
||||
Ok(document_key)
|
||||
}
|
||||
|
||||
fn document_key_shadow(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKeyShadow, Error> {
|
||||
let decryption_session = self.data.lock().cluster.new_decryption_session(document.clone(), signature.clone(), false)?;
|
||||
decryption_session.wait().map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl KeyServerCore {
|
||||
@ -129,7 +130,7 @@ impl KeyServerCore {
|
||||
Ok(KeyServerCore {
|
||||
close: Some(stop),
|
||||
handle: Some(handle),
|
||||
cluster: Some(cluster),
|
||||
cluster: cluster,
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -149,24 +150,9 @@ mod tests {
|
||||
use ethkey::{self, Random, Generator};
|
||||
use acl_storage::tests::DummyAclStorage;
|
||||
use key_storage::tests::DummyKeyStorage;
|
||||
use types::all::{ClusterConfiguration, NodeAddress, EncryptionConfiguration, DocumentEncryptedKey, DocumentKey};
|
||||
use super::super::{RequestSignature, DocumentAddress};
|
||||
use types::all::{ClusterConfiguration, NodeAddress, EncryptionConfiguration};
|
||||
use super::{KeyServer, KeyServerImpl};
|
||||
|
||||
const DOCUMENT1: &'static str = "0000000000000000000000000000000000000000000000000000000000000001";
|
||||
const PRIVATE1: &'static str = "03055e18a8434dcc9061cc1b81c4ef84dc7cf4574d755e52cdcf0c8898b25b11";
|
||||
|
||||
fn make_signature(secret: &str, document: &'static str) -> RequestSignature {
|
||||
let secret = secret.parse().unwrap();
|
||||
let document: DocumentAddress = document.into();
|
||||
ethkey::sign(&secret, &document).unwrap()
|
||||
}
|
||||
|
||||
fn decrypt_document_key(secret: &str, document_key: DocumentEncryptedKey) -> DocumentKey {
|
||||
let secret = secret.parse().unwrap();
|
||||
ethcrypto::ecies::decrypt_single_message(&secret, &document_key).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn document_key_generation_and_retrievement_works_over_network() {
|
||||
//::util::log::init_log();
|
||||
@ -208,15 +194,16 @@ mod tests {
|
||||
let test_cases = [0, 1, 2];
|
||||
for threshold in &test_cases {
|
||||
// generate document key
|
||||
// TODO: it is an error that we can regenerate key for the same DOCUMENT
|
||||
let signature = make_signature(PRIVATE1, DOCUMENT1);
|
||||
let generated_key = key_servers[0].generate_document_key(&signature, &DOCUMENT1.into(), *threshold).unwrap();
|
||||
let generated_key = decrypt_document_key(PRIVATE1, generated_key);
|
||||
let document = Random.generate().unwrap().secret().clone();
|
||||
let secret = Random.generate().unwrap().secret().clone();
|
||||
let signature = ethkey::sign(&secret, &document).unwrap();
|
||||
let generated_key = key_servers[0].generate_document_key(&signature, &document, *threshold).unwrap();
|
||||
let generated_key = ethcrypto::ecies::decrypt_single_message(&secret, &generated_key).unwrap();
|
||||
|
||||
// now let's try to retrieve key back
|
||||
for key_server in key_servers.iter() {
|
||||
let retrieved_key = key_server.document_key(&signature, &DOCUMENT1.into()).unwrap();
|
||||
let retrieved_key = decrypt_document_key(PRIVATE1, retrieved_key);
|
||||
let retrieved_key = key_server.document_key(&signature, &document).unwrap();
|
||||
let retrieved_key = ethcrypto::ecies::decrypt_single_message(&secret, &retrieved_key).unwrap();
|
||||
assert_eq!(retrieved_key, generated_key);
|
||||
}
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ use std::net::{SocketAddr, IpAddr};
|
||||
use futures::{finished, failed, Future, Stream, BoxFuture};
|
||||
use futures_cpupool::CpuPool;
|
||||
use parking_lot::{RwLock, Mutex};
|
||||
use tokio_core::io::IoFuture;
|
||||
use tokio_io::IoFuture;
|
||||
use tokio_core::reactor::{Handle, Remote, Timeout, Interval};
|
||||
use tokio_core::net::{TcpListener, TcpStream};
|
||||
use ethkey::{Secret, KeyPair, Signature, Random, Generator};
|
||||
@ -45,7 +45,7 @@ pub trait ClusterClient: Send + Sync {
|
||||
/// Start new encryption session.
|
||||
fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result<Arc<EncryptionSession>, Error>;
|
||||
/// Start new decryption session.
|
||||
fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result<Arc<DecryptionSession>, Error>;
|
||||
fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature, is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error>;
|
||||
}
|
||||
|
||||
/// Cluster access for single encryption/decryption participant.
|
||||
@ -181,7 +181,7 @@ pub struct Connection {
|
||||
/// Tcp stream.
|
||||
stream: SharedTcpStream,
|
||||
/// Connection key.
|
||||
key: Secret,
|
||||
key: KeyPair,
|
||||
/// Last message time.
|
||||
last_message_time: Mutex<time::Instant>,
|
||||
}
|
||||
@ -649,9 +649,14 @@ impl ClusterSessions {
|
||||
|
||||
pub fn new_encryption_session(&self, _master: NodeId, session_id: SessionId, cluster: Arc<Cluster>) -> Result<Arc<EncryptionSessionImpl>, Error> {
|
||||
let mut encryption_sessions = self.encryption_sessions.write();
|
||||
// check that there's no active encryption session with the same id
|
||||
if encryption_sessions.contains_key(&session_id) {
|
||||
return Err(Error::DuplicateSessionId);
|
||||
}
|
||||
// check that there's no finished encryption session with the same id
|
||||
if self.key_storage.contains(&session_id) {
|
||||
return Err(Error::DuplicateSessionId);
|
||||
}
|
||||
|
||||
let session = Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams {
|
||||
id: session_id.clone(),
|
||||
@ -865,14 +870,14 @@ impl ClusterClient for ClusterClientImpl {
|
||||
Ok(session)
|
||||
}
|
||||
|
||||
fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result<Arc<DecryptionSession>, Error> {
|
||||
fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature, is_shadow_decryption: bool) -> Result<Arc<DecryptionSession>, Error> {
|
||||
let mut connected_nodes = self.data.connections.connected_nodes();
|
||||
connected_nodes.insert(self.data.self_key_pair.public().clone());
|
||||
|
||||
let access_key = Random.generate()?.secret().clone();
|
||||
let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
|
||||
let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key, cluster)?;
|
||||
session.initialize(requestor_signature)?;
|
||||
session.initialize(requestor_signature, is_shadow_decryption)?;
|
||||
Ok(session)
|
||||
}
|
||||
}
|
||||
|
@ -18,8 +18,9 @@ use std::cmp::{Ord, PartialOrd, Ordering};
|
||||
use std::collections::{BTreeSet, BTreeMap};
|
||||
use std::sync::Arc;
|
||||
use parking_lot::{Mutex, Condvar};
|
||||
use ethcrypto::ecies::encrypt_single_message;
|
||||
use ethkey::{self, Secret, Public, Signature};
|
||||
use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId};
|
||||
use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId, DocumentEncryptedKeyShadow};
|
||||
use key_server_cluster::cluster::Cluster;
|
||||
use key_server_cluster::math;
|
||||
use key_server_cluster::message::{Message, DecryptionMessage, InitializeDecryptionSession, ConfirmDecryptionInitialization,
|
||||
@ -28,7 +29,7 @@ use key_server_cluster::message::{Message, DecryptionMessage, InitializeDecrypti
|
||||
/// Decryption session API.
|
||||
pub trait Session: Send + Sync + 'static {
|
||||
/// Wait until session is completed. Returns distributely restored secret key.
|
||||
fn wait(&self) -> Result<Public, Error>;
|
||||
fn wait(&self) -> Result<DocumentEncryptedKeyShadow, Error>;
|
||||
}
|
||||
|
||||
/// Distributed decryption session.
|
||||
@ -83,6 +84,15 @@ pub struct SessionParams {
|
||||
pub cluster: Arc<Cluster>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Partial decryption result.
|
||||
struct PartialDecryptionResult {
|
||||
/// Shadow point.
|
||||
pub shadow_point: Public,
|
||||
/// Decryption shadow coefficient, if requested.
|
||||
pub decrypt_shadow: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Mutable data of encryption (distributed key generation) session.
|
||||
struct SessionData {
|
||||
@ -94,6 +104,8 @@ struct SessionData {
|
||||
master: Option<NodeId>,
|
||||
/// Public key of requestor.
|
||||
requestor: Option<Public>,
|
||||
/// Is shadow decryption requested?
|
||||
is_shadow_decryption: Option<bool>,
|
||||
|
||||
// === Values, filled during session initialization ===
|
||||
/// Nodes, which have been requested for decryption initialization.
|
||||
@ -105,11 +117,11 @@ struct SessionData {
|
||||
|
||||
// === Values, filled during partial decryption ===
|
||||
/// Shadow points, received from nodes as a response to partial decryption request.
|
||||
shadow_points: BTreeMap<NodeId, Public>,
|
||||
shadow_points: BTreeMap<NodeId, PartialDecryptionResult>,
|
||||
|
||||
/// === Values, filled during final decryption ===
|
||||
/// Decrypted secret
|
||||
decrypted_secret: Option<Result<Public, Error>>,
|
||||
decrypted_secret: Option<Result<DocumentEncryptedKeyShadow, Error>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
@ -146,6 +158,7 @@ impl SessionImpl {
|
||||
state: SessionState::WaitingForInitialization,
|
||||
master: None,
|
||||
requestor: None,
|
||||
is_shadow_decryption: None,
|
||||
requested_nodes: BTreeSet::new(),
|
||||
rejected_nodes: BTreeSet::new(),
|
||||
confirmed_nodes: BTreeSet::new(),
|
||||
@ -174,12 +187,12 @@ impl SessionImpl {
|
||||
|
||||
#[cfg(test)]
|
||||
/// Get decrypted secret
|
||||
pub fn decrypted_secret(&self) -> Option<Public> {
|
||||
pub fn decrypted_secret(&self) -> Option<DocumentEncryptedKeyShadow> {
|
||||
self.data.lock().decrypted_secret.clone().and_then(|r| r.ok())
|
||||
}
|
||||
|
||||
/// Initialize decryption session.
|
||||
pub fn initialize(&self, requestor_signature: Signature) -> Result<(), Error> {
|
||||
pub fn initialize(&self, requestor_signature: Signature, is_shadow_decryption: bool) -> Result<(), Error> {
|
||||
let mut data = self.data.lock();
|
||||
|
||||
// check state
|
||||
@ -194,6 +207,7 @@ impl SessionImpl {
|
||||
data.master = Some(self.node().clone());
|
||||
data.state = SessionState::WaitingForInitializationConfirm;
|
||||
data.requestor = Some(requestor_public.clone());
|
||||
data.is_shadow_decryption = Some(is_shadow_decryption);
|
||||
data.requested_nodes.extend(self.encrypted_data.id_numbers.keys().cloned());
|
||||
|
||||
// ..and finally check access on our's own
|
||||
@ -209,6 +223,7 @@ impl SessionImpl {
|
||||
session: self.id.clone().into(),
|
||||
sub_session: self.access_key.clone().into(),
|
||||
requestor_signature: requestor_signature.clone().into(),
|
||||
is_shadow_decryption: is_shadow_decryption,
|
||||
})))?;
|
||||
}
|
||||
},
|
||||
@ -249,6 +264,7 @@ impl SessionImpl {
|
||||
data.state = if is_requestor_allowed_to_read { SessionState::WaitingForPartialDecryptionRequest }
|
||||
else { SessionState::Failed };
|
||||
data.requestor = Some(requestor_public);
|
||||
data.is_shadow_decryption = Some(message.is_shadow_decryption);
|
||||
|
||||
// respond to master node
|
||||
data.master = Some(sender.clone());
|
||||
@ -316,14 +332,17 @@ impl SessionImpl {
|
||||
}
|
||||
|
||||
// calculate shadow point
|
||||
let shadow_point = {
|
||||
let decryption_result = {
|
||||
let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryptionRequest follows initialization; qed");
|
||||
do_partial_decryption(self.node(), &requestor, &message.nodes.iter().cloned().map(Into::into).collect(), &self.access_key, &self.encrypted_data)?
|
||||
let is_shadow_decryption = data.is_shadow_decryption.expect("is_shadow_decryption is filled during initialization; WaitingForPartialDecryptionRequest follows initialization; qed");
|
||||
let nodes = message.nodes.iter().cloned().map(Into::into).collect();
|
||||
do_partial_decryption(self.node(), &requestor, is_shadow_decryption, &nodes, &self.access_key, &self.encrypted_data)?
|
||||
};
|
||||
self.cluster.send(&sender, Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption {
|
||||
session: self.id.clone().into(),
|
||||
sub_session: self.access_key.clone().into(),
|
||||
shadow_point: shadow_point.into(),
|
||||
shadow_point: decryption_result.shadow_point.into(),
|
||||
decrypt_shadow: decryption_result.decrypt_shadow,
|
||||
})))?;
|
||||
|
||||
// update sate
|
||||
@ -348,7 +367,10 @@ impl SessionImpl {
|
||||
if !data.confirmed_nodes.remove(&sender) {
|
||||
return Err(Error::InvalidStateForRequest);
|
||||
}
|
||||
data.shadow_points.insert(sender, message.shadow_point.clone().into());
|
||||
data.shadow_points.insert(sender, PartialDecryptionResult {
|
||||
shadow_point: message.shadow_point.clone().into(),
|
||||
decrypt_shadow: message.decrypt_shadow.clone(),
|
||||
});
|
||||
|
||||
// check if we have enough shadow points to decrypt the secret
|
||||
if data.shadow_points.len() != self.encrypted_data.threshold + 1 {
|
||||
@ -390,22 +412,38 @@ impl SessionImpl {
|
||||
})))?;
|
||||
}
|
||||
|
||||
assert!(data.confirmed_nodes.remove(&self_node_id));
|
||||
|
||||
let shadow_point = {
|
||||
let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed");
|
||||
do_partial_decryption(&self_node_id, &requestor, &data.confirmed_nodes, &access_key, &encrypted_data)?
|
||||
};
|
||||
data.shadow_points.insert(self_node_id.clone(), shadow_point);
|
||||
if data.confirmed_nodes.remove(&self_node_id) {
|
||||
let decryption_result = {
|
||||
let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed");
|
||||
let is_shadow_decryption = data.is_shadow_decryption.expect("is_shadow_decryption is filled during initialization; WaitingForPartialDecryption follows initialization; qed");
|
||||
do_partial_decryption(&self_node_id, &requestor, is_shadow_decryption, &data.confirmed_nodes, &access_key, &encrypted_data)?
|
||||
};
|
||||
data.shadow_points.insert(self_node_id.clone(), decryption_result);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn do_decryption(access_key: Secret, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> {
|
||||
// decrypt the secret using shadow points
|
||||
let joint_shadow_point = math::compute_joint_shadow_point(data.shadow_points.values())?;
|
||||
let joint_shadow_point = math::compute_joint_shadow_point(data.shadow_points.values().map(|s| &s.shadow_point))?;
|
||||
let decrypted_secret = math::decrypt_with_joint_shadow(encrypted_data.threshold, &access_key, &encrypted_data.encrypted_point, &joint_shadow_point)?;
|
||||
data.decrypted_secret = Some(Ok(decrypted_secret));
|
||||
let is_shadow_decryption = data.is_shadow_decryption.expect("is_shadow_decryption is filled during initialization; decryption follows initialization; qed");
|
||||
let (common_point, decrypt_shadows) = if is_shadow_decryption {
|
||||
(
|
||||
Some(math::make_common_shadow_point(encrypted_data.threshold, encrypted_data.common_point.clone())?),
|
||||
Some(data.shadow_points.values()
|
||||
.map(|s| s.decrypt_shadow.as_ref().expect("decrypt_shadow is filled during partial decryption; decryption follows partial decryption; qed").clone())
|
||||
.collect())
|
||||
)
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
data.decrypted_secret = Some(Ok(DocumentEncryptedKeyShadow {
|
||||
decrypted_secret: decrypted_secret,
|
||||
common_point: common_point,
|
||||
decrypt_shadows: decrypt_shadows,
|
||||
}));
|
||||
|
||||
// switch to completed state
|
||||
data.state = SessionState::Finished;
|
||||
@ -415,7 +453,7 @@ impl SessionImpl {
|
||||
}
|
||||
|
||||
impl Session for SessionImpl {
|
||||
fn wait(&self) -> Result<Public, Error> {
|
||||
fn wait(&self) -> Result<DocumentEncryptedKeyShadow, Error> {
|
||||
let mut data = self.data.lock();
|
||||
if !data.decrypted_secret.is_some() {
|
||||
self.completed.wait(&mut data);
|
||||
@ -492,15 +530,22 @@ fn process_initialization_response(encrypted_data: &DocumentKeyShare, data: &mut
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants: &BTreeSet<NodeId>, access_key: &Secret, encrypted_data: &DocumentKeyShare) -> Result<Public, Error> {
|
||||
fn do_partial_decryption(node: &NodeId, requestor_public: &Public, is_shadow_decryption: bool, participants: &BTreeSet<NodeId>, access_key: &Secret, encrypted_data: &DocumentKeyShare) -> Result<PartialDecryptionResult, Error> {
|
||||
let node_id_number = &encrypted_data.id_numbers[node];
|
||||
let node_secret_share = &encrypted_data.secret_share;
|
||||
let other_id_numbers = participants.iter()
|
||||
.filter(|id| *id != node)
|
||||
.map(|id| &encrypted_data.id_numbers[id]);
|
||||
// TODO: commutative encryption using _requestor_public
|
||||
let node_shadow = math::compute_node_shadow(node_id_number, node_secret_share, other_id_numbers)?;
|
||||
math::compute_node_shadow_point(access_key, &encrypted_data.common_point, &node_shadow)
|
||||
let decrypt_shadow = if is_shadow_decryption { Some(math::generate_random_scalar()?) } else { None };
|
||||
let (shadow_point, decrypt_shadow) = math::compute_node_shadow_point(access_key, &encrypted_data.common_point, &node_shadow, decrypt_shadow)?;
|
||||
Ok(PartialDecryptionResult {
|
||||
shadow_point: shadow_point,
|
||||
decrypt_shadow: match decrypt_shadow {
|
||||
None => None,
|
||||
Some(decrypt_shadow) => Some(encrypt_single_message(requestor_public, &**decrypt_shadow)?),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -509,10 +554,11 @@ mod tests {
|
||||
use std::collections::BTreeMap;
|
||||
use super::super::super::acl_storage::tests::DummyAclStorage;
|
||||
use ethkey::{self, Random, Generator, Public, Secret};
|
||||
use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error};
|
||||
use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error, DocumentEncryptedKeyShadow};
|
||||
use key_server_cluster::cluster::tests::DummyCluster;
|
||||
use key_server_cluster::decryption_session::{SessionImpl, SessionParams, SessionState};
|
||||
use key_server_cluster::message::{self, Message, DecryptionMessage};
|
||||
use key_server_cluster::math;
|
||||
|
||||
const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f";
|
||||
|
||||
@ -660,18 +706,19 @@ mod tests {
|
||||
#[test]
|
||||
fn fails_to_initialize_when_already_initialized() {
|
||||
let (_, _, sessions) = prepare_decryption_sessions();
|
||||
assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).unwrap(), ());
|
||||
assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).unwrap_err(), Error::InvalidStateForRequest);
|
||||
assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(), ());
|
||||
assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap_err(), Error::InvalidStateForRequest);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fails_to_accept_initialization_when_already_initialized() {
|
||||
let (_, _, sessions) = prepare_decryption_sessions();
|
||||
assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).unwrap(), ());
|
||||
assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap(), ());
|
||||
assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), &message::InitializeDecryptionSession {
|
||||
session: SessionId::default().into(),
|
||||
sub_session: sessions[0].access_key().clone().into(),
|
||||
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
|
||||
is_shadow_decryption: false,
|
||||
}).unwrap_err(), Error::InvalidStateForRequest);
|
||||
}
|
||||
|
||||
@ -682,6 +729,7 @@ mod tests {
|
||||
session: SessionId::default().into(),
|
||||
sub_session: sessions[0].access_key().clone().into(),
|
||||
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
|
||||
is_shadow_decryption: false,
|
||||
}).unwrap(), ());
|
||||
assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption {
|
||||
session: SessionId::default().into(),
|
||||
@ -702,6 +750,7 @@ mod tests {
|
||||
session: SessionId::default().into(),
|
||||
sub_session: sessions[0].access_key().clone().into(),
|
||||
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
|
||||
is_shadow_decryption: false,
|
||||
}).unwrap(), ());
|
||||
assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), &message::RequestPartialDecryption {
|
||||
session: SessionId::default().into(),
|
||||
@ -717,6 +766,7 @@ mod tests {
|
||||
session: SessionId::default().into(),
|
||||
sub_session: sessions[0].access_key().clone().into(),
|
||||
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
|
||||
is_shadow_decryption: false,
|
||||
}).unwrap(), ());
|
||||
assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption {
|
||||
session: SessionId::default().into(),
|
||||
@ -732,13 +782,14 @@ mod tests {
|
||||
session: SessionId::default().into(),
|
||||
sub_session: sessions[0].access_key().clone().into(),
|
||||
shadow_point: Random.generate().unwrap().public().clone().into(),
|
||||
decrypt_shadow: None,
|
||||
}).unwrap_err(), Error::InvalidStateForRequest);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fails_to_accept_partial_decrypt_twice() {
|
||||
let (clusters, _, sessions) = prepare_decryption_sessions();
|
||||
sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).unwrap();
|
||||
sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), false).unwrap();
|
||||
|
||||
let mut pd_from = None;
|
||||
let mut pd_msg = None;
|
||||
@ -762,7 +813,7 @@ mod tests {
|
||||
// now let's try to do a decryption
|
||||
let key_pair = Random.generate().unwrap();
|
||||
let signature = ethkey::sign(key_pair.secret(), &SessionId::default()).unwrap();
|
||||
sessions[0].initialize(signature).unwrap();
|
||||
sessions[0].initialize(signature, false).unwrap();
|
||||
|
||||
do_messages_exchange(&clusters, &sessions);
|
||||
|
||||
@ -773,7 +824,45 @@ mod tests {
|
||||
assert_eq!(sessions.iter().filter(|s| s.state() == SessionState::WaitingForPartialDecryptionRequest).count(), 1);
|
||||
// 3) 1 session has decrypted key value
|
||||
assert!(sessions.iter().skip(1).all(|s| s.decrypted_secret().is_none()));
|
||||
assert_eq!(sessions[0].decrypted_secret(), Some(SECRET_PLAIN.into()));
|
||||
assert_eq!(sessions[0].decrypted_secret(), Some(DocumentEncryptedKeyShadow {
|
||||
decrypted_secret: SECRET_PLAIN.into(),
|
||||
common_point: None,
|
||||
decrypt_shadows: None,
|
||||
}));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn complete_shadow_dec_session() {
|
||||
let (clusters, _, sessions) = prepare_decryption_sessions();
|
||||
|
||||
// now let's try to do a decryption
|
||||
let key_pair = Random.generate().unwrap();
|
||||
let signature = ethkey::sign(key_pair.secret(), &SessionId::default()).unwrap();
|
||||
sessions[0].initialize(signature, true).unwrap();
|
||||
|
||||
do_messages_exchange(&clusters, &sessions);
|
||||
|
||||
// now check that:
|
||||
// 1) 4 of 5 sessions are in Finished state
|
||||
assert_eq!(sessions.iter().filter(|s| s.state() == SessionState::Finished).count(), 4);
|
||||
// 2) 1 session is in WaitingForPartialDecryptionRequest state
|
||||
assert_eq!(sessions.iter().filter(|s| s.state() == SessionState::WaitingForPartialDecryptionRequest).count(), 1);
|
||||
// 3) 1 session has decrypted key value
|
||||
assert!(sessions.iter().skip(1).all(|s| s.decrypted_secret().is_none()));
|
||||
|
||||
let decrypted_secret = sessions[0].decrypted_secret().unwrap();
|
||||
// check that decrypted_secret != SECRET_PLAIN
|
||||
assert!(decrypted_secret.decrypted_secret != SECRET_PLAIN.into());
|
||||
// check that common point && shadow coefficients are returned
|
||||
assert!(decrypted_secret.common_point.is_some());
|
||||
assert!(decrypted_secret.decrypt_shadows.is_some());
|
||||
// check that KS client is able to restore original secret
|
||||
use ethcrypto::ecies::decrypt_single_message;
|
||||
let decrypt_shadows: Vec<_> = decrypted_secret.decrypt_shadows.unwrap().into_iter()
|
||||
.map(|c| Secret::from_slice(&decrypt_single_message(key_pair.secret(), &c).unwrap()).unwrap())
|
||||
.collect();
|
||||
let decrypted_secret = math::decrypt_with_shadow_coefficients(decrypted_secret.decrypted_secret, decrypted_secret.common_point.unwrap(), decrypt_shadows).unwrap();
|
||||
assert_eq!(decrypted_secret, SECRET_PLAIN.into());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -783,7 +872,7 @@ mod tests {
|
||||
// now let's try to do a decryption
|
||||
let key_pair = Random.generate().unwrap();
|
||||
let signature = ethkey::sign(key_pair.secret(), &SessionId::default()).unwrap();
|
||||
sessions[0].initialize(signature).unwrap();
|
||||
sessions[0].initialize(signature, false).unwrap();
|
||||
|
||||
// we need 4 out of 5 nodes to agree to do a decryption
|
||||
// let's say that 2 of these nodes are disagree
|
||||
@ -801,6 +890,33 @@ mod tests {
|
||||
assert!(sessions.iter().all(|s| s.decrypted_secret().is_none()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn complete_dec_session_with_acl_check_failed_on_master() {
|
||||
let (clusters, acl_storages, sessions) = prepare_decryption_sessions();
|
||||
|
||||
// we need 4 out of 5 nodes to agree to do a decryption
|
||||
// let's say that 1 of these nodes (master) is disagree
|
||||
let key_pair = Random.generate().unwrap();
|
||||
acl_storages[0].prohibit(key_pair.public().clone(), SessionId::default());
|
||||
|
||||
// now let's try to do a decryption
|
||||
let signature = ethkey::sign(key_pair.secret(), &SessionId::default()).unwrap();
|
||||
sessions[0].initialize(signature, false).unwrap();
|
||||
|
||||
do_messages_exchange(&clusters, &sessions);
|
||||
|
||||
// now check that:
|
||||
// 1) 4 of 5 sessions are in Finished state
|
||||
assert_eq!(sessions.iter().filter(|s| s.state() == SessionState::Finished).count(), 5);
|
||||
// 2) 1 session has decrypted key value
|
||||
assert!(sessions.iter().skip(1).all(|s| s.decrypted_secret().is_none()));
|
||||
assert_eq!(sessions[0].decrypted_secret(), Some(DocumentEncryptedKeyShadow {
|
||||
decrypted_secret: SECRET_PLAIN.into(),
|
||||
common_point: None,
|
||||
decrypt_shadows: None,
|
||||
}));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn decryption_session_works_over_network() {
|
||||
// TODO
|
||||
|
@ -17,21 +17,22 @@
|
||||
use std::io;
|
||||
use std::collections::BTreeSet;
|
||||
use futures::{Future, Poll, Async};
|
||||
use tokio_io::{AsyncRead, AsyncWrite};
|
||||
use ethkey::{Random, Generator, KeyPair, Secret, sign, verify_public};
|
||||
use util::H256;
|
||||
use key_server_cluster::{NodeId, Error};
|
||||
use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature};
|
||||
use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage,
|
||||
read_message, compute_shared_key};
|
||||
read_message, read_encrypted_message, compute_shared_key};
|
||||
|
||||
/// Start handshake procedure with another node from the cluster.
|
||||
pub fn handshake<A>(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: io::Write + io::Read {
|
||||
pub fn handshake<A>(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
|
||||
let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into);
|
||||
handshake_with_plain_confirmation(a, self_confirmation_plain, self_key_pair, trusted_nodes)
|
||||
}
|
||||
|
||||
/// Start handshake procedure with another node from the cluster and given plain confirmation.
|
||||
pub fn handshake_with_plain_confirmation<A>(a: A, self_confirmation_plain: Result<H256, Error>, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: io::Write + io::Read {
|
||||
pub fn handshake_with_plain_confirmation<A>(a: A, self_confirmation_plain: Result<H256, Error>, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
|
||||
let (error, state) = match self_confirmation_plain.clone()
|
||||
.and_then(|c| Handshake::<A>::make_public_key_message(self_key_pair.public().clone(), c)) {
|
||||
Ok(message) => (None, HandshakeState::SendPublicKey(write_message(a, message))),
|
||||
@ -52,7 +53,7 @@ pub fn handshake_with_plain_confirmation<A>(a: A, self_confirmation_plain: Resul
|
||||
}
|
||||
|
||||
/// Wait for handshake procedure to be started by another node from the cluster.
|
||||
pub fn accept_handshake<A>(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: io::Write + io::Read {
|
||||
pub fn accept_handshake<A>(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: AsyncWrite + AsyncRead {
|
||||
let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into);
|
||||
let (error, state) = match self_confirmation_plain.clone() {
|
||||
Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))),
|
||||
@ -78,7 +79,7 @@ pub struct HandshakeResult {
|
||||
/// Node id.
|
||||
pub node_id: NodeId,
|
||||
/// Shared key.
|
||||
pub shared_key: Secret,
|
||||
pub shared_key: KeyPair,
|
||||
}
|
||||
|
||||
/// Future handshake procedure.
|
||||
@ -91,7 +92,7 @@ pub struct Handshake<A> {
|
||||
trusted_nodes: BTreeSet<NodeId>,
|
||||
other_node_id: Option<NodeId>,
|
||||
other_confirmation_plain: Option<H256>,
|
||||
shared_key: Option<Secret>,
|
||||
shared_key: Option<KeyPair>,
|
||||
}
|
||||
|
||||
/// Active handshake state.
|
||||
@ -103,7 +104,7 @@ enum HandshakeState<A> {
|
||||
Finished,
|
||||
}
|
||||
|
||||
impl<A> Handshake<A> where A: io::Read + io::Write {
|
||||
impl<A> Handshake<A> where A: AsyncRead + AsyncWrite {
|
||||
#[cfg(test)]
|
||||
pub fn set_self_confirmation_plain(&mut self, self_confirmation_plain: H256) {
|
||||
self.self_confirmation_plain = self_confirmation_plain;
|
||||
@ -123,7 +124,7 @@ impl<A> Handshake<A> where A: io::Read + io::Write {
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> Future for Handshake<A> where A: io::Read + io::Write {
|
||||
impl<A> Future for Handshake<A> where A: AsyncRead + AsyncWrite {
|
||||
type Item = (A, Result<HandshakeResult, Error>);
|
||||
type Error = io::Error;
|
||||
|
||||
@ -207,7 +208,9 @@ impl<A> Future for Handshake<A> where A: io::Read + io::Write {
|
||||
let (stream, _) = try_ready!(future.poll());
|
||||
|
||||
(HandshakeState::ReceivePrivateKeySignature(
|
||||
read_message(stream)
|
||||
read_encrypted_message(stream,
|
||||
self.shared_key.as_ref().expect("shared_key is filled in Send/ReceivePublicKey; SendPrivateKeySignature follows Send/ReceivePublicKey; qed").clone()
|
||||
)
|
||||
), Async::NotReady)
|
||||
},
|
||||
HandshakeState::ReceivePrivateKeySignature(ref mut future) => {
|
||||
@ -247,9 +250,9 @@ impl<A> Future for Handshake<A> where A: io::Read + io::Write {
|
||||
mod tests {
|
||||
use std::collections::BTreeSet;
|
||||
use futures::Future;
|
||||
use ethcrypto::ecdh::agree;
|
||||
use ethkey::{Random, Generator, sign};
|
||||
use util::H256;
|
||||
use key_server_cluster::io::message::compute_shared_key;
|
||||
use key_server_cluster::io::message::tests::TestIo;
|
||||
use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature};
|
||||
use super::{handshake_with_plain_confirmation, accept_handshake, HandshakeResult};
|
||||
@ -263,24 +266,15 @@ mod tests {
|
||||
let peer_confirmation_plain = *Random.generate().unwrap().secret().clone();
|
||||
|
||||
let self_confirmation_signed = sign(peer_key_pair.secret(), &self_confirmation_plain).unwrap();
|
||||
let peer_confirmation_signed = sign(self_key_pair.secret(), &peer_confirmation_plain).unwrap();
|
||||
|
||||
io.add_input_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey {
|
||||
node_id: peer_key_pair.public().clone().into(),
|
||||
confirmation_plain: peer_confirmation_plain.into(),
|
||||
})));
|
||||
io.add_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature {
|
||||
io.add_encrypted_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature {
|
||||
confirmation_signed: self_confirmation_signed.into(),
|
||||
})));
|
||||
|
||||
io.add_output_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey {
|
||||
node_id: self_key_pair.public().clone().into(),
|
||||
confirmation_plain: self_confirmation_plain.clone().into(),
|
||||
})));
|
||||
io.add_output_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature {
|
||||
confirmation_signed: peer_confirmation_signed.into(),
|
||||
})));
|
||||
|
||||
(self_confirmation_plain, io)
|
||||
}
|
||||
|
||||
@ -289,7 +283,7 @@ mod tests {
|
||||
let (self_confirmation_plain, io) = prepare_test_io();
|
||||
let self_key_pair = io.self_key_pair().clone();
|
||||
let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect();
|
||||
let shared_key = agree(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap();
|
||||
let shared_key = compute_shared_key(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap();
|
||||
|
||||
let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), self_key_pair, trusted_nodes);
|
||||
let handshake_result = handshake.wait().unwrap();
|
||||
@ -297,7 +291,6 @@ mod tests {
|
||||
node_id: handshake_result.0.peer_public().clone(),
|
||||
shared_key: shared_key,
|
||||
}));
|
||||
handshake_result.0.assert_output();
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -305,7 +298,7 @@ mod tests {
|
||||
let (self_confirmation_plain, io) = prepare_test_io();
|
||||
let self_key_pair = io.self_key_pair().clone();
|
||||
let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect();
|
||||
let shared_key = agree(self_key_pair.secret(), io.peer_public()).unwrap();
|
||||
let shared_key = compute_shared_key(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap();
|
||||
|
||||
let mut handshake = accept_handshake(io, self_key_pair, trusted_nodes);
|
||||
handshake.set_self_confirmation_plain(self_confirmation_plain);
|
||||
@ -315,6 +308,5 @@ mod tests {
|
||||
node_id: handshake_result.0.peer_public().clone(),
|
||||
shared_key: shared_key,
|
||||
}));
|
||||
handshake_result.0.assert_output();
|
||||
}
|
||||
}
|
||||
|
@ -20,7 +20,10 @@ use std::ops::Deref;
|
||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||
use serde_json;
|
||||
use ethcrypto::ecdh::agree;
|
||||
use ethkey::{Public, Secret};
|
||||
use ethcrypto::ecies::{encrypt_single_message, decrypt_single_message};
|
||||
use ethkey::{Public, Secret, KeyPair};
|
||||
use ethkey::math::curve_order;
|
||||
use util::{H256, U256};
|
||||
use key_server_cluster::Error;
|
||||
use key_server_cluster::message::{Message, ClusterMessage, EncryptionMessage, DecryptionMessage};
|
||||
|
||||
@ -82,20 +85,11 @@ pub fn serialize_message(message: Message) -> Result<SerializedMessage, Error> {
|
||||
};
|
||||
|
||||
let payload = payload.map_err(|err| Error::Serde(err.to_string()))?;
|
||||
let payload_len = payload.len();
|
||||
if payload_len > u16::MAX as usize {
|
||||
return Err(Error::InvalidMessage);
|
||||
}
|
||||
|
||||
let header = MessageHeader {
|
||||
build_serialized_message(MessageHeader {
|
||||
kind: message_kind,
|
||||
version: 1,
|
||||
size: payload_len as u16,
|
||||
};
|
||||
|
||||
let mut serialized_message = serialize_header(&header)?;
|
||||
serialized_message.extend(payload);
|
||||
Ok(SerializedMessage(serialized_message))
|
||||
size: 0,
|
||||
}, payload)
|
||||
}
|
||||
|
||||
/// Deserialize message.
|
||||
@ -127,18 +121,30 @@ pub fn deserialize_message(header: &MessageHeader, payload: Vec<u8>) -> Result<M
|
||||
}
|
||||
|
||||
/// Encrypt serialized message.
|
||||
pub fn encrypt_message(_key: &Secret, message: SerializedMessage) -> Result<SerializedMessage, Error> {
|
||||
Ok(message) // TODO: implement me
|
||||
pub fn encrypt_message(key: &KeyPair, message: SerializedMessage) -> Result<SerializedMessage, Error> {
|
||||
let mut header: Vec<_> = message.into();
|
||||
let payload = header.split_off(MESSAGE_HEADER_SIZE);
|
||||
let encrypted_payload = encrypt_single_message(key.public(), &payload)?;
|
||||
|
||||
let header = deserialize_header(&header)?;
|
||||
build_serialized_message(header, encrypted_payload)
|
||||
}
|
||||
|
||||
/// Decrypt serialized message.
|
||||
pub fn decrypt_message(_key: &Secret, payload: Vec<u8>) -> Result<Vec<u8>, Error> {
|
||||
Ok(payload) // TODO: implement me
|
||||
pub fn decrypt_message(key: &KeyPair, payload: Vec<u8>) -> Result<Vec<u8>, Error> {
|
||||
Ok(decrypt_single_message(key.secret(), &payload)?)
|
||||
}
|
||||
|
||||
/// Compute shared encryption key.
|
||||
pub fn compute_shared_key(self_secret: &Secret, other_public: &Public) -> Result<Secret, Error> {
|
||||
Ok(agree(self_secret, other_public)?)
|
||||
pub fn compute_shared_key(self_secret: &Secret, other_public: &Public) -> Result<KeyPair, Error> {
|
||||
// secret key created in agree function is invalid, as it is not calculated mod EC.field.n
|
||||
// => let's do it manually
|
||||
let shared_secret = agree(self_secret, other_public)?;
|
||||
let shared_secret: H256 = (*shared_secret).into();
|
||||
let shared_secret: U256 = shared_secret.into();
|
||||
let shared_secret: H256 = (shared_secret % curve_order()).into();
|
||||
let shared_key_pair = KeyPair::from_secret_slice(&*shared_secret)?;
|
||||
Ok(shared_key_pair)
|
||||
}
|
||||
|
||||
/// Serialize message header.
|
||||
@ -160,29 +166,44 @@ pub fn deserialize_header(data: &[u8]) -> Result<MessageHeader, Error> {
|
||||
})
|
||||
}
|
||||
|
||||
/// Build serialized message from header && payload
|
||||
fn build_serialized_message(mut header: MessageHeader, payload: Vec<u8>) -> Result<SerializedMessage, Error> {
|
||||
let payload_len = payload.len();
|
||||
if payload_len > u16::MAX as usize {
|
||||
return Err(Error::InvalidMessage);
|
||||
}
|
||||
header.size = payload.len() as u16;
|
||||
|
||||
let mut message = serialize_header(&header)?;
|
||||
message.extend(payload);
|
||||
Ok(SerializedMessage(message))
|
||||
}
|
||||
|
#[cfg(test)]
pub mod tests {
	use std::io;
	use futures::Poll;
	use tokio_io::{AsyncRead, AsyncWrite};
	use ethkey::{KeyPair, Public};
	use key_server_cluster::message::Message;
	use super::{MESSAGE_HEADER_SIZE, MessageHeader, serialize_message, serialize_header, deserialize_header};
	use super::{MESSAGE_HEADER_SIZE, MessageHeader, compute_shared_key, encrypt_message, serialize_message,
		serialize_header, deserialize_header};

	pub struct TestIo {
		self_key_pair: KeyPair,
		peer_public: Public,
		shared_key_pair: KeyPair,
		input_buffer: io::Cursor<Vec<u8>>,
		output_buffer: Vec<u8>,
		expected_output_buffer: Vec<u8>,
	}

	impl TestIo {
		pub fn new(self_key_pair: KeyPair, peer_public: Public) -> Self {
			let shared_key_pair = compute_shared_key(self_key_pair.secret(), &peer_public).unwrap();
			TestIo {
				self_key_pair: self_key_pair,
				peer_public: peer_public,
				shared_key_pair: shared_key_pair,
				input_buffer: io::Cursor::new(Vec::new()),
				output_buffer: Vec::new(),
				expected_output_buffer: Vec::new(),
			}
		}

@ -203,14 +224,21 @@ pub mod tests {
			}
		}

		pub fn add_output_message(&mut self, message: Message) {
			let serialized_message = serialize_message(message).unwrap();
		pub fn add_encrypted_input_message(&mut self, message: Message) {
			let serialized_message = encrypt_message(&self.shared_key_pair, serialize_message(message).unwrap()).unwrap();
			let serialized_message: Vec<_> = serialized_message.into();
			self.expected_output_buffer.extend(serialized_message);
			let input_buffer = self.input_buffer.get_mut();
			for b in serialized_message {
				input_buffer.push(b);
			}
		}
	}

		pub fn assert_output(&self) {
			assert_eq!(self.output_buffer, self.expected_output_buffer);
	impl AsyncRead for TestIo {}

	impl AsyncWrite for TestIo {
		fn shutdown(&mut self) -> Poll<(), io::Error> {
			Ok(().into())
		}
	}

@ -222,11 +250,11 @@ pub mod tests {

	impl io::Write for TestIo {
		fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
			io::Write::write(&mut self.output_buffer, buf)
			Ok(buf.len())
		}

		fn flush(&mut self) -> io::Result<()> {
			io::Write::flush(&mut self.output_buffer)
			Ok(())
		}
	}


@ -16,12 +16,13 @@

use std::io;
use futures::{Future, Poll, Async};
use tokio_core::io::{ReadExact, read_exact};
use tokio_io::AsyncRead;
use tokio_io::io::{ReadExact, read_exact};
use key_server_cluster::Error;
use key_server_cluster::io::message::{MESSAGE_HEADER_SIZE, MessageHeader, deserialize_header};

/// Create future for read single message header from the stream.
pub fn read_header<A>(a: A) -> ReadHeader<A> where A: io::Read {
pub fn read_header<A>(a: A) -> ReadHeader<A> where A: AsyncRead {
	ReadHeader {
		reader: read_exact(a, [0; MESSAGE_HEADER_SIZE]),
	}
@ -32,7 +33,7 @@ pub struct ReadHeader<A> {
	reader: ReadExact<A, [u8; MESSAGE_HEADER_SIZE]>,
}

impl<A> Future for ReadHeader<A> where A: io::Read {
impl<A> Future for ReadHeader<A> where A: AsyncRead {
	type Item = (A, Result<MessageHeader, Error>);
	type Error = io::Error;


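The same mechanical migration repeats through the next several files: stream parameters previously bounded by `io::Read`/`io::Write` move to `tokio_io::AsyncRead`/`AsyncWrite`, and the `read_exact`/`write_all` combinators now come from `tokio_io::io` instead of `tokio_core::io`. A minimal, self-contained sketch of the new shape (illustrative names only, assuming futures 0.1 and tokio-io 0.1; these are not this crate's types):

extern crate futures;
extern crate tokio_io;

use std::io;
use futures::{Future, Poll};
use tokio_io::AsyncRead;
use tokio_io::io::{read_exact, ReadExact};

/// Future that reads a fixed-size prefix from any `AsyncRead` stream.
pub struct ReadPrefix<A> {
	reader: ReadExact<A, [u8; 4]>,
}

pub fn read_prefix<A>(a: A) -> ReadPrefix<A> where A: AsyncRead {
	ReadPrefix { reader: read_exact(a, [0u8; 4]) }
}

impl<A> Future for ReadPrefix<A> where A: AsyncRead {
	type Item = (A, [u8; 4]);
	type Error = io::Error;

	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
		// Delegate to the tokio-io combinator; it resolves to (stream, filled buffer).
		self.reader.poll()
	}
}
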
@ -16,13 +16,14 @@

use std::io;
use futures::{Poll, Future, Async};
use ethkey::Secret;
use tokio_io::AsyncRead;
use ethkey::KeyPair;
use key_server_cluster::Error;
use key_server_cluster::message::Message;
use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload};

/// Create future for read single message from the stream.
pub fn read_message<A>(a: A) -> ReadMessage<A> where A: io::Read {
pub fn read_message<A>(a: A) -> ReadMessage<A> where A: AsyncRead {
	ReadMessage {
		key: None,
		state: ReadMessageState::ReadHeader(read_header(a)),
@ -30,7 +31,7 @@ pub fn read_message<A>(a: A) -> ReadMessage<A> where A: io::Read {
}

/// Create future for read single encrypted message from the stream.
pub fn read_encrypted_message<A>(a: A, key: Secret) -> ReadMessage<A> where A: io::Read {
pub fn read_encrypted_message<A>(a: A, key: KeyPair) -> ReadMessage<A> where A: AsyncRead {
	ReadMessage {
		key: Some(key),
		state: ReadMessageState::ReadHeader(read_header(a)),
@ -45,11 +46,11 @@ enum ReadMessageState<A> {

/// Future for read single message from the stream.
pub struct ReadMessage<A> {
	key: Option<Secret>,
	key: Option<KeyPair>,
	state: ReadMessageState<A>,
}

impl<A> Future for ReadMessage<A> where A: io::Read {
impl<A> Future for ReadMessage<A> where A: AsyncRead {
	type Item = (A, Result<Message, Error>);
	type Error = io::Error;


@ -16,14 +16,15 @@

use std::io;
use futures::{Poll, Future};
use tokio_core::io::{read_exact, ReadExact};
use ethkey::Secret;
use tokio_io::AsyncRead;
use tokio_io::io::{read_exact, ReadExact};
use ethkey::KeyPair;
use key_server_cluster::Error;
use key_server_cluster::message::Message;
use key_server_cluster::io::message::{MessageHeader, deserialize_message, decrypt_message};

/// Create future for read single message payload from the stream.
pub fn read_payload<A>(a: A, header: MessageHeader) -> ReadPayload<A> where A: io::Read {
pub fn read_payload<A>(a: A, header: MessageHeader) -> ReadPayload<A> where A: AsyncRead {
	ReadPayload {
		reader: read_exact(a, vec![0; header.size as usize]),
		header: header,
@ -32,7 +33,7 @@ pub fn read_payload<A>(a: A, header: MessageHeader) -> ReadPayload<A> where A: i
}

/// Create future for read single encrypted message payload from the stream.
pub fn read_encrypted_payload<A>(a: A, header: MessageHeader, key: Secret) -> ReadPayload<A> where A: io::Read {
pub fn read_encrypted_payload<A>(a: A, header: MessageHeader, key: KeyPair) -> ReadPayload<A> where A: AsyncRead {
	ReadPayload {
		reader: read_exact(a, vec![0; header.size as usize]),
		header: header,
@ -44,10 +45,10 @@ pub fn read_encrypted_payload<A>(a: A, header: MessageHeader, key: Secret) -> Re
pub struct ReadPayload<A> {
	reader: ReadExact<A, Vec<u8>>,
	header: MessageHeader,
	key: Option<Secret>,
	key: Option<KeyPair>,
}

impl<A> Future for ReadPayload<A> where A: io::Read {
impl<A> Future for ReadPayload<A> where A: AsyncRead {
	type Item = (A, Result<Message, Error>);
	type Error = io::Error;


@ -15,7 +15,10 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::net::Shutdown;
use std::io::{Read, Write, Error};
use futures::Poll;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_core::net::TcpStream;

/// Read+Write implementation for Arc<TcpStream>.
@ -37,6 +40,14 @@ impl From<TcpStream> for SharedTcpStream {
	}
}

impl AsyncRead for SharedTcpStream {}

impl AsyncWrite for SharedTcpStream {
	fn shutdown(&mut self) -> Poll<(), Error> {
		self.io.shutdown(Shutdown::Both).map(Into::into)
	}
}

impl Read for SharedTcpStream {
	fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
		Read::read(&mut (&*self.io as &TcpStream), buf)

@ -16,13 +16,14 @@

use std::io;
use futures::{Future, Poll};
use tokio_core::io::{WriteAll, write_all};
use ethkey::Secret;
use tokio_io::AsyncWrite;
use tokio_io::io::{WriteAll, write_all};
use ethkey::KeyPair;
use key_server_cluster::message::Message;
use key_server_cluster::io::{serialize_message, encrypt_message};

/// Write plain message to the channel.
pub fn write_message<A>(a: A, message: Message) -> WriteMessage<A> where A: io::Write {
pub fn write_message<A>(a: A, message: Message) -> WriteMessage<A> where A: AsyncWrite {
	let (error, future) = match serialize_message(message)
		.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) {
		Ok(message) => (None, write_all(a, message.into())),
@ -35,7 +36,7 @@ pub fn write_message<A>(a: A, message: Message) -> WriteMessage<A> where A: io::
}

/// Write encrypted message to the channel.
pub fn write_encrypted_message<A>(a: A, key: &Secret, message: Message) -> WriteMessage<A> where A: io::Write {
pub fn write_encrypted_message<A>(a: A, key: &KeyPair, message: Message) -> WriteMessage<A> where A: AsyncWrite {
	let (error, future) = match serialize_message(message)
		.and_then(|message| encrypt_message(key, message))
		.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) {
@ -56,7 +57,7 @@ pub struct WriteMessage<A> {
	future: WriteAll<A, Vec<u8>>,
}

impl<A> Future for WriteMessage<A> where A: io::Write {
impl<A> Future for WriteMessage<A> where A: AsyncWrite {
	type Item = (A, Vec<u8>);
	type Error = io::Error;


@ -203,12 +203,24 @@ pub fn compute_node_shadow<'a, I>(node_number: &Secret, node_secret_share: &Secr
}

/// Compute shadow point for the node.
pub fn compute_node_shadow_point(access_key: &Secret, common_point: &Public, node_shadow: &Secret) -> Result<Public, Error> {
	let mut shadow_key = access_key.clone();
	shadow_key.mul(node_shadow)?;
pub fn compute_node_shadow_point(access_key: &Secret, common_point: &Public, node_shadow: &Secret, decrypt_shadow: Option<Secret>) -> Result<(Public, Option<Secret>), Error> {
	let mut shadow_key = node_shadow.clone();
	let decrypt_shadow = match decrypt_shadow {
		None => None,
		Some(mut decrypt_shadow) => {
			// update shadow key
			shadow_key.mul(&decrypt_shadow)?;
			// now update decrypt shadow itself
			decrypt_shadow.dec()?;
			decrypt_shadow.mul(node_shadow)?;
			Some(decrypt_shadow)
		}
	};
	shadow_key.mul(access_key)?;

	let mut node_shadow_point = common_point.clone();
	math::public_mul_secret(&mut node_shadow_point, &shadow_key)?;
	Ok(node_shadow_point)
	Ok((node_shadow_point, decrypt_shadow))
}

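Restating the arithmetic of the new `compute_node_shadow_point` (notation only, all operations taken modulo the curve order; this assumes `Secret::dec` decrements by one):

	shadow_key          = node_shadow * decrypt_shadow * access_key    (the decrypt_shadow factor is present only when shadow decryption is requested)
	node_shadow_point   = shadow_key * common_point
	returned coefficient = (decrypt_shadow - 1) * node_shadow           (handed back so the requestor can strip the extra factor client-side)
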
/// Compute joint shadow point.
@ -252,6 +264,28 @@ pub fn decrypt_with_joint_shadow(threshold: usize, access_key: &Secret, encrypte
	Ok(decrypted_point)
}

/// Prepare common point for shadow decryption.
pub fn make_common_shadow_point(threshold: usize, mut common_point: Public) -> Result<Public, Error> {
	if threshold % 2 != 1 {
		Ok(common_point)
	} else {
		math::public_negate(&mut common_point)?;
		Ok(common_point)
	}
}

#[cfg(test)]
/// Decrypt shadow-encrypted secret.
pub fn decrypt_with_shadow_coefficients(mut decrypted_shadow: Public, mut common_shadow_point: Public, shadow_coefficients: Vec<Secret>) -> Result<Public, Error> {
	let mut shadow_coefficients_sum = shadow_coefficients[0].clone();
	for shadow_coefficient in shadow_coefficients.iter().skip(1) {
		shadow_coefficients_sum.add(shadow_coefficient)?;
	}
	math::public_mul_secret(&mut common_shadow_point, &shadow_coefficients_sum)?;
	math::public_add(&mut decrypted_shadow, &common_shadow_point)?;
	Ok(decrypted_shadow)
}

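In equation form, `decrypt_with_shadow_coefficients` above computes

	decrypted = decrypted_shadow + (shadow_1 + shadow_2 + ... + shadow_n) * common_shadow_point

which is the same recombination step that a shadow-decryption client performs with its own decrypted shadow coefficients (see the KeyServer trait further down in this diff).
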
#[cfg(test)]
/// Decrypt data using joint secret (version for tests).
pub fn decrypt_with_joint_secret(encrypted_point: &Public, common_point: &Public, joint_secret: &Secret) -> Result<Public, Error> {
@ -287,7 +321,10 @@ pub mod tests {
			.filter(|&(j, _)| j != i)
			.take(t)
			.map(|(_, id_number)| id_number)).unwrap()).collect();
		let nodes_shadow_points: Vec<_> = nodes_shadows.iter().map(|s| compute_node_shadow_point(&access_key, &encrypted_secret.common_point, s).unwrap()).collect();
		let nodes_shadow_points: Vec<_> = nodes_shadows.iter()
			.map(|s| compute_node_shadow_point(&access_key, &encrypted_secret.common_point, s, None).unwrap())
			.map(|sp| sp.0)
			.collect();
		assert_eq!(nodes_shadows.len(), t + 1);
		assert_eq!(nodes_shadow_points.len(), t + 1);


@ -218,6 +218,9 @@ pub struct InitializeDecryptionSession {
	pub sub_session: SerializableSecret,
	/// Requestor signature.
	pub requestor_signature: SerializableSignature,
	/// Is shadow decryption requested? When true, decryption result
	/// will be visible to the owner of requestor public key only.
	pub is_shadow_decryption: bool,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
@ -251,6 +254,8 @@ pub struct PartialDecryption {
	pub sub_session: SerializableSecret,
	/// Partially decrypted secret.
	pub shadow_point: SerializablePublic,
	/// Decrypt shadow coefficient (if requested), encrypted with requestor public.
	pub decrypt_shadow: Option<Vec<u8>>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]

@ -20,7 +20,7 @@ use ethkey;
use ethcrypto;
use super::types::all::DocumentAddress;

pub use super::types::all::{NodeId, EncryptionConfiguration};
pub use super::types::all::{NodeId, EncryptionConfiguration, DocumentEncryptedKeyShadow};
pub use super::acl_storage::AclStorage;
pub use super::key_storage::{KeyStorage, DocumentKeyShare};
pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic};

@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::net;
use ethkey::Secret;
use ethkey::KeyPair;
use key_server_cluster::NodeId;
use key_server_cluster::io::SharedTcpStream;

@ -28,5 +28,5 @@ pub struct Connection {
	/// Peer node id.
	pub node_id: NodeId,
	/// Encryption key.
	pub key: Secret,
	pub key: KeyPair,
}

@ -43,6 +43,8 @@ pub trait KeyStorage: Send + Sync {
	fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error>;
	/// Get document encryption key
	fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error>;
	/// Check if storage contains document encryption key
	fn contains(&self, document: &DocumentAddress) -> bool;
}

/// Persistent document encryption keys storage
@ -95,6 +97,12 @@ impl KeyStorage for PersistentKeyStorage {
			.and_then(|key| serde_json::from_slice::<SerializableDocumentKeyShare>(&key).map_err(|e| Error::Database(e.to_string())))
			.map(Into::into)
	}

	fn contains(&self, document: &DocumentAddress) -> bool {
		self.db.get(None, document)
			.map(|k| k.is_some())
			.unwrap_or(false)
	}
}

impl From<DocumentKeyShare> for SerializableDocumentKeyShare {
@ -146,6 +154,10 @@ pub mod tests {
	fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error> {
		self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound)
	}

	fn contains(&self, document: &DocumentAddress) -> bool {
		self.keys.read().contains_key(document)
	}
}

#[test]

@ -27,6 +27,7 @@ extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
extern crate tokio_io;
extern crate tokio_core;
extern crate tokio_service;
extern crate tokio_proto;

@ -17,11 +17,62 @@
use std::fmt;
use std::cmp::{Ord, PartialOrd, Ordering};
use std::ops::Deref;
use rustc_serialize::hex::ToHex;
use rustc_serialize::hex::{ToHex, FromHex};
use serde::{Serialize, Deserialize, Serializer, Deserializer};
use serde::de::{Visitor, Error as SerdeError};
use ethkey::{Public, Secret, Signature};
use util::H256;
use util::{H256, Bytes};

#[derive(Clone, Debug, Serialize, Deserialize)]
/// Serializable shadow decryption result.
pub struct SerializableDocumentEncryptedKeyShadow {
	/// Decrypted secret point. It is partially decrypted if shadow decryption was requested.
	pub decrypted_secret: SerializablePublic,
	/// Shared common point.
	pub common_point: SerializablePublic,
	/// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public.
	pub decrypt_shadows: Vec<SerializableBytes>,
}

#[derive(Clone, Debug)]
/// Serializable Bytes.
pub struct SerializableBytes(Bytes);

impl<T> From<T> for SerializableBytes where Bytes: From<T> {
	fn from(s: T) -> SerializableBytes {
		SerializableBytes(s.into())
	}
}

impl Into<Bytes> for SerializableBytes {
	fn into(self) -> Bytes {
		self.0
	}
}

impl Deref for SerializableBytes {
	type Target = Bytes;

	fn deref(&self) -> &Bytes {
		&self.0
	}
}

impl Serialize for SerializableBytes {
	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
		serializer.serialize_str(&(*self.0).to_hex())
	}
}

impl Deserialize for SerializableBytes {
	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
		where D: Deserializer
	{
		let s = String::deserialize(deserializer)?;
		let data = s.from_hex().map_err(SerdeError::custom)?;
		Ok(SerializableBytes(data))
	}
}

#[derive(Clone, Debug)]
/// Serializable Signature.

@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey};
use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, DocumentEncryptedKeyShadow};

#[ipc(client_ident="RemoteKeyServer")]
/// Secret store key server
@ -23,4 +23,12 @@ pub trait KeyServer: Send + Sync {
	fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result<DocumentEncryptedKey, Error>;
	/// Request encryption key of given document for given requestor
	fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error>;
	/// Request encryption key of given document for given requestor.
	/// This method does not reveal document_key to any KeyServer, but it requires additional actions on the client.
	/// To calculate the decrypted key on the client:
	/// 1) use requestor secret key to decrypt secret coefficients from result.decrypt_shadows
	/// 2) calculate decrypt_shadows_sum = sum of all secrets from (1)
	/// 3) calculate decrypt_shadow_point: decrypt_shadows_sum * result.common_point
	/// 4) calculate decrypted_secret: result.decrypted_secret + decrypt_shadow_point
	fn document_key_shadow(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKeyShadow, Error>;
}

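Below is a sketch of the client-side recombination that the `document_key_shadow` doc comment describes. All helper names and stand-in types (`ecies_decrypt`, `secret_add`, `public_mul_secret`, `public_add`, the `Secret`/`Public`/`Error` stubs) are illustrative assumptions standing in for whatever ECIES/secp256k1 primitives the client uses; none of them are APIs defined by this crate.

// Stand-in types and primitives; a real client would use its own crypto library.
#[derive(Clone)]
pub struct Secret;
pub struct Public;
pub struct Error;

fn ecies_decrypt(_requestor_secret: &Secret, _encrypted: &[u8]) -> Result<Secret, Error> { unimplemented!() }
fn secret_add(_into: &mut Secret, _other: &Secret) -> Result<(), Error> { unimplemented!() }
fn public_mul_secret(_point: &mut Public, _scalar: &Secret) -> Result<(), Error> { unimplemented!() }
fn public_add(_into: &mut Public, _other: &Public) -> Result<(), Error> { unimplemented!() }

/// Steps 1-4 from the doc comment above, spelled out.
pub fn recover_document_key(
	requestor_secret: &Secret,
	decrypted_secret: Public,
	common_point: Public,
	decrypt_shadows: &[Vec<u8>],
) -> Result<Public, Error> {
	// 1) decrypt every shadow coefficient with the requestor secret key.
	let mut shadows = Vec::new();
	for encrypted in decrypt_shadows {
		shadows.push(ecies_decrypt(requestor_secret, encrypted)?);
	}
	// 2) decrypt_shadows_sum = sum of all decrypted coefficients.
	let mut shadows_sum = shadows[0].clone();
	for shadow in &shadows[1..] {
		secret_add(&mut shadows_sum, shadow)?;
	}
	// 3) decrypt_shadow_point = decrypt_shadows_sum * common_point.
	let mut decrypt_shadow_point = common_point;
	public_mul_secret(&mut decrypt_shadow_point, &shadows_sum)?;
	// 4) decrypted_secret + decrypt_shadow_point is the document key point.
	let mut document_key = decrypted_secret;
	public_add(&mut document_key, &decrypt_shadow_point)?;
	Ok(document_key)
}
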
@ -50,7 +50,7 @@ pub enum Error {
	Internal(String),
}

#[derive(Debug)]
#[derive(Debug, Clone)]
#[binary]
/// Secret store configuration
pub struct NodeAddress {
@ -99,6 +99,18 @@ pub struct EncryptionConfiguration {
	pub key_check_timeout_ms: u64,
}

#[derive(Clone, Debug, PartialEq)]
#[binary]
/// Shadow decryption result.
pub struct DocumentEncryptedKeyShadow {
	/// Decrypted secret point. It is partially decrypted if shadow decryption was requested.
	pub decrypted_secret: ethkey::Public,
	/// Shared common point.
	pub common_point: Option<ethkey::Public>,
	/// If shadow decryption was requested: shadow decryption coefficients, encrypted with requestor public.
	pub decrypt_shadows: Option<Vec<Vec<u8>>>,
}

impl fmt::Display for Error {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		match *self {

@ -23,11 +23,11 @@ use traits::Encodable;
struct ListInfo {
	position: usize,
	current: usize,
	max: usize,
	max: Option<usize>,
}

impl ListInfo {
	fn new(position: usize, max: usize) -> ListInfo {
	fn new(position: usize, max: Option<usize>) -> ListInfo {
		ListInfo {
			position: position,
			current: 0,
@ -133,7 +133,7 @@ impl RlpStream {
				self.buffer.push(0);

				let position = self.buffer.len();
				self.unfinished_lists.push(ListInfo::new(position, len));
				self.unfinished_lists.push(ListInfo::new(position, Some(len)));
			},
		}

@ -141,6 +141,19 @@ impl RlpStream {
		self
	}


	/// Declare appending the list of unknown size, chainable.
	pub fn begin_unbounded_list(&mut self) -> &mut RlpStream {
		self.finished_list = false;
		// payload is longer than 1 byte only for lists > 55 bytes
		// by pushing always this 1 byte we may avoid unnecessary shift of data
		self.buffer.push(0);
		let position = self.buffer.len();
		self.unfinished_lists.push(ListInfo::new(position, None));
		// return chainable self
		self
	}

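A quick usage sketch of the new unbounded-list pair; the full test appears at the end of this diff as test_rlp_stream_unbounded_list. This assumes the crate is used as `rlp` and that `drain()` behaves as in the existing tests; it is an illustration, not an API addition.

use rlp::RlpStream;

fn encode_unbounded() -> Vec<u8> {
	let mut stream = RlpStream::new();
	stream.begin_unbounded_list();
	stream.append(&"cat");
	stream.append(&"dog");
	// the stream is only finished once the unbounded list is explicitly closed
	stream.complete_unbounded_list();
	stream.drain().to_vec()
}
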
	/// Appends null to the end of stream, chainable.
	///
	/// ```rust
@ -177,6 +190,36 @@ impl RlpStream {
		self
	}

	/// Appends raw (pre-serialised) RLP data. Checks for size overflow.
	pub fn append_raw_checked<'a>(&'a mut self, bytes: &[u8], item_count: usize, max_size: usize) -> bool {
		if self.estimate_size(bytes.len()) > max_size {
			return false;
		}
		self.append_raw(bytes, item_count);
		true
	}

	/// Calculate total RLP size for appended payload.
	pub fn estimate_size<'a>(&'a self, add: usize) -> usize {
		let total_size = self.buffer.len() + add;
		let mut base_size = total_size;
		for list in &self.unfinished_lists[..] {
			let len = total_size - list.position;
			if len > 55 {
				let leading_empty_bytes = (len as u64).leading_zeros() as usize / 8;
				let size_bytes = 8 - leading_empty_bytes;
				base_size += size_bytes;
			}
		}
		base_size
	}

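To make the length-prefix adjustment in `estimate_size` concrete: for each still-open list whose payload exceeds 55 bytes, the estimate budgets the extra bytes needed to encode that payload length. Worked through for a pending payload of 300 bytes:

	len            = 300 pending payload bytes
	leading_zeros  = (300u64).leading_zeros() = 55
	size_bytes     = 8 - 55 / 8 = 8 - 6 = 2 extra bytes reserved for the length prefix
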

	/// Returns current RLP size in bytes for the data pushed into the list.
	pub fn len<'a>(&'a self) -> usize {
		self.estimate_size(0)
	}

	/// Clear the output stream so far.
	///
	/// ```rust
@ -246,10 +289,11 @@ impl RlpStream {
			None => false,
			Some(ref mut x) => {
				x.current += inserted_items;
				if x.current > x.max {
					panic!("You cannot append more items than you expect!");
				match x.max {
					Some(ref max) if x.current > *max => panic!("You cannot append more items than you expect!"),
					Some(ref max) => x.current == *max,
					_ => false,
				}
				x.current == x.max
			}
		};

@ -273,6 +317,17 @@ impl RlpStream {
			false => panic!()
		}
	}

	/// Finalize current unbounded list. Panics if no unbounded list has been opened.
	pub fn complete_unbounded_list(&mut self) {
		let list = self.unfinished_lists.pop().expect("No open list.");
		if list.max.is_some() {
			panic!("List type mismatch.");
		}
		let len = self.buffer.len() - list.position;
		self.encoder().insert_list_payload(len, list.position);
		self.note_appended(1);
	}
}

pub struct BasicEncoder<'a> {

@ -412,3 +412,25 @@ fn test_rlp_list_length_overflow() {
	let as_val: Result<String, DecoderError> = rlp.val_at(0);
	assert_eq!(Err(DecoderError::RlpIsTooShort), as_val);
}

#[test]
fn test_rlp_stream_size_limit() {
	for limit in 40 .. 270 {
		let item = [0u8; 1];
		let mut stream = RlpStream::new();
		while stream.append_raw_checked(&item, 1, limit) {}
		assert_eq!(stream.drain().len(), limit);
	}
}

#[test]
fn test_rlp_stream_unbounded_list() {
	let mut stream = RlpStream::new();
	stream.begin_unbounded_list();
	stream.append(&40u32);
	stream.append(&41u32);
	assert!(!stream.is_finished());
	stream.complete_unbounded_list();
	assert!(stream.is_finished());
}
