diff --git a/Cargo.lock b/Cargo.lock index fba56d466..16046c480 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3,6 +3,7 @@ name = "wasm" version = "0.1.0" dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "ethcore-logger 1.8.0", "ethcore-util 1.8.0", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -196,7 +197,7 @@ name = "bloomable" version = "0.1.0" dependencies = [ "ethcore-bigint 0.1.3", - "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hash 0.1.0", ] [[package]] @@ -299,8 +300,10 @@ name = "common-types" version = "0.1.0" dependencies = [ "bloomable 0.1.0", + "ethcore-bigint 0.1.3", "ethcore-util 1.8.0", "ethjson 0.1.0", + "hash 0.1.0", "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", "rlp_derive 0.1.0", @@ -452,6 +455,14 @@ dependencies = [ "backtrace 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "error-chain" +version = "0.11.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "eth-secp256k1" version = "0.5.6" @@ -481,16 +492,17 @@ name = "ethash" version = "1.8.0" dependencies = [ "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "hash 0.1.0", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "sha3 0.1.0", ] [[package]] name = "ethcore" version = "1.8.0" dependencies = [ + "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomable 0.1.0", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -502,6 +514,7 @@ dependencies = [ "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-bloom-journal 0.1.0", "ethcore-devtools 1.8.0", "ethcore-io 1.8.0", @@ -517,6 +530,7 @@ dependencies = [ "evm 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "hardware-wallet 1.8.0", + "hash 0.1.0", "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", @@ -527,17 +541,21 @@ dependencies = [ "native-contracts 0.1.0", "num 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "price-info 1.7.0", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", "rlp_derive 0.1.0", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "semantic_version 0.1.0", "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "stats 0.1.0", "table 0.1.0", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "transient-hashmap 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "triehash 0.1.0", + "unexpected 0.1.0", 
"using_queue 0.1.0", "vm 0.1.0", "wasm 0.1.0", @@ -584,6 +602,7 @@ dependencies = [ name = "ethcore-ipc" version = "1.8.0" dependencies = [ + "ethcore-bigint 0.1.3", "ethcore-devtools 1.8.0", "ethcore-util 1.8.0", "nanomsg 0.5.1 (git+https://github.com/paritytech/nanomsg.rs.git?branch=parity-1.7)", @@ -646,6 +665,7 @@ version = "1.8.0" dependencies = [ "bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-devtools 1.8.0", "ethcore-io 1.8.0", "ethcore-ipc 1.8.0", @@ -654,9 +674,11 @@ dependencies = [ "ethcore-util 1.8.0", "evm 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "hash 0.1.0", "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", "rlp_derive 0.1.0", @@ -665,6 +687,7 @@ dependencies = [ "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "stats 0.1.0", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "triehash 0.1.0", "vm 0.1.0", ] @@ -690,12 +713,14 @@ dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "ethcore-devtools 1.8.0", "ethcore-io 1.8.0", "ethcore-logger 1.8.0", "ethcore-util 1.8.0", "ethcrypto 0.1.0", "ethkey 0.2.0", + "hash 0.1.0", "igd 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "ipnetwork 0.12.6 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", @@ -720,6 +745,7 @@ dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-devtools 1.8.0", "ethcore-ipc 1.8.0", "ethcore-ipc-codegen 1.8.0", @@ -730,6 +756,7 @@ dependencies = [ "ethkey 0.2.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "hash 0.1.0", "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -751,6 +778,7 @@ name = "ethcore-stratum" version = "1.8.0" dependencies = [ "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "ethcore-devtools 1.8.0", "ethcore-ipc 1.8.0", "ethcore-ipc-codegen 1.8.0", @@ -758,11 +786,13 @@ dependencies = [ "ethcore-logger 1.8.0", "ethcore-util 1.8.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "hash 0.1.0", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 
(registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -771,31 +801,29 @@ dependencies = [ name = "ethcore-util" version = "1.8.0" dependencies = [ - "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)", "ethcore-bigint 0.1.3", "ethcore-bloom-journal 0.1.0", "ethcore-devtools 1.8.0", "ethcore-logger 1.8.0", + "hash 0.1.0", "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "lru-cache 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", "rocksdb 0.4.5 (git+https://github.com/paritytech/rust-rocksdb)", - "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "sha3 0.1.0", "target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "triehash 0.1.0", "vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -892,6 +920,7 @@ dependencies = [ "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-devtools 1.8.0", "ethcore-io 1.8.0", "ethcore-ipc 1.8.0", @@ -901,6 +930,7 @@ dependencies = [ "ethcore-network 1.8.0", "ethcore-util 1.8.0", "ethkey 0.2.0", + "hash 0.1.0", "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "ipnetwork 0.12.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -910,6 +940,7 @@ dependencies = [ "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "triehash 0.1.0", ] [[package]] @@ -919,14 +950,17 @@ dependencies = [ "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", + "ethcore-bigint 0.1.3", "ethcore-logger 1.8.0", "ethcore-util 1.8.0", "ethjson 0.1.0", "evmjit 1.8.0", + "hash 0.1.0", "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 
(registry+https://github.com/rust-lang/crates.io-index)", "parity-wasm 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "vm 0.1.0", @@ -939,6 +973,7 @@ version = "0.1.0" dependencies = [ "docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-util 1.8.0", "ethjson 0.1.0", "evm 0.1.0", @@ -1066,6 +1101,16 @@ dependencies = [ "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "hash" +version = "0.1.0" +dependencies = [ + "ethcore-bigint 0.1.3", + "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", + "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "heapsize" version = "0.4.0" @@ -1189,6 +1234,7 @@ dependencies = [ name = "ipc-common-types" version = "1.8.0" dependencies = [ + "ethcore-bigint 0.1.3", "ethcore-ipc 1.8.0", "ethcore-ipc-codegen 1.8.0", "ethcore-util 1.8.0", @@ -1624,12 +1670,14 @@ name = "node-filter" version = "1.8.0" dependencies = [ "ethcore 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-io 1.8.0", "ethcore-network 1.8.0", "ethcore-util 1.8.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "native-contracts 0.1.0", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1828,12 +1876,14 @@ version = "1.8.0" dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.24.2 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)", "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-devtools 1.8.0", "ethcore-io 1.8.0", "ethcore-ipc 1.8.0", @@ -1851,6 +1901,7 @@ dependencies = [ "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "hash 0.1.0", "ipnetwork 0.12.6 (registry+https://github.com/rust-lang/crates.io-index)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1869,6 +1920,7 @@ dependencies = [ "parity-rpc-client 1.4.0", "parity-updater 1.8.0", "parity-whisper 0.1.0", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "path 0.1.0", "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1894,10 +1946,12 @@ dependencies = [ "base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "ethcore-devtools 1.8.0", "ethcore-util 1.8.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "hash 0.1.0", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1910,6 +1964,7 @@ dependencies = [ "parity-hash-fetch 1.8.0", "parity-reactor 0.1.0", "parity-ui 1.8.0", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1940,14 +1995,17 @@ name = "parity-hash-fetch" version = "1.8.0" dependencies = [ "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.3", "ethcore-util 1.8.0", "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "hash 0.1.0", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "native-contracts 0.1.0", "parity-reactor 0.1.0", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1958,6 +2016,7 @@ version = "1.8.0" dependencies = [ "cid 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-util 1.8.0", "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1992,10 +2051,12 @@ dependencies = [ name = "parity-rpc" version = "1.8.0" dependencies = [ + "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "cid 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.8.0", "ethcore 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-devtools 1.8.0", "ethcore-io 1.8.0", "ethcore-ipc 1.8.0", @@ -2010,6 +2071,7 @@ dependencies = [ "fetch 0.1.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "hash 0.1.0", "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -2024,6 +2086,7 @@ dependencies = [ "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", "parity-updater 1.8.0", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", @@ -2046,11 +2109,13 @@ version = "1.4.0" dependencies = [ "ethcore-util 1.8.0", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + 
"hash 0.1.0", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc 1.8.0", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2094,7 +2159,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#d809723e58bcb36c0f8d2eca5ca94abbb3690544" +source = "git+https://github.com/paritytech/js-precompiled.git#75e4afa0b77396aa8feefb49276672c3fe885a88" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2105,6 +2170,7 @@ version = "1.8.0" dependencies = [ "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.8.0", + "ethcore-bigint 0.1.3", "ethcore-ipc 1.8.0", "ethcore-ipc-codegen 1.8.0", "ethcore-util 1.8.0", @@ -2114,6 +2180,7 @@ dependencies = [ "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "parity-hash-fetch 1.8.0", "parity-reactor 0.1.0", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "path 0.1.0", "target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2609,6 +2676,10 @@ dependencies = [ "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "semantic_version" +version = "0.1.0" + [[package]] name = "semver" version = "0.1.20" @@ -2694,13 +2765,6 @@ name = "sha1" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "sha3" -version = "0.1.0" -dependencies = [ - "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "shell32-sys" version = "0.1.1" @@ -3072,11 +3136,24 @@ dependencies = [ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "triehash" +version = "0.1.0" +dependencies = [ + "ethcore-bigint 0.1.3", + "hash 0.1.0", + "rlp 0.2.0", +] + [[package]] name = "typeable" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "unexpected" +version = "0.1.0" + [[package]] name = "unicase" version = "1.4.0" @@ -3182,9 +3259,11 @@ version = "0.1.0" dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", + "ethcore-bigint 0.1.3", "ethcore-util 1.8.0", "ethjson 0.1.0", "evmjit 1.8.0", + "hash 0.1.0", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.0", @@ -3326,6 +3405,7 @@ dependencies = [ "checksum either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2b503c86dad62aaf414ecf2b8c527439abedb3f8d812537f0b12bfd6f32a91" "checksum elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "258ff6a9a94f648d0379dbd79110e057edbb53eb85cc237e33eadf8e5a30df85" "checksum env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e3856f1697098606fc6cb97a93de88ca3f3bc35bb878c725920e6e82ecf05e83" 
+"checksum error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)" = "38d3a55d9a7a456748f2a3912c0941a5d9a68006eb15b3c3c9836b8420dc102d" "checksum error-chain 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bd5c82c815138e278b8dcdeffc49f27ea6ffb528403e9dea4194f2e3dd40b143" "checksum eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)" = "" "checksum ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0c3d62319ee0f35abf20afe8859dd2668195912614346447bb2dee9fb8da7c62" diff --git a/Cargo.toml b/Cargo.toml index 06abd7b31..5f3a6ad73 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,12 +11,14 @@ log = "0.3" env_logger = "0.4" rustc-hex = "1.0" docopt = "0.8" +clap = "2" time = "0.1" num_cpus = "1.2" number_prefix = "0.2" rpassword = "0.2.1" semver = "0.6" ansi_term = "0.9" +parking_lot = "0.4" regex = "0.2" isatty = "0.1" toml = "0.4" @@ -33,6 +35,7 @@ jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "pa ethsync = { path = "sync" } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } +ethcore-bigint = { path = "util/bigint" } ethcore-io = { path = "util/io" } ethcore-devtools = { path = "devtools" } ethcore-ipc = { path = "ipc/rpc" } @@ -57,6 +60,7 @@ parity-updater = { path = "updater" } parity-whisper = { path = "whisper" } path = { path = "util/path" } panic_hook = { path = "panic_hook" } +hash = { path = "util/hash" } parity-dapps = { path = "dapps", optional = true } clippy = { version = "0.0.103", optional = true} diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index b1751c616..2c55b79cb 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -13,6 +13,7 @@ futures = "0.1" linked-hash-map = "0.3" log = "0.3" parity-dapps-glue = "1.7" +parking_lot = "0.4" mime = "0.2" mime_guess = "1.6.1" rand = "0.3" @@ -30,11 +31,13 @@ jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "pa jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } fetch = { path = "../util/fetch" } node-health = { path = "./node-health" } parity-hash-fetch = { path = "../hash-fetch" } parity-reactor = { path = "../util/reactor" } parity-ui = { path = "./ui" } +hash = { path = "../util/hash" } clippy = { version = "0.0.103", optional = true} diff --git a/dapps/src/apps/fetcher/installers.rs b/dapps/src/apps/fetcher/installers.rs index 82c91c859..cb7fa1671 100644 --- a/dapps/src/apps/fetcher/installers.rs +++ b/dapps/src/apps/fetcher/installers.rs @@ -19,9 +19,9 @@ use std::{fs, fmt}; use std::io::{self, Read, Write}; use std::path::PathBuf; use fetch::{self, Mime}; -use util::H256; +use hash::keccak_buffer; +use bigint::hash::H256; -use util::sha3::sha3; use page::{LocalPageEndpoint, PageCache}; use handlers::{ContentValidator, ValidatorResponse}; use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest}; @@ -57,9 +57,9 @@ fn write_response_and_check_hash( file.flush()?; // Validate hash - // TODO [ToDr] calculate sha3 in-flight while reading the response + // TODO [ToDr] calculate keccak in-flight while reading the response let mut file = io::BufReader::new(fs::File::open(&content_path)?); - let hash = sha3(&mut file)?; + let hash = keccak_buffer(&mut file)?; if id == hash { Ok((file.into_inner(), content_path)) } else { diff --git a/dapps/src/apps/fetcher/mod.rs b/dapps/src/apps/fetcher/mod.rs index 5b91da1a3..fe529d772 
100644 --- a/dapps/src/apps/fetcher/mod.rs +++ b/dapps/src/apps/fetcher/mod.rs @@ -32,7 +32,7 @@ use hyper; use hyper::status::StatusCode; use {Embeddable, SyncStatus, random_filename}; -use util::Mutex; +use parking_lot::Mutex; use page::LocalPageEndpoint; use handlers::{ContentHandler, ContentFetcherHandler}; use endpoint::{Endpoint, EndpointPath, Handler}; diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs index 7740247d9..afe1f5083 100644 --- a/dapps/src/handlers/fetch.rs +++ b/dapps/src/handlers/fetch.rs @@ -23,7 +23,7 @@ use std::time::{Instant, Duration}; use fetch::{self, Fetch}; use futures::Future; use parity_reactor::Remote; -use util::Mutex; +use parking_lot::Mutex; use hyper::{server, Decoder, Encoder, Next, Method, Control}; use hyper::net::HttpStream; diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 073db5121..0cb0833af 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -24,6 +24,7 @@ extern crate futures; extern crate itertools; extern crate linked_hash_map; extern crate mime_guess; +extern crate parking_lot; extern crate rand; extern crate rustc_hex; extern crate serde; @@ -37,12 +38,14 @@ extern crate jsonrpc_core; extern crate jsonrpc_http_server; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate fetch; extern crate node_health; extern crate parity_dapps_glue as parity_dapps; extern crate parity_hash_fetch as hash_fetch; extern crate parity_reactor; extern crate parity_ui; +extern crate hash; #[macro_use] extern crate log; @@ -72,7 +75,7 @@ use std::collections::HashMap; use std::mem; use std::path::PathBuf; use std::sync::Arc; -use util::RwLock; +use parking_lot::RwLock; use jsonrpc_http_server::{self as http, hyper, Origin}; diff --git a/dapps/src/tests/helpers/fetch.rs b/dapps/src/tests/helpers/fetch.rs index e6e875c51..853d6857e 100644 --- a/dapps/src/tests/helpers/fetch.rs +++ b/dapps/src/tests/helpers/fetch.rs @@ -16,7 +16,7 @@ use std::{io, thread, time}; use std::sync::{atomic, mpsc, Arc}; -use util::Mutex; +use parking_lot::Mutex; use futures::{self, Future}; use fetch::{self, Fetch}; diff --git a/dapps/src/tests/helpers/registrar.rs b/dapps/src/tests/helpers/registrar.rs index d7890675b..41f296f8d 100644 --- a/dapps/src/tests/helpers/registrar.rs +++ b/dapps/src/tests/helpers/registrar.rs @@ -20,7 +20,9 @@ use std::collections::HashMap; use rustc_hex::FromHex; use hash_fetch::urlhint::ContractClient; -use util::{Bytes, Address, Mutex, H256, ToPretty}; +use bigint::hash::H256; +use util::{Bytes, Address, ToPretty}; +use parking_lot::Mutex; const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2"; const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000"; diff --git a/ethash/Cargo.toml b/ethash/Cargo.toml index 87069d6f8..aa415c1f9 100644 --- a/ethash/Cargo.toml +++ b/ethash/Cargo.toml @@ -7,10 +7,10 @@ authors = ["Parity Technologies "] [dependencies] log = "0.3" -sha3 = { path = "../util/sha3" } +hash = { path = "../util/hash" } primal = "0.2.3" parking_lot = "0.4" crunchy = "0.1.0" [features] -benches = [] \ No newline at end of file +benches = [] diff --git a/ethash/src/compute.rs b/ethash/src/compute.rs index ab2c758df..9eb042147 100644 --- a/ethash/src/compute.rs +++ b/ethash/src/compute.rs @@ -23,7 +23,7 @@ use primal::is_prime; use std::cell::Cell; use std::mem; use std::ptr; -use sha3; +use hash; use std::slice; use std::path::{Path, PathBuf}; use std::io::{self, Read, Write}; @@ -200,7 +200,7 @@ impl SeedHashCompute { #[inline] pub fn 
resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 { for _ in start_epoch..end_epoch { - unsafe { sha3::sha3_256(hash[..].as_mut_ptr(), 32, hash[..].as_ptr(), 32) }; + unsafe { hash::keccak_256(hash[..].as_mut_ptr(), 32, hash[..].as_ptr(), 32) }; } hash } @@ -214,14 +214,14 @@ fn fnv_hash(x: u32, y: u32) -> u32 { return x.wrapping_mul(FNV_PRIME) ^ y; } -fn sha3_512(input: &[u8], output: &mut [u8]) { - unsafe { sha3::sha3_512(output.as_mut_ptr(), output.len(), input.as_ptr(), input.len()) }; +fn keccak_512(input: &[u8], output: &mut [u8]) { + unsafe { hash::keccak_512(output.as_mut_ptr(), output.len(), input.as_ptr(), input.len()) }; } -fn sha3_512_inplace(input: &mut [u8]) { - // This is safe since `sha3_*` uses an internal buffer and copies the result to the output. This +fn keccak_512_inplace(input: &mut [u8]) { + // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This // means that we can reuse the input buffer for both input and output. - unsafe { sha3::sha3_512(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()) }; + unsafe { hash::keccak_512(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()) }; } fn get_cache_size(block_number: u64) -> usize { @@ -250,23 +250,23 @@ fn get_data_size(block_number: u64) -> usize { /// Boundary recovered from mix hash pub fn quick_get_difficulty(header_hash: &H256, nonce: u64, mix_hash: &H256) -> H256 { unsafe { - // This is safe - the `sha3_512` call below reads the first 40 bytes (which we explicitly set + // This is safe - the `keccak_512` call below reads the first 40 bytes (which we explicitly set // with two `copy_nonoverlapping` calls) but writes the first 64, and then we explicitly write - // the next 32 bytes before we read the whole thing with `sha3_256`. + // the next 32 bytes before we read the whole thing with `keccak_256`. // // This cannot be elided by the compiler as it doesn't know the implementation of - // `sha3_512`. + // `keccak_512`. let mut buf: [u8; 64 + 32] = mem::uninitialized(); ptr::copy_nonoverlapping(header_hash.as_ptr(), buf.as_mut_ptr(), 32); ptr::copy_nonoverlapping(mem::transmute(&nonce), buf[32..].as_mut_ptr(), 8); - sha3::sha3_512(buf.as_mut_ptr(), 64, buf.as_ptr(), 40); + hash::keccak_512(buf.as_mut_ptr(), 64, buf.as_ptr(), 40); ptr::copy_nonoverlapping(mix_hash.as_ptr(), buf[64..].as_mut_ptr(), 32); - // This is initialized in `sha3_256` + // This is initialized in `keccak_256` let mut hash: [u8; 32] = mem::uninitialized(); - sha3::sha3_256(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len()); + hash::keccak_256(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len()); hash } @@ -320,7 +320,7 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64) half_mix: unsafe { // Pack `header_hash` and `nonce` together // We explicitly write the first 40 bytes, leaving the last 24 as uninitialized. Then - // `sha3_512` reads the first 40 bytes (4th parameter) and overwrites the entire array, + // `keccak_512` reads the first 40 bytes (4th parameter) and overwrites the entire array, // leaving it fully initialized. 
let mut out: [u8; NODE_BYTES] = mem::uninitialized(); @@ -335,8 +335,8 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64) mem::size_of::(), ); - // compute sha3-512 hash and replicate across mix - sha3::sha3_512( + // compute keccak-512 hash and replicate across mix + hash::keccak_512( out.as_mut_ptr(), NODE_BYTES, out.as_ptr(), @@ -427,10 +427,10 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64) let value: H256 = unsafe { // We can interpret the buffer as an array of `u8`s, since it's `repr(C)`. let read_ptr: *const u8 = mem::transmute(&buf); - // We overwrite the second half since `sha3_256` has an internal buffer and so allows + // We overwrite the second half since `keccak_256` has an internal buffer and so allows // overlapping arrays as input. let write_ptr: *mut u8 = mem::transmute(&mut buf.compress_bytes); - sha3::sha3_256( + hash::keccak_256( write_ptr, buf.compress_bytes.len(), read_ptr, @@ -450,7 +450,7 @@ fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node { let mut ret = cache[node_index as usize % num_parent_nodes].clone(); ret.as_words_mut()[0] ^= node_index; - sha3_512_inplace(&mut ret.bytes); + keccak_512_inplace(&mut ret.bytes); debug_assert_eq!(NODE_WORDS, 16); for i in 0..ETHASH_DATASET_PARENTS as u32 { @@ -467,7 +467,7 @@ fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node { } } - sha3_512_inplace(&mut ret.bytes); + keccak_512_inplace(&mut ret.bytes); ret } @@ -485,9 +485,9 @@ fn light_new>(cache_dir: T, block_number: u64) -> Light { // Use uninit instead of unnecessarily writing `size_of::() * num_nodes` 0s nodes.set_len(num_nodes); - sha3_512(&seedhash[0..32], &mut nodes.get_unchecked_mut(0).bytes); + keccak_512(&seedhash[0..32], &mut nodes.get_unchecked_mut(0).bytes); for i in 1..num_nodes { - sha3::sha3_512(nodes.get_unchecked_mut(i).bytes.as_mut_ptr(), NODE_BYTES, nodes.get_unchecked(i - 1).bytes.as_ptr(), NODE_BYTES); + hash::keccak_512(nodes.get_unchecked_mut(i).bytes.as_mut_ptr(), NODE_BYTES, nodes.get_unchecked(i - 1).bytes.as_ptr(), NODE_BYTES); } debug_assert_eq!(NODE_WORDS, 16); @@ -504,7 +504,7 @@ fn light_new>(cache_dir: T, block_number: u64) -> Light { } } - sha3_512(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes); + keccak_512(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes); } } } diff --git a/ethash/src/lib.rs b/ethash/src/lib.rs index 9112546c4..f88e8a3f5 100644 --- a/ethash/src/lib.rs +++ b/ethash/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(feature = "benches", feature(test))] extern crate primal; -extern crate sha3; +extern crate hash; extern crate parking_lot; #[macro_use] diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 71d91df06..d96cc2dea 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -11,6 +11,7 @@ build = "build.rs" "ethcore-ipc-codegen" = { path = "../ipc/codegen" } [dependencies] +ansi_term = "0.9" bit-set = "0.4" bloomchain = "0.1" bn = { git = "https://github.com/paritytech/bn" } @@ -29,6 +30,7 @@ ethcore-ipc-nano = { path = "../ipc/nano" } ethcore-logger = { path = "../logger" } ethcore-stratum = { path = "../stratum" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } ethjson = { path = "../json" } ethkey = { path = "../ethkey" } ethstore = { path = "../ethstore" } @@ -45,6 +47,7 @@ lru-cache = "0.1.0" native-contracts = { path = "native_contracts" } num = "0.1" num_cpus = "1.2" +parking_lot = "0.4" price-info = { path = "../price-info" } rand = "0.3" rlp = { path = "../util/rlp" } @@ -60,6 
+63,10 @@ table = { path = "../util/table" } bloomable = { path = "../util/bloomable" } vm = { path = "vm" } wasm = { path = "wasm" } +hash = { path = "../util/hash" } +triehash = { path = "../util/triehash" } +semantic_version = { path = "../util/semantic_version" } +unexpected = { path = "../util/unexpected" } [dev-dependencies] native-contracts = { path = "native_contracts", features = ["test_contracts"] } diff --git a/ethcore/evm/Cargo.toml b/ethcore/evm/Cargo.toml index c3f9c03b9..89baaadde 100644 --- a/ethcore/evm/Cargo.toml +++ b/ethcore/evm/Cargo.toml @@ -8,6 +8,7 @@ bit-set = "0.4" byteorder = "1.0" common-types = { path = "../types" } ethcore-util = { path = "../../util" } +ethcore-bigint = { path = "../../util/bigint" } evmjit = { path = "../../evmjit", optional = true } ethjson = { path = "../../json" } heapsize = "0.4" @@ -16,8 +17,10 @@ log = "0.3" rlp = { path = "../../util/rlp" } vm = { path = "../vm" } parity-wasm = "0.12" +parking_lot = "0.4" ethcore-logger = { path = "../../logger" } wasm-utils = { git = "https://github.com/paritytech/wasm-utils" } +hash = { path = "../../util/hash" } [dev-dependencies] rustc-hex = "1.0" diff --git a/ethcore/evm/src/benches/mod.rs b/ethcore/evm/src/benches/mod.rs index ecb7d379a..c87fda7bb 100644 --- a/ethcore/evm/src/benches/mod.rs +++ b/ethcore/evm/src/benches/mod.rs @@ -24,6 +24,8 @@ extern crate test; use self::test::{Bencher, black_box}; +use bigint::prelude::U256; +use bigint::hash::H256; use util::*; use vm::ActionParams; use evm::{self, Factory, VMType}; diff --git a/ethcore/evm/src/evm.rs b/ethcore/evm/src/evm.rs index d593143a6..9eedb3c51 100644 --- a/ethcore/evm/src/evm.rs +++ b/ethcore/evm/src/evm.rs @@ -17,7 +17,8 @@ //! Evm interface. use std::{ops, cmp, fmt}; -use util::{U128, U256, U512}; + +use bigint::prelude::{U128, U256, U512}; use vm::{Ext, Result, ReturnData, GasLeft, Error}; /// Finalization result. Gas Left: either it is a known value, or it needs to be computed by processing @@ -149,7 +150,7 @@ impl CostType for usize { #[cfg(test)] mod tests { - use util::U256; + use bigint::prelude::U256; use super::CostType; #[test] diff --git a/ethcore/evm/src/factory.rs b/ethcore/evm/src/factory.rs index 20275dbff..3e9ee0e37 100644 --- a/ethcore/evm/src/factory.rs +++ b/ethcore/evm/src/factory.rs @@ -18,7 +18,7 @@ //! use std::sync::Arc; use vm::Vm; -use util::U256; +use bigint::prelude::U256; use super::interpreter::SharedCache; use super::vmtype::VMType; diff --git a/ethcore/evm/src/interpreter/gasometer.rs b/ethcore/evm/src/interpreter/gasometer.rs index 161c7db39..082868b95 100644 --- a/ethcore/evm/src/interpreter/gasometer.rs +++ b/ethcore/evm/src/interpreter/gasometer.rs @@ -15,7 +15,8 @@ // along with Parity. If not, see . use std::cmp; -use util::*; +use bigint::prelude::U256; +use bigint::hash::H256; use super::u256_to_address; use {evm, vm}; diff --git a/ethcore/evm/src/interpreter/informant.rs b/ethcore/evm/src/interpreter/informant.rs index a2a2c84c5..ac2505489 100644 --- a/ethcore/evm/src/interpreter/informant.rs +++ b/ethcore/evm/src/interpreter/informant.rs @@ -43,7 +43,7 @@ mod inner { use evm::instructions::{Instruction, InstructionInfo, INSTRUCTIONS}; use evm::{CostType}; - use util::U256; + use bigint::prelude::U256; macro_rules! 
evm_debug { ($x: expr) => { diff --git a/ethcore/evm/src/interpreter/memory.rs b/ethcore/evm/src/interpreter/memory.rs index 9aa9babc7..0369992cb 100644 --- a/ethcore/evm/src/interpreter/memory.rs +++ b/ethcore/evm/src/interpreter/memory.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::U256; +use bigint::prelude::U256; use vm::ReturnData; const MAX_RETURN_WASTE_BYTES: usize = 16384; @@ -134,7 +134,7 @@ impl Memory for Vec { #[cfg(test)] mod tests { - use util::U256; + use bigint::prelude::U256; use super::Memory; #[test] diff --git a/ethcore/evm/src/interpreter/mod.rs b/ethcore/evm/src/interpreter/mod.rs index cb8fe3a52..a59a37db0 100644 --- a/ethcore/evm/src/interpreter/mod.rs +++ b/ethcore/evm/src/interpreter/mod.rs @@ -26,6 +26,9 @@ mod shared_cache; use std::marker::PhantomData; use std::{cmp, mem}; use std::sync::Arc; +use hash::keccak; +use bigint::prelude::{U256, U512}; +use bigint::hash::H256; use vm::{ self, ActionParams, ActionValue, CallType, MessageCallResult, @@ -180,7 +183,7 @@ impl vm::Vm for Interpreter { match result { InstructionResult::JumpToPosition(position) => { if valid_jump_destinations.is_none() { - let code_hash = params.code_hash.clone().unwrap_or_else(|| code.sha3()); + let code_hash = params.code_hash.clone().unwrap_or_else(|| keccak(code.as_ref())); valid_jump_destinations = Some(self.cache.jump_destinations(&code_hash, code)); } let jump_destinations = valid_jump_destinations.as_ref().expect("jump_destinations are initialized on first jump; qed"); @@ -464,8 +467,8 @@ impl Interpreter { instructions::SHA3 => { let offset = stack.pop_back(); let size = stack.pop_back(); - let sha3 = self.mem.read_slice(offset, size).sha3(); - stack.push(U256::from(&*sha3)); + let k = keccak(self.mem.read_slice(offset, size)); + stack.push(U256::from(&*k)); }, instructions::SLOAD => { let key = H256::from(&stack.pop_back()); @@ -880,7 +883,7 @@ mod tests { use rustc_hex::FromHex; use vmtype::VMType; use factory::Factory; - use vm::{self, ActionParams, ActionValue}; + use vm::{ActionParams, ActionValue}; use vm::tests::{FakeExt, test_finalize}; #[test] diff --git a/ethcore/evm/src/interpreter/shared_cache.rs b/ethcore/evm/src/interpreter/shared_cache.rs index b582ce8a5..7b3b33b70 100644 --- a/ethcore/evm/src/interpreter/shared_cache.rs +++ b/ethcore/evm/src/interpreter/shared_cache.rs @@ -15,9 +15,10 @@ // along with Parity. If not, see . use std::sync::Arc; +use hash::KECCAK_EMPTY; use heapsize::HeapSizeOf; -use util::{H256, Mutex}; -use util::sha3::*; +use bigint::hash::H256; +use parking_lot::Mutex; use util::cache::MemoryLruCache; use bit_set::BitSet; use super::super::instructions; @@ -50,7 +51,7 @@ impl SharedCache { /// Get jump destinations bitmap for a contract. pub fn jump_destinations(&self, code_hash: &H256, code: &[u8]) -> Arc { - if code_hash == &SHA3_EMPTY { + if code_hash == &KECCAK_EMPTY { return Self::find_jump_destinations(code); } diff --git a/ethcore/evm/src/jit.rs b/ethcore/evm/src/jit.rs index 22262cbb6..d94bb7b8e 100644 --- a/ethcore/evm/src/jit.rs +++ b/ethcore/evm/src/jit.rs @@ -15,6 +15,8 @@ // along with Parity. If not, see . //! Just in time compiler execution environment. 
+use bigint::prelude::U256; +use bigint::hash::H256; use util::*; use evmjit; use evm::{self, GasLeft}; diff --git a/ethcore/evm/src/lib.rs b/ethcore/evm/src/lib.rs index 77d4f7c04..1acb57400 100644 --- a/ethcore/evm/src/lib.rs +++ b/ethcore/evm/src/lib.rs @@ -20,13 +20,16 @@ extern crate byteorder; extern crate bit_set; extern crate common_types as types; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate ethjson; extern crate rlp; extern crate parity_wasm; +extern crate parking_lot; extern crate wasm_utils; extern crate ethcore_logger; extern crate heapsize; extern crate vm; +extern crate hash; #[macro_use] extern crate lazy_static; diff --git a/ethcore/evm/src/tests.rs b/ethcore/evm/src/tests.rs index 7263d1779..3eb10bc6a 100644 --- a/ethcore/evm/src/tests.rs +++ b/ethcore/evm/src/tests.rs @@ -20,6 +20,8 @@ use std::hash::Hash; use std::sync::Arc; use std::collections::{HashMap, HashSet}; use rustc_hex::FromHex; +use bigint::prelude::U256; +use bigint::hash::H256; use util::*; use vm::{self, ActionParams, ActionValue}; use vm::tests::{FakeExt, FakeCall, FakeCallType, test_finalize}; diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 5ee90567d..a96a7fb76 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -14,6 +14,7 @@ build = "build.rs" log = "0.3" ethcore = { path = ".."} ethcore-util = { path = "../../util" } +ethcore-bigint = { path = "../../util/bigint" } ethcore-network = { path = "../../util/network" } ethcore-io = { path = "../../util/io" } ethcore-ipc = { path = "../../ipc/rpc", optional = true } @@ -31,7 +32,10 @@ itertools = "0.5" bincode = "0.8.0" serde = "1.0" serde_derive = "1.0" +parking_lot = "0.4" stats = { path = "../../util/stats" } +hash = { path = "../../util/hash" } +triehash = { path = "../../util/triehash" } [features] default = [] diff --git a/ethcore/light/src/cache.rs b/ethcore/light/src/cache.rs index ab416e9b3..65c3a4d0f 100644 --- a/ethcore/light/src/cache.rs +++ b/ethcore/light/src/cache.rs @@ -27,7 +27,8 @@ use ethcore::receipt::Receipt; use stats::Corpus; use time::{SteadyTime, Duration}; use heapsize::HeapSizeOf; -use util::{U256, H256}; +use bigint::prelude::U256; +use bigint::hash::H256; use util::cache::MemoryLruCache; /// Configuration for how much data to cache. diff --git a/ethcore/light/src/cht.rs b/ethcore/light/src/cht.rs index 7f2ccc3a8..d4d1f2294 100644 --- a/ethcore/light/src/cht.rs +++ b/ethcore/light/src/cht.rs @@ -21,7 +21,9 @@ //! we discarded. 
use ethcore::ids::BlockId; -use util::{Bytes, H256, U256, HashDB, MemoryDB}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{Bytes, HashDB, MemoryDB}; use util::trie::{self, TrieMut, TrieDBMut, Trie, TrieDB, Recorder}; use rlp::{RlpStream, UntrustedRlp}; @@ -130,7 +132,7 @@ pub fn compute_root(cht_num: u64, iterable: I) -> Option } if v.len() == SIZE as usize { - Some(::util::triehash::trie_root(v)) + Some(::triehash::trie_root(v)) } else { None } diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 554d0f005..3828e6954 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -37,11 +37,12 @@ use ethcore::ids::BlockId; use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp}; use heapsize::HeapSizeOf; -use util::{H256, U256, RwLock}; +use bigint::prelude::U256; +use bigint::hash::H256; use util::kvdb::{DBTransaction, KeyValueDB}; use cache::Cache; -use util::Mutex; +use parking_lot::{Mutex, RwLock}; use smallvec::SmallVec; @@ -487,7 +488,7 @@ impl HeaderChain { /// Get the genesis hash. pub fn genesis_hash(&self) -> H256 { - ::util::Hashable::sha3(&self.genesis_header) + self.genesis_header.hash() } /// Get the best block's data. @@ -555,7 +556,7 @@ mod tests { use cache::Cache; use time::Duration; - use util::Mutex; + use parking_lot::Mutex; fn make_db() -> Arc<::util::KeyValueDB> { Arc::new(::util::kvdb::in_memory(0)) diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index ee270c977..2067a23c2 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -30,8 +30,10 @@ use ethcore::spec::Spec; use ethcore::service::ClientIoMessage; use ethcore::encoded; use io::IoChannel; +use parking_lot::{Mutex, RwLock}; +use bigint::prelude::U256; +use bigint::hash::H256; -use util::{H256, U256, Mutex, RwLock}; use util::kvdb::{KeyValueDB, CompactionProfile}; use self::header_chain::{AncestryIter, HeaderChain}; diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index 83949a2f1..99dccc999 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -28,7 +28,7 @@ use io::{IoContext, IoError, IoHandler, IoService}; use util::kvdb::{Database, DatabaseConfig}; use cache::Cache; -use util::Mutex; +use parking_lot::Mutex; use super::{Client, Config as ClientConfig}; @@ -117,11 +117,11 @@ mod tests { use super::Service; use devtools::RandomTempPath; use ethcore::spec::Spec; - + use std::sync::Arc; use cache::Cache; use time::Duration; - use util::Mutex; + use parking_lot::Mutex; #[test] fn it_works() { diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index 780ea3043..07f862a1a 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -70,6 +70,7 @@ extern crate bincode; extern crate ethcore_io as io; extern crate ethcore_network as network; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate ethcore; extern crate evm; extern crate heapsize; @@ -77,6 +78,7 @@ extern crate futures; extern crate itertools; extern crate rand; extern crate rlp; +extern crate parking_lot; #[macro_use] extern crate rlp_derive; extern crate serde; @@ -84,6 +86,8 @@ extern crate smallvec; extern crate stats; extern crate time; extern crate vm; +extern crate hash; +extern crate triehash; #[cfg(feature = "ipc")] extern crate ethcore_ipc as ipc; diff --git a/ethcore/light/src/net/load_timer.rs 
b/ethcore/light/src/net/load_timer.rs index a169b86d9..7b78fc693 100644 --- a/ethcore/light/src/net/load_timer.rs +++ b/ethcore/light/src/net/load_timer.rs @@ -32,7 +32,7 @@ use request::{CompleteRequest, Kind}; use bincode; use time; -use util::{RwLock, Mutex}; +use parking_lot::{RwLock, Mutex}; /// Number of time periods samples should be kept for. pub const MOVING_SAMPLE_SIZE: usize = 256; diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 046dc68bd..cccb32458 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -23,8 +23,10 @@ use ethcore::transaction::UnverifiedTransaction; use io::TimerToken; use network::{HostInfo, NetworkProtocolHandler, NetworkContext, PeerId}; use rlp::{RlpStream, UntrustedRlp}; -use util::hash::H256; -use util::{DBValue, Mutex, RwLock, U256}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::DBValue; +use parking_lot::{Mutex, RwLock}; use time::{Duration, SteadyTime}; use std::collections::{HashMap, HashSet}; @@ -287,7 +289,7 @@ pub type PeerMap = HashMap>; mod id_guard { use network::PeerId; - use util::RwLockReadGuard; + use parking_lot::RwLockReadGuard; use super::{PeerMap, ReqId}; diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index 950fa24d8..39eb33106 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -30,7 +30,7 @@ use request::{self, Request}; use super::error::Error; use rlp::*; -use util::U256; +use bigint::prelude::U256; use time::{Duration, SteadyTime}; /// Credits value. diff --git a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index 35182f0bf..e83c33bff 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -27,7 +27,7 @@ use std::iter::FromIterator; use request::Request; use request::NetworkRequests as Requests; use net::{timeout, ReqId}; -use util::U256; +use bigint::prelude::U256; use time::{Duration, SteadyTime}; diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs index 732826430..b83b831da 100644 --- a/ethcore/light/src/net/status.rs +++ b/ethcore/light/src/net/status.rs @@ -17,7 +17,8 @@ //! Peer status and capabilities. 
use rlp::{DecoderError, Encodable, Decodable, RlpStream, UntrustedRlp}; -use util::{H256, U256}; +use bigint::prelude::U256; +use bigint::hash::H256; use super::request_credits::FlowParams; @@ -373,7 +374,8 @@ pub fn write_announcement(announcement: &Announcement) -> Vec { mod tests { use super::*; use super::super::request_credits::FlowParams; - use util::{U256, H256}; + use bigint::prelude::U256; + use bigint::hash::H256; use rlp::{RlpStream, UntrustedRlp}; #[test] diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index ef86c08f5..8e928dd22 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -32,7 +32,9 @@ use request; use request::*; use rlp::*; -use util::{Address, H256, U256}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Address; use std::sync::Arc; @@ -606,7 +608,7 @@ fn id_guard() { pending_requests.insert(req_id_1, req.clone(), 0.into(), ::time::SteadyTime::now()); pending_requests.insert(req_id_2, req, 1.into(), ::time::SteadyTime::now()); - proto.peers.write().insert(peer_id, ::util::Mutex::new(Peer { + proto.peers.write().insert(peer_id, ::parking_lot::Mutex::new(Peer { local_credits: flow_params.create_credits(), status: status(provider.client.chain_info()), capabilities: capabilities.clone(), diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 42469725b..d67b7dc4e 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -27,7 +27,7 @@ use ethcore::executed::{Executed, ExecutionError}; use futures::{Async, Poll, Future}; use futures::sync::oneshot::{self, Sender, Receiver, Canceled}; use network::PeerId; -use util::{RwLock, Mutex}; +use parking_lot::{RwLock, Mutex}; use net::{ self, Handler, PeerStatus, Status, Capabilities, diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 6a2201349..9f03955da 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -25,13 +25,16 @@ use ethcore::receipt::Receipt; use ethcore::state::{self, ProvedExecution}; use ethcore::transaction::SignedTransaction; use vm::EnvInfo; +use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY, KECCAK_EMPTY_LIST_RLP, keccak}; use request::{self as net_request, IncompleteRequest, CompleteRequest, Output, OutputKind, Field}; use rlp::{RlpStream, UntrustedRlp}; -use util::{Address, Bytes, DBValue, HashDB, Mutex, H256, U256}; +use bigint::prelude::U256; +use bigint::hash::H256; +use parking_lot::Mutex; +use util::{Address, Bytes, DBValue, HashDB}; use util::memorydb::MemoryDB; -use util::sha3::{Hashable, SHA3_NULL_RLP, SHA3_EMPTY, SHA3_EMPTY_LIST_RLP}; use util::trie::{Trie, TrieDB, TrieError}; const SUPPLIED_MATCHES: &'static str = "supplied responses always match produced requests; enforced by `check_response`; qed"; @@ -276,7 +279,7 @@ impl From for CheckedRequest { Request::Account(req) => { let net_req = net_request::IncompleteAccountRequest { block_hash: req.header.field(), - address_hash: ::util::Hashable::sha3(&req.address).into(), + address_hash: ::hash::keccak(&req.address).into(), }; CheckedRequest::Account(req, net_req) } @@ -366,7 +369,7 @@ impl CheckedRequest { } CheckedRequest::Receipts(ref check, ref req) => { // empty transactions -> no receipts - if check.0.as_ref().ok().map_or(false, |hdr| hdr.receipts_root() == SHA3_NULL_RLP) { + if check.0.as_ref().ok().map_or(false, |hdr| hdr.receipts_root() == KECCAK_NULL_RLP) { return 
Some(Response::Receipts(Vec::new())); } @@ -377,7 +380,7 @@ impl CheckedRequest { CheckedRequest::Body(ref check, ref req) => { // check for empty body. if let Some(hdr) = check.0.as_ref().ok() { - if hdr.transactions_root() == SHA3_NULL_RLP && hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP { + if hdr.transactions_root() == KECCAK_NULL_RLP && hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { let mut stream = RlpStream::new_list(3); stream.append_raw(hdr.rlp().as_raw(), 1); stream.begin_list(0); @@ -422,7 +425,7 @@ impl CheckedRequest { }) } CheckedRequest::Code(_, ref req) => { - if req.code_hash.as_ref().map_or(false, |&h| h == SHA3_EMPTY) { + if req.code_hash.as_ref().map_or(false, |&h| h == KECCAK_EMPTY) { Some(Response::Code(Vec::new())) } else { None @@ -571,8 +574,8 @@ impl net_request::ResponseLike for Response { match *self { Response::HeaderProof((ref hash, _)) => f(0, Output::Hash(*hash)), Response::Account(None) => { - f(0, Output::Hash(SHA3_EMPTY)); // code hash - f(1, Output::Hash(SHA3_NULL_RLP)); // storage root. + f(0, Output::Hash(KECCAK_EMPTY)); // code hash + f(1, Output::Hash(KECCAK_NULL_RLP)); // storage root. } Response::Account(Some(ref acc)) => { f(0, Output::Hash(acc.code_hash)); @@ -688,7 +691,7 @@ impl HeaderByHash { }; let header = headers.get(0).ok_or(Error::Empty)?; - let hash = header.sha3(); + let hash = header.hash(); match hash == expected_hash { true => { cache.lock().insert_block_header(hash, header.clone()); @@ -708,12 +711,12 @@ impl Body { pub fn check_response(&self, cache: &Mutex<::cache::Cache>, body: &encoded::Body) -> Result { // check the integrity of the the body against the header let header = self.0.as_ref()?; - let tx_root = ::util::triehash::ordered_trie_root(body.rlp().at(0).iter().map(|r| r.as_raw().to_vec())); + let tx_root = ::triehash::ordered_trie_root(body.rlp().at(0).iter().map(|r| r.as_raw().to_vec())); if tx_root != header.transactions_root() { return Err(Error::WrongTrieRoot(header.transactions_root(), tx_root)); } - let uncles_hash = body.rlp().at(1).as_raw().sha3(); + let uncles_hash = keccak(body.rlp().at(1).as_raw()); if uncles_hash != header.uncles_hash() { return Err(Error::WrongHash(header.uncles_hash(), uncles_hash)); } @@ -738,7 +741,7 @@ impl BlockReceipts { /// Check a response with receipts against the stored header. pub fn check_response(&self, cache: &Mutex<::cache::Cache>, receipts: &[Receipt]) -> Result, Error> { let receipts_root = self.0.as_ref()?.receipts_root(); - let found_root = ::util::triehash::ordered_trie_root(receipts.iter().map(|r| ::rlp::encode(r).into_vec())); + let found_root = ::triehash::ordered_trie_root(receipts.iter().map(|r| ::rlp::encode(r).into_vec())); match receipts_root == found_root { true => { @@ -768,7 +771,7 @@ impl Account { let mut db = MemoryDB::new(); for node in proof { db.insert(&node[..]); } - match TrieDB::new(&db, &state_root).and_then(|t| t.get(&self.address.sha3()))? { + match TrieDB::new(&db, &state_root).and_then(|t| t.get(&keccak(&self.address)))? 
{ Some(val) => { let rlp = UntrustedRlp::new(&val); Ok(Some(BasicAccount { @@ -800,7 +803,7 @@ impl Code { code_hash: &H256, code: &[u8] ) -> Result, Error> { - let found_hash = code.sha3(); + let found_hash = keccak(code); if &found_hash == code_hash { Ok(code.to_vec()) } else { @@ -850,9 +853,12 @@ impl TransactionProof { #[cfg(test)] mod tests { use super::*; - use util::{MemoryDB, Address, Mutex, H256}; + use bigint::hash::H256; + use util::{MemoryDB, Address}; + use parking_lot::Mutex; use util::trie::{Trie, TrieMut, SecTrieDB, SecTrieDBMut}; use util::trie::recorder::Recorder; + use hash::keccak; use ethcore::client::{BlockChainClient, TestBlockChainClient, EachBlockWith}; use ethcore::header::Header; @@ -933,7 +939,7 @@ mod tests { }).collect::>(); let mut header = Header::new(); - let receipts_root = ::util::triehash::ordered_trie_root( + let receipts_root = ::triehash::ordered_trie_root( receipts.iter().map(|x| ::rlp::encode(x).into_vec()) ); @@ -998,7 +1004,7 @@ mod tests { #[test] fn check_code() { let code = vec![1u8; 256]; - let code_hash = ::util::Hashable::sha3(&code); + let code_hash = keccak(&code); let header = Header::new(); let req = Code { header: encoded::Header::new(::rlp::encode(&header).into_vec()).into(), diff --git a/ethcore/light/src/on_demand/tests.rs b/ethcore/light/src/on_demand/tests.rs index 10c4ceae5..e114a7902 100644 --- a/ethcore/light/src/on_demand/tests.rs +++ b/ethcore/light/src/on_demand/tests.rs @@ -22,7 +22,8 @@ use ethcore::header::{Header, Seal}; use futures::Future; use network::{PeerId, NodeId}; use net::*; -use util::{H256, Mutex}; +use bigint::hash::H256; +use parking_lot::Mutex; use time::Duration; use ::request::{self as basic_request, Response}; diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs index 3632783ca..6db81dcdd 100644 --- a/ethcore/light/src/provider.rs +++ b/ethcore/light/src/provider.rs @@ -24,7 +24,8 @@ use ethcore::client::{BlockChainClient, ProvingBlockChainClient}; use ethcore::transaction::PendingTransaction; use ethcore::ids::BlockId; use ethcore::encoded; -use util::{RwLock, H256}; +use bigint::hash::H256; +use parking_lot::RwLock; use cht::{self, BlockInfo}; use client::{LightChainClient, AsLightClient}; diff --git a/ethcore/light/src/transaction_queue.rs b/ethcore/light/src/transaction_queue.rs index b0e7483a3..090919245 100644 --- a/ethcore/light/src/transaction_queue.rs +++ b/ethcore/light/src/transaction_queue.rs @@ -28,7 +28,9 @@ use std::collections::hash_map::Entry; use ethcore::error::{TransactionError, TransactionImportResult}; use ethcore::transaction::{Condition, PendingTransaction, SignedTransaction}; -use util::{Address, U256, H256, H256FastMap}; +use bigint::prelude::U256; +use bigint::hash::{H256, H256FastMap}; +use util::Address; // Knowledge of an account's current nonce. #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/ethcore/light/src/types/request/builder.rs b/ethcore/light/src/types/request/builder.rs index 0b413677d..74503f54f 100644 --- a/ethcore/light/src/types/request/builder.rs +++ b/ethcore/light/src/types/request/builder.rs @@ -211,7 +211,7 @@ impl DerefMut for Requests { mod tests { use request::*; use super::RequestBuilder; - use util::H256; + use bigint::hash::H256; #[test] fn all_scalar() { diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 51f916b15..38e736673 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -17,7 +17,7 @@ //! Light protocol request types. 
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; -use util::H256; +use bigint::hash::H256; mod builder; @@ -760,7 +760,9 @@ pub mod header { pub mod header_proof { use super::{Field, NoSuchOutput, OutputKind, Output}; use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; - use util::{Bytes, U256, H256}; + use bigint::hash::H256; + use bigint::prelude::U256; + use util::Bytes; /// Potentially incomplete header proof request. #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] @@ -854,7 +856,7 @@ pub mod header_proof { /// Request and response for transaction index. pub mod transaction_index { use super::{Field, NoSuchOutput, OutputKind, Output}; - use util::H256; + use bigint::hash::H256; /// Potentially incomplete transaction index request. #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] @@ -932,7 +934,7 @@ pub mod transaction_index { pub mod block_receipts { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::receipt::Receipt; - use util::H256; + use bigint::hash::H256; /// Potentially incomplete block receipts request. #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] @@ -1001,7 +1003,7 @@ pub mod block_body { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::encoded; use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; - use util::H256; + use bigint::hash::H256; /// Potentially incomplete block body request. #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] @@ -1089,7 +1091,9 @@ pub mod block_body { /// A request for an account proof. pub mod account { use super::{Field, NoSuchOutput, OutputKind, Output}; - use util::{Bytes, U256, H256}; + use bigint::hash::H256; + use bigint::prelude::U256; + use util::Bytes; /// Potentially incomplete request for an account proof. #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] @@ -1188,7 +1192,8 @@ pub mod account { /// A request for a storage proof. pub mod storage { use super::{Field, NoSuchOutput, OutputKind, Output}; - use util::{Bytes, H256}; + use bigint::hash::H256; + use util::Bytes; /// Potentially incomplete request for an storage proof. #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] @@ -1296,7 +1301,8 @@ pub mod storage { /// A request for contract code. pub mod contract_code { use super::{Field, NoSuchOutput, OutputKind, Output}; - use util::{Bytes, H256}; + use bigint::hash::H256; + use util::Bytes; /// Potentially incomplete contract code request. #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] @@ -1382,7 +1388,9 @@ pub mod execution { use super::{Field, NoSuchOutput, OutputKind, Output}; use ethcore::transaction::Action; use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp}; - use util::{Bytes, Address, U256, H256, DBValue}; + use bigint::hash::H256; + use bigint::prelude::U256; + use util::{Bytes, Address, DBValue}; /// Potentially incomplete execution proof request. 
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] @@ -1591,7 +1599,7 @@ mod tests { let full_req = Request::TransactionIndex(req.clone()); let res = TransactionIndexResponse { num: 1000, - hash: ::util::H256::random(), + hash: ::bigint::hash::H256::random(), index: 4, }; let full_res = Response::TransactionIndex(res.clone()); diff --git a/ethcore/native_contracts/build.rs b/ethcore/native_contracts/build.rs index e7985b388..979535057 100644 --- a/ethcore/native_contracts/build.rs +++ b/ethcore/native_contracts/build.rs @@ -29,6 +29,7 @@ const SECRETSTORE_ACL_STORAGE_ABI: &'static str = include_str!("res/secretstore_ const VALIDATOR_SET_ABI: &'static str = include_str!("res/validator_set.json"); const VALIDATOR_REPORT_ABI: &'static str = include_str!("res/validator_report.json"); const PEER_SET_ABI: &'static str = include_str!("res/peer_set.json"); +const TX_ACL_ABI: &'static str = include_str!("res/tx_acl.json"); const TEST_VALIDATOR_SET_ABI: &'static str = include_str!("res/test_validator_set.json"); @@ -55,6 +56,7 @@ fn main() { build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs"); build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs"); build_file("PeerSet", PEER_SET_ABI, "peer_set.rs"); + build_file("TransactAcl", TX_ACL_ABI, "tx_acl.rs"); build_test_contracts(); } diff --git a/ethcore/native_contracts/generator/src/lib.rs b/ethcore/native_contracts/generator/src/lib.rs index 996ee4969..8de7555d2 100644 --- a/ethcore/native_contracts/generator/src/lib.rs +++ b/ethcore/native_contracts/generator/src/lib.rs @@ -302,7 +302,8 @@ fn detokenize(name: &str, output_type: ParamType) -> String { } ParamType::Uint(width) => { let read_uint = match width { - 8 | 16 | 32 | 64 => format!("bigint::prelude::U256(u).low_u64() as u{}", width), + 8 => "u[31] as u8".into(), + 16 | 32 | 64 => format!("BigEndian::read_u{}(&u[{}..])", width, 32 - (width / 8)), _ => format!("bigint::prelude::U{}::from(&u[..])", width), }; diff --git a/ethcore/native_contracts/res/tx_acl.json b/ethcore/native_contracts/res/tx_acl.json new file mode 100644 index 000000000..cff9956de --- /dev/null +++ b/ethcore/native_contracts/res/tx_acl.json @@ -0,0 +1 @@ +[{"constant":false,"inputs":[{"name":"sender","type":"address"}],"name":"allowedTxTypes","outputs":[{"name":"","type":"uint32"}],"payable":false,"stateMutability":"nonpayable","type":"function"}] diff --git a/ethcore/native_contracts/src/lib.rs b/ethcore/native_contracts/src/lib.rs index 733dea80e..c37a13504 100644 --- a/ethcore/native_contracts/src/lib.rs +++ b/ethcore/native_contracts/src/lib.rs @@ -31,6 +31,7 @@ mod secretstore_acl_storage; mod validator_set; mod validator_report; mod peer_set; +mod tx_acl; pub mod test_contracts; @@ -42,3 +43,4 @@ pub use self::secretstore_acl_storage::SecretStoreAclStorage; pub use self::validator_set::ValidatorSet; pub use self::validator_report::ValidatorReport; pub use self::peer_set::PeerSet; +pub use self::tx_acl::TransactAcl; diff --git a/util/sha3/src/lib.rs b/ethcore/native_contracts/src/tx_acl.rs similarity index 79% rename from util/sha3/src/lib.rs rename to ethcore/native_contracts/src/tx_acl.rs index 77ecd84c3..1ab4c8e5d 100644 --- a/util/sha3/src/lib.rs +++ b/ethcore/native_contracts/src/tx_acl.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-extern { - pub fn sha3_256(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) -> i32; - pub fn sha3_512(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) -> i32; -} +#![allow(unused_mut, unused_variables, unused_imports)] + +//! Transact permissions contract. + +include!(concat!(env!("OUT_DIR"), "/tx_acl.rs")); diff --git a/ethcore/node_filter/Cargo.toml b/ethcore/node_filter/Cargo.toml index e885ef1d1..2d70c3461 100644 --- a/ethcore/node_filter/Cargo.toml +++ b/ethcore/node_filter/Cargo.toml @@ -9,8 +9,10 @@ authors = ["Parity Technologies "] [dependencies] ethcore = { path = ".."} ethcore-util = { path = "../../util" } +ethcore-bigint = { path = "../../util/bigint" } ethcore-io = { path = "../../util/io" } ethcore-network = { path = "../../util/network" } native-contracts = { path = "../native_contracts" } futures = "0.1" log = "0.3" +parking_lot = "0.4" diff --git a/ethcore/node_filter/src/lib.rs b/ethcore/node_filter/src/lib.rs index d3dcbaa3b..16a1bdd8f 100644 --- a/ethcore/node_filter/src/lib.rs +++ b/ethcore/node_filter/src/lib.rs @@ -18,9 +18,11 @@ extern crate ethcore; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate ethcore_network as network; extern crate native_contracts; extern crate futures; +extern crate parking_lot; #[cfg(test)] extern crate ethcore_io as io; #[macro_use] extern crate log; @@ -29,7 +31,9 @@ use std::collections::HashMap; use native_contracts::PeerSet as Contract; use network::{NodeId, ConnectionFilter, ConnectionDirection}; use ethcore::client::{BlockChainClient, BlockId, ChainNotify}; -use util::{Mutex, Address, H256, Bytes}; +use bigint::hash::H256; +use util::{Address, Bytes}; +use parking_lot::Mutex; use futures::Future; const MAX_CACHE_SIZE: usize = 4096; diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index 5ce555aef..5ebf3f47a 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -16,6 +16,8 @@ //! DB backend wrapper for Account trie use std::collections::HashMap; +use hash::{KECCAK_NULL_RLP, keccak}; +use bigint::hash::H256; use util::*; use rlp::NULL_RLP; @@ -79,7 +81,7 @@ impl<'db> AccountDB<'db> { /// Create a new AccountDB from an address. #[cfg(test)] pub fn new(db: &'db HashDB, address: &Address) -> Self { - Self::from_hash(db, address.sha3()) + Self::from_hash(db, keccak(address)) } /// Create a new AcountDB from an address' hash. @@ -97,14 +99,14 @@ impl<'db> HashDB for AccountDB<'db>{ } fn get(&self, key: &H256) -> Option { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return Some(DBValue::from_slice(&NULL_RLP)); } self.db.get(&combine_key(&self.address_hash, key)) } fn contains(&self, key: &H256) -> bool { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return true; } self.db.contains(&combine_key(&self.address_hash, key)) @@ -133,7 +135,7 @@ impl<'db> AccountDBMut<'db> { /// Create a new AccountDB from an address. #[cfg(test)] pub fn new(db: &'db mut HashDB, address: &Address) -> Self { - Self::from_hash(db, address.sha3()) + Self::from_hash(db, keccak(address)) } /// Create a new AcountDB from an address' hash. 
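// --- Editorial aside (not part of the patch): a minimal sketch of why AccountDB
// can special-case KECCAK_NULL_RLP above. The constants introduced by the new
// `hash` crate are keccak-256 of fixed inputs: KECCAK_NULL_RLP hashes the RLP of
// an empty string (a single 0x80 byte, the empty-trie root), KECCAK_EMPTY_LIST_RLP
// hashes the RLP of an empty list (0xc0, the empty-uncles hash checked in
// CheckedRequest::Body earlier in this diff), and KECCAK_EMPTY hashes zero bytes
// (the code hash of an account with no code). Imports assume the crate wiring
// added elsewhere in this patch.
extern crate hash;

use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP, KECCAK_EMPTY};

fn main() {
    assert_eq!(keccak(&[0x80u8]), KECCAK_NULL_RLP);       // rlp("")  -> empty trie root
    assert_eq!(keccak(&[0xc0u8]), KECCAK_EMPTY_LIST_RLP); // rlp([])  -> empty uncles hash
    assert_eq!(keccak(""), KECCAK_EMPTY);                 // no bytes -> empty code hash
}
// --- end editorial aside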
@@ -156,14 +158,14 @@ impl<'db> HashDB for AccountDBMut<'db>{ } fn get(&self, key: &H256) -> Option { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return Some(DBValue::from_slice(&NULL_RLP)); } self.db.get(&combine_key(&self.address_hash, key)) } fn contains(&self, key: &H256) -> bool { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return true; } self.db.contains(&combine_key(&self.address_hash, key)) @@ -171,16 +173,16 @@ impl<'db> HashDB for AccountDBMut<'db>{ fn insert(&mut self, value: &[u8]) -> H256 { if value == &NULL_RLP { - return SHA3_NULL_RLP.clone(); + return KECCAK_NULL_RLP.clone(); } - let k = value.sha3(); + let k = keccak(value); let ak = combine_key(&self.address_hash, &k); self.db.emplace(ak, DBValue::from_slice(value)); k } fn emplace(&mut self, key: H256, value: DBValue) { - if key == SHA3_NULL_RLP { + if key == KECCAK_NULL_RLP { return; } let key = combine_key(&self.address_hash, &key); @@ -188,7 +190,7 @@ impl<'db> HashDB for AccountDBMut<'db>{ } fn remove(&mut self, key: &H256) { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return; } let key = combine_key(&self.address_hash, key); @@ -204,14 +206,14 @@ impl<'db> HashDB for Wrapping<'db> { } fn get(&self, key: &H256) -> Option { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return Some(DBValue::from_slice(&NULL_RLP)); } self.0.get(key) } fn contains(&self, key: &H256) -> bool { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return true; } self.0.contains(key) @@ -238,14 +240,14 @@ impl<'db> HashDB for WrappingMut<'db>{ } fn get(&self, key: &H256) -> Option { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return Some(DBValue::from_slice(&NULL_RLP)); } self.0.get(key) } fn contains(&self, key: &H256) -> bool { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return true; } self.0.contains(key) @@ -253,20 +255,20 @@ impl<'db> HashDB for WrappingMut<'db>{ fn insert(&mut self, value: &[u8]) -> H256 { if value == &NULL_RLP { - return SHA3_NULL_RLP.clone(); + return KECCAK_NULL_RLP.clone(); } self.0.insert(value) } fn emplace(&mut self, key: H256, value: DBValue) { - if key == SHA3_NULL_RLP { + if key == KECCAK_NULL_RLP { return; } self.0.emplace(key, value) } fn remove(&mut self, key: &H256) { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return; } self.0.remove(key) diff --git a/ethcore/src/account_provider/mod.rs b/ethcore/src/account_provider/mod.rs index 752cec964..393236919 100755 --- a/ethcore/src/account_provider/mod.rs +++ b/ethcore/src/account_provider/mod.rs @@ -23,7 +23,7 @@ use self::stores::{AddressBook, DappsSettingsStore, NewDappsPolicy}; use std::fmt; use std::collections::{HashMap, HashSet}; use std::time::{Instant, Duration}; -use util::{RwLock}; +use parking_lot::RwLock; use ethstore::{ SimpleSecretStore, SecretStore, Error as SSError, EthStore, EthMultiStore, random_string, SecretVaultRef, StoreAccountRef, OpaqueSecret, @@ -794,7 +794,7 @@ mod tests { use std::time::Instant; use ethstore::ethkey::{Generator, Random, Address}; use ethstore::{StoreAccountRef, Derivation}; - use util::H256; + use bigint::hash::H256; #[test] fn unlock_account_temp() { diff --git a/ethcore/src/basic_types.rs b/ethcore/src/basic_types.rs index 1f07e748f..838834eea 100644 --- a/ethcore/src/basic_types.rs +++ b/ethcore/src/basic_types.rs @@ -20,7 +20,7 @@ pub type LogBloom = ::log_entry::LogBloom; /// Constant 2048-bit datum for 0. Often used as a default. 
-pub static ZERO_LOGBLOOM: LogBloom = ::util::hash::H2048([0x00; 256]); +pub static ZERO_LOGBLOOM: LogBloom = ::bigint::hash::H2048([0x00; 256]); #[cfg_attr(feature="dev", allow(enum_variant_names))] /// Semantic boolean for when a seal/signature is included. diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 341e50061..d6fa65a41 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -19,10 +19,14 @@ use std::cmp; use std::sync::Arc; use std::collections::HashSet; +use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; +use triehash::ordered_trie_root; use rlp::{UntrustedRlp, RlpStream, Encodable, Decodable, DecoderError}; -use util::{Bytes, Address, Hashable, U256, H256, ordered_trie_root, SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; -use util::error::{Mismatch, OutOfBounds}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{Bytes, Address}; +use unexpected::{Mismatch, OutOfBounds}; use basic_types::{LogBloom, Seal}; use vm::{EnvInfo, LastHashes}; @@ -398,13 +402,13 @@ impl<'x> OpenBlock<'x> { if let Err(e) = s.engine.on_close_block(&mut s.block) { warn!("Encountered error on closing the block: {}", e); } - + if let Err(e) = s.block.state.commit() { warn!("Encountered error on state commit: {}", e); } s.block.header.set_transactions_root(ordered_trie_root(s.block.transactions.iter().map(|e| e.rlp_bytes().into_vec()))); let uncle_bytes = s.block.uncles.iter().fold(RlpStream::new_list(s.block.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); - s.block.header.set_uncles_hash(uncle_bytes.sha3()); + s.block.header.set_uncles_hash(keccak(&uncle_bytes)); s.block.header.set_state_root(s.block.state.root().clone()); s.block.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().into_vec()))); s.block.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator @@ -429,14 +433,14 @@ impl<'x> OpenBlock<'x> { if let Err(e) = s.block.state.commit() { warn!("Encountered error on state commit: {}", e); } - if s.block.header.transactions_root().is_zero() || s.block.header.transactions_root() == &SHA3_NULL_RLP { + if s.block.header.transactions_root().is_zero() || s.block.header.transactions_root() == &KECCAK_NULL_RLP { s.block.header.set_transactions_root(ordered_trie_root(s.block.transactions.iter().map(|e| e.rlp_bytes().into_vec()))); } let uncle_bytes = s.block.uncles.iter().fold(RlpStream::new_list(s.block.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); - if s.block.header.uncles_hash().is_zero() || s.block.header.uncles_hash() == &SHA3_EMPTY_LIST_RLP { - s.block.header.set_uncles_hash(uncle_bytes.sha3()); + if s.block.header.uncles_hash().is_zero() || s.block.header.uncles_hash() == &KECCAK_EMPTY_LIST_RLP { + s.block.header.set_uncles_hash(keccak(&uncle_bytes)); } - if s.block.header.receipts_root().is_zero() || s.block.header.receipts_root() == &SHA3_NULL_RLP { + if s.block.header.receipts_root().is_zero() || s.block.header.receipts_root() == &KECCAK_NULL_RLP { s.block.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().into_vec()))); } @@ -469,7 +473,7 @@ impl<'x> IsBlock for LockedBlock { impl ClosedBlock { /// Get the hash of the header without seal arguments. - pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) } + pub fn hash(&self) -> H256 { self.header().rlp_keccak(Seal::Without) } /// Turn this into a `LockedBlock`, unable to be reopened again. 
pub fn lock(self) -> LockedBlock { @@ -494,7 +498,7 @@ impl ClosedBlock { impl LockedBlock { /// Get the hash of the header without seal arguments. - pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) } + pub fn hash(&self) -> H256 { self.header().rlp_keccak(Seal::Without) } /// Provide a valid seal in order to turn this into a `SealedBlock`. /// diff --git a/ethcore/src/blockchain/best_block.rs b/ethcore/src/blockchain/best_block.rs index e857a99b6..fd41f71e8 100644 --- a/ethcore/src/blockchain/best_block.rs +++ b/ethcore/src/blockchain/best_block.rs @@ -14,7 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::{Bytes, U256, H256}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Bytes; use header::BlockNumber; /// Best block info. diff --git a/ethcore/src/blockchain/block_info.rs b/ethcore/src/blockchain/block_info.rs index e9e02de60..d1e643792 100644 --- a/ethcore/src/blockchain/block_info.rs +++ b/ethcore/src/blockchain/block_info.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::{U256,H256}; +use bigint::prelude::U256; +use bigint::hash::H256; use header::BlockNumber; /// Brief info about inserted block. diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 29d12009a..cdd390693 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -22,6 +22,9 @@ use std::mem; use itertools::Itertools; use bloomchain as bc; use heapsize::HeapSizeOf; +use bigint::prelude::U256; +use bigint::hash::{H256, H2048}; +use parking_lot::{Mutex, RwLock}; use util::*; use rlp::*; use header::*; @@ -41,6 +44,7 @@ use db::{self, Writable, Readable, CacheUpdatePolicy}; use cache_manager::CacheManager; use encoded; use engines::epoch::{Transition as EpochTransition, PendingTransition as PendingEpochTransition}; +use ansi_term::Colour; const LOG_BLOOMS_LEVELS: usize = 3; const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16; @@ -512,7 +516,7 @@ impl BlockChain { // we need to insert genesis into the cache let block = BlockView::new(genesis); let header = block.header_view(); - let hash = block.sha3(); + let hash = block.hash(); let details = BlockDetails { number: header.number(), @@ -622,7 +626,7 @@ impl BlockChain { return None; } if let Some(extras) = self.db.read(db::COL_EXTRA, &best_block_hash) as Option { - type DetailsKey = Key; + type DetailsKey = Key; batch.delete(db::COL_EXTRA, &(DetailsKey::key(&best_block_hash))); let hash = extras.parent; let range = extras.number as bc::Number .. extras.number as bc::Number; @@ -764,7 +768,7 @@ impl BlockChain { pub fn insert_unordered_block(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec, parent_td: Option, is_best: bool, is_ancient: bool) -> bool { let block = BlockView::new(bytes); let header = block.header_view(); - let hash = header.sha3(); + let hash = header.hash(); if self.is_known(&hash) { return false; @@ -966,7 +970,7 @@ impl BlockChain { // create views onto rlp let block = BlockView::new(bytes); let header = block.header_view(); - let hash = header.sha3(); + let hash = header.hash(); if self.is_known_child(&header.parent_hash(), &hash) { return ImportRoute::none(); @@ -1005,7 +1009,7 @@ impl BlockChain { /// Get inserted block info which is critical to prepare extras updates. 
fn block_info(&self, header: &HeaderView) -> BlockInfo { - let hash = header.sha3(); + let hash = header.hash(); let number = header.number(); let parent_hash = header.parent_hash(); let parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash)); @@ -1466,9 +1470,9 @@ mod tests { #![cfg_attr(feature="dev", allow(similar_names))] use std::sync::Arc; use rustc_hex::FromHex; + use hash::keccak; use util::kvdb::KeyValueDB; - use util::hash::*; - use util::sha3::Hashable; + use bigint::hash::*; use receipt::Receipt; use blockchain::{BlockProvider, BlockChain, Config, ImportRoute}; use tests::helpers::*; @@ -1518,8 +1522,8 @@ mod tests { let mut finalizer = BlockFinalizer::default(); let genesis = canon_chain.generate(&mut finalizer).unwrap(); let first = canon_chain.generate(&mut finalizer).unwrap(); - let genesis_hash = BlockView::new(&genesis).header_view().sha3(); - let first_hash = BlockView::new(&first).header_view().sha3(); + let genesis_hash = BlockView::new(&genesis).header_view().hash(); + let first_hash = BlockView::new(&first).header_view().hash(); let db = new_db(); let bc = new_chain(&genesis, db.clone()); @@ -1549,7 +1553,7 @@ mod tests { let mut canon_chain = ChainGenerator::default(); let mut finalizer = BlockFinalizer::default(); let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let genesis_hash = BlockView::new(&genesis).header_view().sha3(); + let genesis_hash = BlockView::new(&genesis).header_view().hash(); let db = new_db(); let bc = new_chain(&genesis, db.clone()); @@ -1558,7 +1562,7 @@ mod tests { let mut batch = db.transaction(); for _ in 0..10 { let block = canon_chain.generate(&mut finalizer).unwrap(); - block_hashes.push(BlockView::new(&block).header_view().sha3()); + block_hashes.push(BlockView::new(&block).header_view().hash()); bc.insert_block(&mut batch, &block, vec![]); bc.commit(); } @@ -1607,14 +1611,14 @@ mod tests { assert_eq!( [&b4b, &b3b, &b2b].iter().map(|b| BlockView::new(b).header()).collect::>(), - bc.find_uncle_headers(&BlockView::new(&b4a).header_view().sha3(), 3).unwrap() + bc.find_uncle_headers(&BlockView::new(&b4a).header_view().hash(), 3).unwrap() ); // TODO: insert block that already includes one of them as an uncle to check it's not allowed. 
} fn secret() -> Secret { - "".sha3().into() + keccak("").into() } #[test] @@ -1646,8 +1650,8 @@ mod tests { let b2 = fork_chain .generate(&mut fork_finalizer).unwrap(); - let b1a_hash = BlockView::new(&b1a).header_view().sha3(); - let b2_hash = BlockView::new(&b2).header_view().sha3(); + let b1a_hash = BlockView::new(&b1a).header_view().hash(); + let b2_hash = BlockView::new(&b2).header_view().hash(); let t1_hash = t1.hash(); @@ -1730,9 +1734,9 @@ mod tests { .with_transaction(t3.clone()) .generate(&mut fork_finalizer).unwrap(); - let b1a_hash = BlockView::new(&b1a).header_view().sha3(); - let b1b_hash = BlockView::new(&b1b).header_view().sha3(); - let b2_hash = BlockView::new(&b2).header_view().sha3(); + let b1a_hash = BlockView::new(&b1a).header_view().hash(); + let b1b_hash = BlockView::new(&b1b).header_view().hash(); + let b2_hash = BlockView::new(&b2).header_view().hash(); let t1_hash = t1.hash(); let t2_hash = t2.hash(); @@ -1790,11 +1794,11 @@ mod tests { let b3b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); let b3a = canon_chain.generate(&mut finalizer).unwrap(); - let genesis_hash = BlockView::new(&genesis).header_view().sha3(); - let b1_hash= BlockView::new(&b1).header_view().sha3(); - let b2_hash= BlockView::new(&b2).header_view().sha3(); - let b3a_hash= BlockView::new(&b3a).header_view().sha3(); - let b3b_hash= BlockView::new(&b3b).header_view().sha3(); + let genesis_hash = BlockView::new(&genesis).header_view().hash(); + let b1_hash= BlockView::new(&b1).header_view().hash(); + let b2_hash= BlockView::new(&b2).header_view().hash(); + let b3a_hash= BlockView::new(&b3a).header_view().hash(); + let b3b_hash= BlockView::new(&b3b).header_view().hash(); // b3a is a part of canon chain, whereas b3b is part of sidechain let best_block_hash = b3a_hash.clone(); @@ -1910,8 +1914,8 @@ mod tests { let mut finalizer = BlockFinalizer::default(); let genesis = canon_chain.generate(&mut finalizer).unwrap(); let first = canon_chain.generate(&mut finalizer).unwrap(); - let genesis_hash = BlockView::new(&genesis).header_view().sha3(); - let first_hash = BlockView::new(&first).header_view().sha3(); + let genesis_hash = BlockView::new(&genesis).header_view().hash(); + let first_hash = BlockView::new(&first).header_view().hash(); let db = new_db(); { @@ -2226,9 +2230,9 @@ mod tests { let genesis = canon_chain.generate(&mut finalizer).unwrap(); let first = canon_chain.generate(&mut finalizer).unwrap(); let second = canon_chain.generate(&mut finalizer).unwrap(); - let genesis_hash = BlockView::new(&genesis).header_view().sha3(); - let first_hash = BlockView::new(&first).header_view().sha3(); - let second_hash = BlockView::new(&second).header_view().sha3(); + let genesis_hash = BlockView::new(&genesis).header_view().hash(); + let first_hash = BlockView::new(&first).header_view().hash(); + let second_hash = BlockView::new(&second).header_view().hash(); let db = new_db(); let bc = new_chain(&genesis, db.clone()); @@ -2266,7 +2270,7 @@ mod tests { // create a longer fork for i in 0..5 { let canon_block = canon_chain.generate(&mut finalizer).unwrap(); - let hash = BlockView::new(&canon_block).header_view().sha3(); + let hash = BlockView::new(&canon_block).header_view().hash(); bc.insert_block(&mut batch, &canon_block, vec![]); bc.insert_epoch_transition(&mut batch, i, EpochTransition { @@ -2279,7 +2283,7 @@ mod tests { assert_eq!(bc.best_block_number(), 5); - let hash = BlockView::new(&uncle).header_view().sha3(); + let hash = BlockView::new(&uncle).header_view().hash(); 
bc.insert_block(&mut batch, &uncle, vec![]); bc.insert_epoch_transition(&mut batch, 999, EpochTransition { block_hash: hash, diff --git a/ethcore/src/blockchain/extras.rs b/ethcore/src/blockchain/extras.rs index 69e623a1f..109038ef2 100644 --- a/ethcore/src/blockchain/extras.rs +++ b/ethcore/src/blockchain/extras.rs @@ -26,7 +26,8 @@ use header::BlockNumber; use receipt::Receipt; use heapsize::HeapSizeOf; -use util::{H256, H264, U256}; +use bigint::prelude::U256; +use bigint::hash::{H256, H264}; use util::kvdb::PREFIX_LEN as DB_PREFIX_LEN; /// Represents index of extra data in database diff --git a/ethcore/src/blockchain/generator/block.rs b/ethcore/src/blockchain/generator/block.rs index 05fd15031..d5243dc66 100644 --- a/ethcore/src/blockchain/generator/block.rs +++ b/ethcore/src/blockchain/generator/block.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . use rlp::*; -use util::{H256, H2048}; +use bigint::hash::{H256, H2048}; use util::bytes::Bytes; use header::Header; use transaction::SignedTransaction; diff --git a/ethcore/src/blockchain/generator/bloom.rs b/ethcore/src/blockchain/generator/bloom.rs index 5f39a49b8..bb83ff1af 100644 --- a/ethcore/src/blockchain/generator/bloom.rs +++ b/ethcore/src/blockchain/generator/bloom.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::hash::H2048; +use bigint::hash::H2048; pub trait WithBloom { fn with_bloom(self, bloom: H2048) -> Self where Self: Sized; diff --git a/ethcore/src/blockchain/generator/complete.rs b/ethcore/src/blockchain/generator/complete.rs index 0c22d4dfc..be5695d36 100644 --- a/ethcore/src/blockchain/generator/complete.rs +++ b/ethcore/src/blockchain/generator/complete.rs @@ -14,9 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::hash::H256; +use bigint::hash::H256; use util::bytes::Bytes; -use util::sha3::Hashable; use views::BlockView; #[derive(Default, Clone)] @@ -46,7 +45,7 @@ impl<'a, I> Iterator for Complete<'a, I> where I: Iterator, ::Ite fn next(&mut self) -> Option { self.iter.next().map(|item| { let rlp = item.complete(self.finalizer.parent_hash.clone()); - self.finalizer.parent_hash = BlockView::new(&rlp).header_view().sha3(); + self.finalizer.parent_hash = BlockView::new(&rlp).header_view().hash(); rlp }) } diff --git a/ethcore/src/blockchain/generator/generator.rs b/ethcore/src/blockchain/generator/generator.rs index a2c2c441a..a32e37942 100644 --- a/ethcore/src/blockchain/generator/generator.rs +++ b/ethcore/src/blockchain/generator/generator.rs @@ -14,7 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use util::{U256, H2048, Bytes}; +use bigint::prelude::U256; +use bigint::hash::H2048; +use util::Bytes; use header::BlockNumber; use transaction::SignedTransaction; use super::fork::Fork; @@ -110,8 +112,7 @@ impl Iterator for ChainGenerator { } mod tests { - use util::hash::{H256, H2048}; - use util::sha3::Hashable; + use bigint::hash::{H256, H2048}; use views::BlockView; use blockchain::generator::{ChainIterator, ChainGenerator, BlockFinalizer}; @@ -129,7 +130,7 @@ mod tests { let b1_rlp = canon_chain.generate(&mut finalizer).unwrap(); let b1 = BlockView::new(&b1_rlp); - assert_eq!(b1.header_view().parent_hash(), genesis.header_view().sha3()); + assert_eq!(b1.header_view().parent_hash(), genesis.header_view().hash()); assert_eq!(b1.header_view().number(), 1); let mut fork_chain = canon_chain.fork(1); @@ -137,13 +138,13 @@ mod tests { let b2_rlp_fork = fork_chain.generate(&mut finalizer.fork()).unwrap(); let b2_fork = BlockView::new(&b2_rlp_fork); - assert_eq!(b2_fork.header_view().parent_hash(), b1.header_view().sha3()); + assert_eq!(b2_fork.header_view().parent_hash(), b1.header_view().hash()); assert_eq!(b2_fork.header_view().number(), 2); let b2_rlp = canon_chain.generate(&mut finalizer).unwrap(); let b2 = BlockView::new(&b2_rlp); - assert_eq!(b2.header_view().parent_hash(), b1.header_view().sha3()); + assert_eq!(b2.header_view().parent_hash(), b1.header_view().hash()); assert_eq!(b2.header_view().number(), 2); assert!(b2.header_view().difficulty() > b2_fork.header_view().difficulty()); } @@ -163,7 +164,7 @@ mod tests { assert_eq!(block0.header_view().parent_hash(), H256::default()); assert_eq!(block1.header_view().number(), 1); - assert_eq!(block1.header_view().parent_hash(), block0.header_view().sha3()); + assert_eq!(block1.header_view().parent_hash(), block0.header_view().hash()); } diff --git a/ethcore/src/blockchain/import_route.rs b/ethcore/src/blockchain/import_route.rs index e86639e73..ebd1454b4 100644 --- a/ethcore/src/blockchain/import_route.rs +++ b/ethcore/src/blockchain/import_route.rs @@ -16,7 +16,7 @@ //! Import route. -use util::H256; +use bigint::hash::H256; use blockchain::block_info::{BlockInfo, BlockLocation}; /// Import route for newly inserted block. 
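// --- Editorial aside (not part of the patch): the recurring shape of this
// migration, shown in isolation. Call sites stop going through ethcore-util's
// blanket `Hashable` trait (`data.sha3()`) and instead call the free `keccak`
// function from the new `hash` crate, with `H256` now imported from
// `ethcore-bigint`; the digest itself is unchanged (keccak-256). A minimal
// sketch, assuming the crate names used elsewhere in this patch.
extern crate ethcore_bigint as bigint;
extern crate hash;

use bigint::hash::H256;
use hash::keccak;

/// A header hash is simply keccak-256 of its RLP encoding.
fn header_hash(header_rlp: &[u8]) -> H256 {
    // was: header_rlp.sha3()
    keccak(header_rlp)
}

fn main() {
    println!("{:?}", header_hash(&[0xc0]));
}
// --- end editorial aside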
@@ -67,7 +67,8 @@ impl From for ImportRoute { #[cfg(test)] mod tests { - use util::{U256, H256}; + use bigint::prelude::U256; + use bigint::hash::H256; use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}; use blockchain::ImportRoute; diff --git a/ethcore/src/blockchain/update.rs b/ethcore/src/blockchain/update.rs index 914b8aa99..10a6fcd06 100644 --- a/ethcore/src/blockchain/update.rs +++ b/ethcore/src/blockchain/update.rs @@ -1,5 +1,5 @@ use std::collections::HashMap; -use util::H256; +use bigint::hash::H256; use header::BlockNumber; use blockchain::block_info::BlockInfo; use blooms::BloomGroup; diff --git a/ethcore/src/builtin.rs b/ethcore/src/builtin.rs index 9e49e610c..ac2726354 100644 --- a/ethcore/src/builtin.rs +++ b/ethcore/src/builtin.rs @@ -23,7 +23,10 @@ use crypto::ripemd160::Ripemd160 as Ripemd160Digest; use crypto::digest::Digest; use num::{BigUint, Zero, One}; -use util::{U256, H256, Hashable, BytesRef}; +use hash::keccak; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::BytesRef; use ethkey::{Signature, recover as ec_recover}; use ethjson; @@ -228,7 +231,7 @@ impl Impl for EcRecover { let s = Signature::from_rsv(&r, &s, bit); if s.is_valid() { if let Ok(p) = ec_recover(&s, &hash) { - let r = p.sha3(); + let r = keccak(p); output.write(0, &[0; 12]); output.write(12, &r[12..r.len()]); } @@ -266,6 +269,34 @@ impl Impl for Ripemd160 { } } +// calculate modexp: exponentiation by squaring. the `num` crate has pow, but not modular. +fn modexp(mut base: BigUint, mut exp: BigUint, modulus: BigUint) -> BigUint { + use num::Integer; + + match (base.is_zero(), exp.is_zero()) { + (_, true) => return BigUint::one(), // n^0 % m + (true, false) => return BigUint::zero(), // 0^n % m, n>0 + (false, false) if modulus <= BigUint::one() => return BigUint::zero(), // a^b % 1 = 0. + _ => {} + } + + let mut result = BigUint::one(); + base = base % &modulus; + + // fast path for base divisible by modulus. + if base.is_zero() { return BigUint::zero() } + while !exp.is_zero() { + if exp.is_odd() { + result = (result * &base) % &modulus; + } + + exp = exp >> 1; + base = (base.clone() * base) % &modulus; + } + + result +} + impl Impl for ModexpImpl { fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), Error> { let mut reader = input.chain(io::repeat(0)); @@ -294,34 +325,6 @@ impl Impl for ModexpImpl { let exp = read_num(exp_len); let modulus = read_num(mod_len); - // calculate modexp: exponentiation by squaring. the `num` crate has pow, but not modular. - fn modexp(mut base: BigUint, mut exp: BigUint, modulus: BigUint) -> BigUint { - use num::Integer; - - match (base.is_zero(), exp.is_zero()) { - (_, true) => return BigUint::one(), // n^0 % m - (true, false) => return BigUint::zero(), // 0^n % m, n>0 - (false, false) if modulus <= BigUint::one() => return BigUint::zero(), // a^b % 1 = 0. - _ => {} - } - - let mut result = BigUint::one(); - base = base % &modulus; - - // fast path for base divisible by modulus. - if base.is_zero() { return result } - while !exp.is_zero() { - if exp.is_odd() { - result = (result * &base) % &modulus; - } - - exp = exp >> 1; - base = (base.clone() * base) % &modulus; - } - - result - } - // write output to given memory, left padded and same length as the modulus. 
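// --- Editorial aside (not part of the patch): the square-and-multiply loop that
// the hoisted `modexp` above implements, restated on plain machine integers so
// the control flow is easy to follow. Names here are illustrative only; u128
// intermediates keep the multiplications from overflowing.
fn modexp_u64(mut base: u64, mut exp: u64, modulus: u64) -> u64 {
    if modulus <= 1 { return 0; }                 // a^b mod 1 == 0 (also guards mod 0)
    let mut result: u64 = 1;                      // n^0 == 1
    base %= modulus;
    while exp != 0 {
        if exp & 1 == 1 {
            // odd exponent: fold the current base into the result
            result = (result as u128 * base as u128 % modulus as u128) as u64;
        }
        exp >>= 1;                                                     // halve the exponent ...
        base = (base as u128 * base as u128 % modulus as u128) as u64; // ... and square the base
    }
    result
}
// e.g. modexp_u64(12345, 789, 97) == 55, matching the `modexp_func` test added below.
// --- end editorial aside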
let bytes = modexp(base, exp, modulus).to_bytes_be(); @@ -503,10 +506,45 @@ impl Impl for Bn128PairingImpl { #[cfg(test)] mod tests { - use super::{Builtin, Linear, ethereum_builtin, Pricer, Modexp}; + use super::{Builtin, Linear, ethereum_builtin, Pricer, Modexp, modexp as me}; use ethjson; - use util::{U256, BytesRef}; + use bigint::prelude::U256; + use util::BytesRef; use rustc_hex::FromHex; + use num::{BigUint, Zero, One}; + + #[test] + fn modexp_func() { + // n^0 % m == 1 + let mut base = BigUint::parse_bytes(b"12345", 10).unwrap(); + let mut exp = BigUint::zero(); + let mut modulus = BigUint::parse_bytes(b"789", 10).unwrap(); + assert_eq!(me(base, exp, modulus), BigUint::one()); + + // 0^n % m == 0 + base = BigUint::zero(); + exp = BigUint::parse_bytes(b"12345", 10).unwrap(); + modulus = BigUint::parse_bytes(b"789", 10).unwrap(); + assert_eq!(me(base, exp, modulus), BigUint::zero()); + + // n^m % 1 == 0 + base = BigUint::parse_bytes(b"12345", 10).unwrap(); + exp = BigUint::parse_bytes(b"789", 10).unwrap(); + modulus = BigUint::one(); + assert_eq!(me(base, exp, modulus), BigUint::zero()); + + // if n % d == 0, then n^m % d == 0 + base = BigUint::parse_bytes(b"12345", 10).unwrap(); + exp = BigUint::parse_bytes(b"789", 10).unwrap(); + modulus = BigUint::parse_bytes(b"15", 10).unwrap(); + assert_eq!(me(base, exp, modulus), BigUint::zero()); + + // others + base = BigUint::parse_bytes(b"12345", 10).unwrap(); + exp = BigUint::parse_bytes(b"789", 10).unwrap(); + modulus = BigUint::parse_bytes(b"97", 10).unwrap(); + assert_eq!(me(base, exp, modulus), BigUint::parse_bytes(b"55", 10).unwrap()); + } #[test] fn identity() { @@ -572,14 +610,6 @@ mod tests { #[test] fn ecrecover() { - /*let k = KeyPair::from_secret(b"test".sha3()).unwrap(); - let a: Address = From::from(k.public().sha3()); - println!("Address: {}", a); - let m = b"hello world".sha3(); - println!("Message: {}", m); - let s = k.sign(&m).unwrap(); - println!("Signed: {}", s);*/ - let f = ethereum_builtin("ecrecover"); let i = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); diff --git a/ethcore/src/client/ancient_import.rs b/ethcore/src/client/ancient_import.rs index d4d8696d2..aaf24cdc1 100644 --- a/ethcore/src/client/ancient_import.rs +++ b/ethcore/src/client/ancient_import.rs @@ -23,7 +23,7 @@ use engines::{Engine, EpochVerifier}; use header::Header; use rand::Rng; -use util::RwLock; +use parking_lot::RwLock; // do "heavy" verification on ~1/50 blocks, randomly sampled. const HEAVY_VERIFY_RATE: f32 = 0.02; diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs index 0a9bff8d7..977e7b435 100644 --- a/ethcore/src/client/chain_notify.rs +++ b/ethcore/src/client/chain_notify.rs @@ -15,7 +15,8 @@ // along with Parity. If not, see . 
use ipc::IpcConfig; -use util::{H256, Bytes}; +use bigint::hash::H256; +use util::Bytes; /// Represents what has to be handled by actor listening to chain events #[ipc] diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index ac8a0aea9..8948faa25 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -23,13 +23,18 @@ use time::precise_time_ns; use itertools::Itertools; // util -use util::{Bytes, PerfTimer, Mutex, RwLock, MutexGuard, Hashable}; +use hash::keccak; +use timer::PerfTimer; +use util::UtilError; +use util::Bytes; use util::{journaldb, DBValue, TrieFactory, Trie}; -use util::{U256, H256, Address, H2048}; +use util::Address; use util::trie::TrieSpec; use util::kvdb::*; // other +use bigint::prelude::U256; +use bigint::hash::{H256, H2048}; use basic_types::Seal; use block::*; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; @@ -54,6 +59,7 @@ use io::*; use log_entry::LocalizedLogEntry; use miner::{Miner, MinerService, TransactionImportResult}; use native_contracts::Registry; +use parking_lot::{Mutex, RwLock, MutexGuard}; use rand::OsRng; use receipt::{Receipt, LocalizedReceipt}; use rlp::UntrustedRlp; @@ -247,7 +253,7 @@ impl Client { last_hashes: RwLock::new(VecDeque::new()), factories: factories, history: history, - rng: Mutex::new(OsRng::new().map_err(::util::UtilError::StdIo)?), + rng: Mutex::new(OsRng::new().map_err(UtilError::from)?), ancient_verifier: Mutex::new(None), on_user_defaults_change: Mutex::new(None), registrar: Mutex::new(None), @@ -652,7 +658,7 @@ impl Client { .map(Into::into) .collect(); - assert_eq!(header.hash(), BlockView::new(block_data).header_view().sha3()); + assert_eq!(header.hash(), BlockView::new(block_data).header_view().hash()); //let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); @@ -1126,7 +1132,9 @@ impl Client { T: trace::Tracer, V: trace::VMTracer, { - let options = options.dont_check_nonce(); + let options = options + .dont_check_nonce() + .save_output_from_contract(); let original_state = if state_diff { Some(state.clone()) } else { None }; let mut ret = Executive::new(state, env_info, engine).transact_virtual(transaction, options)?; @@ -1440,6 +1448,10 @@ impl BlockChainClient for Client { self.state_at(id).and_then(|s| s.code(address).ok()).map(|c| c.map(|c| (&*c).clone())) } + fn code_hash(&self, address: &Address, id: BlockId) -> Option { + self.state_at(id).and_then(|s| s.code_hash(address).ok()) + } + fn balance(&self, address: &Address, id: BlockId) -> Option { self.state_at(id).and_then(|s| s.balance(address).ok()) } @@ -1503,7 +1515,7 @@ impl BlockChainClient for Client { }; let (_, db) = state.drop(); - let account_db = self.factories.accountdb.readonly(db.as_hashdb(), account.sha3()); + let account_db = self.factories.accountdb.readonly(db.as_hashdb(), keccak(account)); let trie = match self.factories.trie.readonly(account_db.as_hashdb(), &root) { Ok(trie) => trie, _ => { @@ -1591,7 +1603,7 @@ impl BlockChainClient for Client { use verification::queue::kind::BlockLike; use verification::queue::kind::blocks::Unverified; - // create unverified block here so the `sha3` calculation can be cached. + // create unverified block here so the `keccak` calculation can be cached. 
let unverified = Unverified::new(bytes); { @@ -1792,7 +1804,7 @@ impl BlockChainClient for Client { let dispatch = move |reg_addr, data| { future::done(self.call_contract(BlockId::Latest, reg_addr, data)) }; - r.get_address(dispatch, name.as_bytes().sha3(), "A".to_string()).wait().ok() + r.get_address(dispatch, keccak(name.as_bytes()), "A".to_string()).wait().ok() }) .and_then(|a| if a.is_zero() { None } else { Some(a) }) } @@ -2067,16 +2079,16 @@ mod tests { #[test] fn should_return_correct_log_index() { + use hash::keccak; use super::transaction_receipt; use ethkey::KeyPair; use log_entry::{LogEntry, LocalizedLogEntry}; use receipt::{Receipt, LocalizedReceipt}; use transaction::{Transaction, LocalizedTransaction, Action}; - use util::Hashable; use tests::helpers::TestEngine; // given - let key = KeyPair::from_secret_slice(&"test".sha3()).unwrap(); + let key = KeyPair::from_secret_slice(&keccak("test")).unwrap(); let secret = key.secret(); let engine = TestEngine::new(0); diff --git a/ethcore/src/client/evm_test_client.rs b/ethcore/src/client/evm_test_client.rs index a455a3724..e6f5e4675 100644 --- a/ethcore/src/client/evm_test_client.rs +++ b/ethcore/src/client/evm_test_client.rs @@ -18,7 +18,9 @@ use std::fmt; use std::sync::Arc; -use util::{self, U256, H256, journaldb, trie}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{self, journaldb, trie}; use util::kvdb::{self, KeyValueDB}; use {state, state_db, client, executive, trace, transaction, db, spec, pod_state}; use factory::Factories; @@ -166,7 +168,7 @@ impl<'a> EvmTestClient<'a> { author: *genesis.author(), timestamp: genesis.timestamp(), difficulty: *genesis.difficulty(), - last_hashes: Arc::new([util::H256::default(); 256].to_vec()), + last_hashes: Arc::new([H256::default(); 256].to_vec()), gas_used: 0.into(), gas_limit: *genesis.gas_limit(), }; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 0e7e53e43..aabd744f9 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -22,6 +22,10 @@ use std::collections::{HashMap, BTreeMap}; use std::mem; use itertools::Itertools; use rustc_hex::FromHex; +use hash::keccak; +use bigint::prelude::U256; +use bigint::hash::{H256, H2048}; +use parking_lot::RwLock; use util::*; use rlp::*; use ethkey::{Generator, Random}; @@ -241,7 +245,7 @@ impl TestBlockChainClient { uncle_header.set_parent_hash(self.last_hash.read().clone()); uncle_header.set_number(n as BlockNumber); uncles.append(&uncle_header); - header.set_uncles_hash(uncles.as_raw().sha3()); + header.set_uncles_hash(keccak(uncles.as_raw())); uncles }, _ => RlpStream::new_list(0) @@ -452,6 +456,13 @@ impl BlockChainClient for TestBlockChainClient { } } + fn code_hash(&self, address: &Address, id: BlockId) -> Option { + match id { + BlockId::Latest | BlockId::Pending => self.code.read().get(address).map(|c| keccak(&c)), + _ => None, + } + } + fn balance(&self, address: &Address, id: BlockId) -> Option { match id { BlockId::Latest | BlockId::Pending => Some(self.balances.read().get(address).cloned().unwrap_or_else(U256::zero)), diff --git a/ethcore/src/client/trace.rs b/ethcore/src/client/trace.rs index 3ab01757e..dfe8e30b7 100644 --- a/ethcore/src/client/trace.rs +++ b/ethcore/src/client/trace.rs @@ -1,7 +1,7 @@ //! Bridge between Tracedb and Blockchain. 
-use util::{H256}; +use bigint::hash::H256; use header::BlockNumber; use trace::DatabaseExtras as TraceDatabaseExtras; use blockchain::{BlockChain, BlockProvider}; diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index d7b7bba44..4a1f8a0c5 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -34,7 +34,9 @@ use trace::LocalizedTrace; use transaction::{LocalizedTransaction, PendingTransaction, SignedTransaction}; use verification::queue::QueueInfo as BlockQueueInfo; -use util::{U256, Address, H256, H2048, Bytes}; +use bigint::prelude::U256; +use bigint::hash::{H256, H2048}; +use util::{Address, Bytes}; use util::hashdb::DBValue; use types::ids::*; @@ -96,6 +98,9 @@ pub trait BlockChainClient : Sync + Send { .expect("code will return Some if given BlockId::Latest; qed") } + /// Get address code hash at given block's state. + fn code_hash(&self, address: &Address, id: BlockId) -> Option; + /// Get address balance at the given block's state. /// /// May not return None if given BlockId::Latest. diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs index bccb8e943..e60b77b2e 100644 --- a/ethcore/src/db.rs +++ b/ethcore/src/db.rs @@ -19,7 +19,8 @@ use std::ops::Deref; use std::hash::Hash; use std::collections::HashMap; -use util::{DBTransaction, KeyValueDB, RwLock}; +use parking_lot::RwLock; +use util::{DBTransaction, KeyValueDB}; use rlp; diff --git a/ethcore/src/encoded.rs b/ethcore/src/encoded.rs index 374a946db..1f7ee98b5 100644 --- a/ethcore/src/encoded.rs +++ b/ethcore/src/encoded.rs @@ -28,8 +28,11 @@ use header::{BlockNumber, Header as FullHeader}; use transaction::UnverifiedTransaction; use views; +use hash::keccak; use heapsize::HeapSizeOf; -use util::{Address, Hashable, H256, H2048, U256}; +use bigint::prelude::U256; +use bigint::hash::{H256, H2048}; +use util::Address; use rlp::Rlp; /// Owning header view. @@ -65,7 +68,7 @@ impl Header { // forwarders to borrowed view. impl Header { /// Returns the header hash. - pub fn hash(&self) -> H256 { self.sha3() } + pub fn hash(&self) -> H256 { keccak(&self.0) } /// Returns the parent hash. pub fn parent_hash(&self) -> H256 { self.view().parent_hash() } @@ -110,12 +113,6 @@ impl Header { pub fn seal(&self) -> Vec> { self.view().seal() } } -impl Hashable for Header { - fn sha3(&self) -> H256 { - self.0.sha3() - } -} - /// Owning block body view. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "ipc", binary)] @@ -219,7 +216,7 @@ impl Block { // forwarders to borrowed header view. impl Block { /// Returns the header hash. - pub fn hash(&self) -> H256 { self.header_view().sha3() } + pub fn hash(&self) -> H256 { self.header_view().hash() } /// Returns the parent hash. 
pub fn parent_hash(&self) -> H256 { self.header_view().parent_hash() } diff --git a/ethcore/src/engines/authority_round/finality.rs b/ethcore/src/engines/authority_round/finality.rs index 4e1bdf6a3..72ff5af92 100644 --- a/ethcore/src/engines/authority_round/finality.rs +++ b/ethcore/src/engines/authority_round/finality.rs @@ -19,7 +19,8 @@ use std::collections::{VecDeque}; use std::collections::hash_map::{HashMap, Entry}; -use util::{Address, H256}; +use bigint::hash::H256; +use util::Address; use engines::validator_set::SimpleList; @@ -149,7 +150,8 @@ impl<'a> Iterator for Iter<'a> { #[cfg(test)] mod tests { - use util::{Address, H256}; + use bigint::hash::H256; + use util::Address; use super::RollingFinality; #[test] diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs index 63f0abef1..fc0080f8a 100644 --- a/ethcore/src/engines/authority_round/mod.rs +++ b/ethcore/src/engines/authority_round/mod.rs @@ -42,6 +42,11 @@ use ethkey::{verify_address, Signature}; use io::{IoContext, IoHandler, TimerToken, IoService}; use itertools::{self, Itertools}; use rlp::{UntrustedRlp, encode}; +use bigint::prelude::{U256, U128}; +use bigint::hash::{H256, H520}; +use semantic_version::SemanticVersion; +use parking_lot::{Mutex, RwLock}; +use unexpected::{Mismatch, OutOfBounds}; use util::*; mod finality; @@ -830,7 +835,9 @@ impl Engine for AuthorityRound { mod tests { use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; - use util::*; + use hash::keccak; + use bigint::prelude::U256; + use bigint::hash::H520; use header::Header; use error::{Error, BlockError}; use rlp::encode; @@ -884,8 +891,8 @@ mod tests { #[test] fn generates_seal_and_does_not_double_propose() { let tap = Arc::new(AccountProvider::transient_provider()); - let addr1 = tap.insert_account("1".sha3().into(), "1").unwrap(); - let addr2 = tap.insert_account("2".sha3().into(), "2").unwrap(); + let addr1 = tap.insert_account(keccak("1").into(), "1").unwrap(); + let addr2 = tap.insert_account(keccak("2").into(), "2").unwrap(); let spec = Spec::new_test_round(); let engine = &*spec.engine; @@ -916,7 +923,7 @@ mod tests { #[test] fn proposer_switching() { let tap = AccountProvider::transient_provider(); - let addr = tap.insert_account("0".sha3().into(), "0").unwrap(); + let addr = tap.insert_account(keccak("0").into(), "0").unwrap(); let mut parent_header: Header = Header::default(); parent_header.set_seal(vec![encode(&0usize).into_vec()]); parent_header.set_gas_limit("222222".parse::().unwrap()); @@ -941,7 +948,7 @@ mod tests { #[test] fn rejects_future_block() { let tap = AccountProvider::transient_provider(); - let addr = tap.insert_account("0".sha3().into(), "0").unwrap(); + let addr = tap.insert_account(keccak("0").into(), "0").unwrap(); let mut parent_header: Header = Header::default(); parent_header.set_seal(vec![encode(&0usize).into_vec()]); @@ -967,7 +974,7 @@ mod tests { #[test] fn rejects_step_backwards() { let tap = AccountProvider::transient_provider(); - let addr = tap.insert_account("0".sha3().into(), "0").unwrap(); + let addr = tap.insert_account(keccak("0").into(), "0").unwrap(); let mut parent_header: Header = Header::default(); parent_header.set_seal(vec![encode(&4usize).into_vec()]); diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 5d6a44fc5..49dc71c0e 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -19,7 +19,11 @@ use std::sync::{Weak, Arc}; use 
std::collections::BTreeMap; use std::cmp; +use bigint::prelude::U256; +use bigint::hash::{H256, H520}; +use parking_lot::RwLock; use util::*; +use unexpected::{Mismatch, OutOfBounds}; use ethkey::{recover, public_to_address, Signature}; use account_provider::AccountProvider; use block::*; @@ -31,6 +35,7 @@ use evm::Schedule; use ethjson; use header::{Header, BlockNumber}; use client::Client; +use semantic_version::SemanticVersion; use super::signer::EngineSigner; use super::validator_set::{ValidatorSet, SimpleList, new_validator_set}; @@ -252,7 +257,8 @@ impl Engine for BasicAuthority { #[cfg(test)] mod tests { use std::sync::Arc; - use util::*; + use hash::keccak; + use bigint::hash::H520; use block::*; use error::{BlockError, Error}; use tests::helpers::*; @@ -308,7 +314,7 @@ mod tests { #[test] fn can_generate_seal() { let tap = AccountProvider::transient_provider(); - let addr = tap.insert_account("".sha3().into(), "").unwrap(); + let addr = tap.insert_account(keccak("").into(), "").unwrap(); let spec = new_test_authority(); let engine = &*spec.engine; @@ -326,7 +332,7 @@ mod tests { #[test] fn seals_internally() { let tap = AccountProvider::transient_provider(); - let authority = tap.insert_account("".sha3().into(), "").unwrap(); + let authority = tap.insert_account(keccak("").into(), "").unwrap(); let engine = new_test_authority().engine; assert!(!engine.seals_internally().unwrap()); diff --git a/ethcore/src/engines/epoch.rs b/ethcore/src/engines/epoch.rs index 586059e83..fb0eaa267 100644 --- a/ethcore/src/engines/epoch.rs +++ b/ethcore/src/engines/epoch.rs @@ -16,7 +16,7 @@ //! Epoch verifiers and transitions. -use util::H256; +use bigint::hash::H256; use error::Error; use header::Header; diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 976f815e0..8a2d369c1 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -64,6 +64,7 @@ impl Engine for InstantSeal { #[cfg(test)] mod tests { use std::sync::Arc; + use bigint::hash::H520; use util::*; use tests::helpers::*; use spec::Spec; diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index cb13c984a..2f48150ab 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -54,7 +54,11 @@ use spec::CommonParams; use transaction::{UnverifiedTransaction, SignedTransaction}; use ethkey::Signature; +use bigint::prelude::U256; +use bigint::hash::H256; +use semantic_version::SemanticVersion; use util::*; +use unexpected::{Mismatch, OutOfBounds}; /// Default EIP-210 contrat code. /// As defined in https://github.com/ethereum/EIPs/pull/210/commits/9df24a3714af42e3bf350265bdc75b486c909d7f#diff-e02a92c2fb96c1a1bfb05e4c6e2ef5daR49 @@ -403,6 +407,8 @@ pub mod common { use state::Substate; use state::CleanupMode; + use bigint::prelude::U256; + use bigint::hash::H256; use util::*; use super::Engine; diff --git a/ethcore/src/engines/null_engine.rs b/ethcore/src/engines/null_engine.rs index a52342c59..a07952ee3 100644 --- a/ethcore/src/engines/null_engine.rs +++ b/ethcore/src/engines/null_engine.rs @@ -18,7 +18,7 @@ use std::collections::BTreeMap; use util::Address; use builtin::Builtin; use block::{ExecutedBlock, IsBlock}; -use util::U256; +use bigint::prelude::U256; use engines::Engine; use spec::CommonParams; use evm::Schedule; diff --git a/ethcore/src/engines/signer.rs b/ethcore/src/engines/signer.rs index 4069488ab..ae673cb84 100644 --- a/ethcore/src/engines/signer.rs +++ b/ethcore/src/engines/signer.rs @@ -17,7 +17,8 @@ //! 
A signer used by Engines which need to sign messages. use std::sync::Arc; -use util::{H256, Address}; +use bigint::hash::H256; +use util::Address; use ethkey::Signature; use account_provider::{self, AccountProvider}; diff --git a/ethcore/src/engines/tendermint/message.rs b/ethcore/src/engines/tendermint/message.rs index 68bdcb0f7..baaf94601 100644 --- a/ethcore/src/engines/tendermint/message.rs +++ b/ethcore/src/engines/tendermint/message.rs @@ -17,6 +17,8 @@ //! Tendermint message handling. use std::cmp; +use hash::keccak; +use bigint::hash::{H256, H520}; use util::*; use super::{Height, View, BlockHash, Step}; use error::Error; @@ -99,7 +101,7 @@ impl ConsensusMessage { pub fn verify(&self) -> Result { let full_rlp = ::rlp::encode(self); let block_info = Rlp::new(&full_rlp).at(1); - let public_key = recover(&self.signature.into(), &block_info.as_raw().sha3())?; + let public_key = recover(&self.signature.into(), &keccak(block_info.as_raw()))?; Ok(public_to_address(&public_key)) } } @@ -194,13 +196,13 @@ pub fn message_full_rlp(signature: &H520, vote_info: &Bytes) -> Bytes { } pub fn message_hash(vote_step: VoteStep, block_hash: H256) -> H256 { - message_info_rlp(&vote_step, Some(block_hash)).sha3() + keccak(message_info_rlp(&vote_step, Some(block_hash))) } #[cfg(test)] mod tests { use std::sync::Arc; - use util::*; + use hash::keccak; use rlp::*; use account_provider::AccountProvider; use header::Header; @@ -228,7 +230,7 @@ mod tests { view: 123, step: Step::Precommit, }, - block_hash: Some("1".sha3()) + block_hash: Some(keccak("1")), }; let raw_rlp = ::rlp::encode(&message).into_vec(); let rlp = Rlp::new(&raw_rlp); @@ -251,12 +253,12 @@ mod tests { #[test] fn generate_and_verify() { let tap = Arc::new(AccountProvider::transient_provider()); - let addr = tap.insert_account("0".sha3().into(), "0").unwrap(); + let addr = tap.insert_account(keccak("0").into(), "0").unwrap(); tap.unlock_account_permanently(addr, "0".into()).unwrap(); let mi = message_info_rlp(&VoteStep::new(123, 2, Step::Precommit), Some(H256::default())); - let raw_rlp = message_full_rlp(&tap.sign(addr, None, mi.sha3()).unwrap().into(), &mi); + let raw_rlp = message_full_rlp(&tap.sign(addr, None, keccak(&mi)).unwrap().into(), &mi); let rlp = UntrustedRlp::new(&raw_rlp); let message: ConsensusMessage = rlp.as_val().unwrap(); diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index ac5ccd7b2..ee447d8da 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -28,8 +28,13 @@ mod params; use std::sync::{Weak, Arc}; use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; use std::collections::{HashSet, BTreeMap, HashMap}; +use hash::keccak; use std::cmp; +use bigint::prelude::{U128, U256}; +use bigint::hash::{H256, H520}; +use parking_lot::RwLock; use util::*; +use unexpected::{OutOfBounds, Mismatch}; use client::{Client, EngineClient}; use error::{Error, BlockError}; use header::{Header, BlockNumber}; @@ -47,6 +52,7 @@ use super::transition::TransitionHandler; use super::vote_collector::VoteCollector; use self::message::*; use self::params::TendermintParams; +use semantic_version::SemanticVersion; #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)] pub enum Step { @@ -213,7 +219,7 @@ impl Tendermint { let r = self.view.load(AtomicOrdering::SeqCst); let s = *self.step.read(); let vote_info = message_info_rlp(&VoteStep::new(h, r, s), block_hash); - match (self.signer.read().address(), self.sign(vote_info.sha3()).map(Into::into)) { + match 
(self.signer.read().address(), self.sign(keccak(&vote_info)).map(Into::into)) { (Some(validator), Ok(signature)) => { let message_rlp = message_full_rlp(&signature, &vote_info); let message = ConsensusMessage::new(signature, h, r, s, block_hash); @@ -498,7 +504,7 @@ impl Engine for Tendermint { let view = self.view.load(AtomicOrdering::SeqCst); let bh = Some(header.bare_hash()); let vote_info = message_info_rlp(&VoteStep::new(height, view, Step::Propose), bh.clone()); - if let Ok(signature) = self.sign(vote_info.sha3()).map(Into::into) { + if let Ok(signature) = self.sign(keccak(&vote_info)).map(Into::into) { // Insert Propose vote. debug!(target: "engine", "Submitting proposal {} at height {} view {}.", header.bare_hash(), height, view); self.votes.vote(ConsensusMessage::new(signature, height, view, Step::Propose, bh), author); @@ -522,7 +528,7 @@ impl Engine for Tendermint { let rlp = UntrustedRlp::new(rlp); let message: ConsensusMessage = rlp.as_val()?; if !self.votes.is_old_or_known(&message) { - let sender = public_to_address(&recover(&message.signature.into(), &rlp.at(1)?.as_raw().sha3())?); + let sender = public_to_address(&recover(&message.signature.into(), &keccak(rlp.at(1)?.as_raw()))?); if !self.is_authority(&sender) { return Err(EngineError::NotAuthorized(sender).into()); } @@ -798,7 +804,7 @@ mod tests { fn vote(engine: &Engine, signer: F, height: usize, view: usize, step: Step, block_hash: Option) -> Bytes where F: FnOnce(H256) -> Result { let mi = message_info_rlp(&VoteStep::new(height, view, step), block_hash); - let m = message_full_rlp(&signer(mi.sha3()).unwrap().into(), &mi); + let m = message_full_rlp(&signer(keccak(&mi)).unwrap().into(), &mi); engine.handle_message(&m).unwrap(); m } @@ -806,7 +812,7 @@ mod tests { fn proposal_seal(tap: &Arc, header: &Header, view: View) -> Vec { let author = header.author(); let vote_info = message_info_rlp(&VoteStep::new(header.number() as Height, view, Step::Propose), Some(header.bare_hash())); - let signature = tap.sign(*author, None, vote_info.sha3()).unwrap(); + let signature = tap.sign(*author, None, keccak(vote_info)).unwrap(); vec![ ::rlp::encode(&view).into_vec(), ::rlp::encode(&H520::from(signature)).into_vec(), @@ -815,7 +821,7 @@ mod tests { } fn insert_and_unlock(tap: &Arc, acc: &str) -> Address { - let addr = tap.insert_account(acc.sha3().into(), acc).unwrap(); + let addr = tap.insert_account(keccak(acc).into(), acc).unwrap(); tap.unlock_account_permanently(addr, acc.into()).unwrap(); addr } @@ -922,7 +928,7 @@ mod tests { let mut seal = proposal_seal(&tap, &header, 0); let vote_info = message_info_rlp(&VoteStep::new(2, 0, Step::Precommit), Some(header.bare_hash())); - let signature1 = tap.sign(proposer, None, vote_info.sha3()).unwrap(); + let signature1 = tap.sign(proposer, None, keccak(&vote_info)).unwrap(); seal[1] = ::rlp::NULL_RLP.to_vec(); seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone())]).into_vec(); @@ -935,7 +941,7 @@ mod tests { } let voter = insert_and_unlock(&tap, "0"); - let signature0 = tap.sign(voter, None, vote_info.sha3()).unwrap(); + let signature0 = tap.sign(voter, None, keccak(&vote_info)).unwrap(); seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone()), H520::from(signature0.clone())]).into_vec(); header.set_seal(seal.clone()); @@ -943,7 +949,7 @@ mod tests { assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); let bad_voter = insert_and_unlock(&tap, "101"); - let bad_signature = tap.sign(bad_voter, None, vote_info.sha3()).unwrap(); + let 
bad_signature = tap.sign(bad_voter, None, keccak(vote_info)).unwrap(); seal[2] = ::rlp::encode_list(&vec![H520::from(signature1), H520::from(bad_signature)]).into_vec(); header.set_seal(seal); @@ -1076,10 +1082,10 @@ mod tests { let mut seal = proposal_seal(&tap, &header, 0); let vote_info = message_info_rlp(&VoteStep::new(2, 0, Step::Precommit), Some(header.bare_hash())); - let signature1 = tap.sign(proposer, None, vote_info.sha3()).unwrap(); + let signature1 = tap.sign(proposer, None, keccak(&vote_info)).unwrap(); let voter = insert_and_unlock(&tap, "0"); - let signature0 = tap.sign(voter, None, vote_info.sha3()).unwrap(); + let signature0 = tap.sign(voter, None, keccak(&vote_info)).unwrap(); seal[1] = ::rlp::NULL_RLP.to_vec(); seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone())]).into_vec(); @@ -1116,7 +1122,7 @@ mod tests { assert!(epoch_verifier.verify_light(&header).is_ok()); let bad_voter = insert_and_unlock(&tap, "101"); - let bad_signature = tap.sign(bad_voter, None, vote_info.sha3()).unwrap(); + let bad_signature = tap.sign(bad_voter, None, keccak(&vote_info)).unwrap(); seal[2] = ::rlp::encode_list(&vec![H520::from(signature1), H520::from(bad_signature)]).into_vec(); header.set_seal(seal); diff --git a/ethcore/src/engines/validator_set/contract.rs b/ethcore/src/engines/validator_set/contract.rs index 7c1890379..c84c6e448 100644 --- a/ethcore/src/engines/validator_set/contract.rs +++ b/ethcore/src/engines/validator_set/contract.rs @@ -18,6 +18,8 @@ /// It can also report validators for misbehaviour with two levels: `reportMalicious` and `reportBenign`. use std::sync::Weak; +use bigint::hash::H256; +use parking_lot::RwLock; use util::*; use futures::Future; @@ -128,6 +130,8 @@ impl ValidatorSet for ValidatorContract { mod tests { use std::sync::Arc; use rustc_hex::FromHex; + use hash::keccak; + use bigint::hash::H520; use util::*; use rlp::encode; use spec::Spec; @@ -153,7 +157,7 @@ mod tests { #[test] fn reports_validators() { let tap = Arc::new(AccountProvider::transient_provider()); - let v1 = tap.insert_account("1".sha3().into(), "").unwrap(); + let v1 = tap.insert_account(keccak("1").into(), "").unwrap(); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_contract, Some(tap.clone())); client.engine().register_client(Arc::downgrade(&client)); let validator_contract = "0000000000000000000000000000000000000005".parse::
().unwrap(); diff --git a/ethcore/src/engines/validator_set/mod.rs b/ethcore/src/engines/validator_set/mod.rs index 56cff365f..d60518c45 100644 --- a/ethcore/src/engines/validator_set/mod.rs +++ b/ethcore/src/engines/validator_set/mod.rs @@ -25,7 +25,8 @@ mod multi; use std::sync::Weak; use ids::BlockId; -use util::{Bytes, Address, H256}; +use bigint::hash::H256; +use util::{Bytes, Address}; use ethjson::spec::ValidatorSet as ValidatorSpec; use client::Client; use header::{Header, BlockNumber}; diff --git a/ethcore/src/engines/validator_set/multi.rs b/ethcore/src/engines/validator_set/multi.rs index 9acf6050b..c115d1596 100644 --- a/ethcore/src/engines/validator_set/multi.rs +++ b/ethcore/src/engines/validator_set/multi.rs @@ -19,7 +19,9 @@ use std::collections::BTreeMap; use std::sync::Weak; use engines::{Call, Engine}; -use util::{Bytes, H256, Address, RwLock}; +use bigint::hash::H256; +use parking_lot::RwLock; +use util::{Bytes, Address}; use ids::BlockId; use header::{BlockNumber, Header}; use client::{Client, BlockChainClient}; @@ -144,6 +146,7 @@ impl ValidatorSet for Multi { mod tests { use std::sync::Arc; use std::collections::BTreeMap; + use hash::keccak; use account_provider::AccountProvider; use client::{BlockChainClient, EngineClient}; use engines::EpochChange; @@ -163,9 +166,9 @@ mod tests { let _ = ::env_logger::init(); let tap = Arc::new(AccountProvider::transient_provider()); - let s0: Secret = "0".sha3().into(); + let s0: Secret = keccak("0").into(); let v0 = tap.insert_account(s0.clone(), "").unwrap(); - let v1 = tap.insert_account("1".sha3().into(), "").unwrap(); + let v1 = tap.insert_account(keccak("1").into(), "").unwrap(); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_multi, Some(tap)); client.engine().register_client(Arc::downgrade(&client)); diff --git a/ethcore/src/engines/validator_set/safe_contract.rs b/ethcore/src/engines/validator_set/safe_contract.rs index 1abcf2c8f..6d5f89182 100644 --- a/ethcore/src/engines/validator_set/safe_contract.rs +++ b/ethcore/src/engines/validator_set/safe_contract.rs @@ -19,9 +19,14 @@ use std::sync::{Weak, Arc}; use futures::Future; use native_contracts::ValidatorSet as Provider; +use hash::keccak; +use bigint::prelude::U256; +use bigint::hash::{H160, H256}; +use parking_lot::RwLock; use util::*; use util::cache::MemoryLruCache; +use unexpected::Mismatch; use rlp::{UntrustedRlp, RlpStream}; use basic_types::LogBloom; @@ -41,7 +46,7 @@ const MEMOIZE_CAPACITY: usize = 500; const EVENT_NAME: &'static [u8] = &*b"InitiateChange(bytes32,address[])"; lazy_static! { - static ref EVENT_NAME_HASH: H256 = EVENT_NAME.sha3(); + static ref EVENT_NAME_HASH: H256 = keccak(EVENT_NAME); } /// The validator contract should have the following interface: @@ -351,7 +356,7 @@ impl ValidatorSet for ValidatorSafeContract { // ensure receipts match header. // TODO: optimize? these were just decoded. 
- let found_root = ::util::triehash::ordered_trie_root( + let found_root = ::triehash::ordered_trie_root( receipts.iter().map(::rlp::encode).map(|x| x.to_vec()) ); if found_root != *old_header.receipts_root() { @@ -424,6 +429,7 @@ impl ValidatorSet for ValidatorSafeContract { mod tests { use std::sync::Arc; use rustc_hex::FromHex; + use hash::keccak; use util::*; use types::ids::BlockId; use spec::Spec; @@ -449,9 +455,9 @@ mod tests { #[test] fn knows_validators() { let tap = Arc::new(AccountProvider::transient_provider()); - let s0: Secret = "1".sha3().into(); + let s0: Secret = keccak("1").into(); let v0 = tap.insert_account(s0.clone(), "").unwrap(); - let v1 = tap.insert_account("0".sha3().into(), "").unwrap(); + let v1 = tap.insert_account(keccak("0").into(), "").unwrap(); let chain_id = Spec::new_validator_safe_contract().chain_id(); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, Some(tap)); client.engine().register_client(Arc::downgrade(&client)); diff --git a/ethcore/src/engines/validator_set/simple_list.rs b/ethcore/src/engines/validator_set/simple_list.rs index 40cbe4e93..eeeb4cb80 100644 --- a/ethcore/src/engines/validator_set/simple_list.rs +++ b/ethcore/src/engines/validator_set/simple_list.rs @@ -17,7 +17,8 @@ /// Preconfigured validator list. use heapsize::HeapSizeOf; -use util::{H256, Address}; +use bigint::hash::H256; +use util::Address; use engines::{Call, Engine}; use header::{BlockNumber, Header}; diff --git a/ethcore/src/engines/validator_set/test.rs b/ethcore/src/engines/validator_set/test.rs index 92472d743..da89880ca 100644 --- a/ethcore/src/engines/validator_set/test.rs +++ b/ethcore/src/engines/validator_set/test.rs @@ -20,7 +20,8 @@ use std::str::FromStr; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; use heapsize::HeapSizeOf; -use util::{Bytes, H256, Address}; +use bigint::hash::H256; +use util::{Bytes, Address}; use engines::{Call, Engine}; use header::{Header, BlockNumber}; diff --git a/ethcore/src/engines/vote_collector.rs b/ethcore/src/engines/vote_collector.rs index b934fdb2e..f759cbaf8 100644 --- a/ethcore/src/engines/vote_collector.rs +++ b/ethcore/src/engines/vote_collector.rs @@ -19,6 +19,8 @@ use std::fmt::Debug; use std::collections::{BTreeMap, HashSet, HashMap}; use std::hash::Hash; +use bigint::hash::{H256, H520}; +use parking_lot:: RwLock; use util::*; use rlp::{Encodable, RlpStream}; @@ -206,6 +208,8 @@ impl VoteCollector { #[cfg(test)] mod tests { + use hash::keccak; + use bigint::hash::H160; use util::*; use rlp::*; use super::*; @@ -251,7 +255,7 @@ mod tests { #[test] fn seal_retrieval() { let collector = VoteCollector::default(); - let bh = Some("1".sha3()); + let bh = Some(keccak("1")); let mut signatures = Vec::new(); for _ in 0..5 { signatures.push(H520::random()); @@ -263,9 +267,9 @@ mod tests { // Good proposal random_vote(&collector, signatures[0].clone(), propose_round.clone(), bh.clone()); // Wrong block proposal. - random_vote(&collector, signatures[0].clone(), propose_round.clone(), Some("0".sha3())); + random_vote(&collector, signatures[0].clone(), propose_round.clone(), Some(keccak("0"))); // Wrong block commit. - random_vote(&collector, signatures[3].clone(), commit_round.clone(), Some("0".sha3())); + random_vote(&collector, signatures[3].clone(), commit_round.clone(), Some(keccak("0"))); // Wrong round. random_vote(&collector, signatures[0].clone(), 6, bh.clone()); // Wrong round. 
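The hunks above swap the old `Hashable::sha3()` extension method for a free `keccak()` function exported by the new in-tree `hash` crate (see `EVENT_NAME_HASH` and the `keccak("1")` test fixtures). The sketch below only illustrates that call shape; it is not part of the diff, it assumes the public `tiny-keccak` 2.x crate as a stand-in for the in-tree crate, and it returns a raw `[u8; 32]` rather than an `H256`.

```rust
// Illustration only: approximates the free-function call shape that replaces
// `value.sha3()` in the hunks above. Assumes tiny-keccak 2.x.
use tiny_keccak::{Hasher, Keccak};

fn keccak<T: AsRef<[u8]>>(data: T) -> [u8; 32] {
    let mut hasher = Keccak::v256();
    hasher.update(data.as_ref());
    let mut out = [0u8; 32];
    hasher.finalize(&mut out);
    out
}

fn main() {
    // Old style: EVENT_NAME.sha3()  /  new style: keccak(EVENT_NAME).
    let topic = keccak(&b"InitiateChange(bytes32,address[])"[..]);
    // Test fixtures such as keccak("1") hash the UTF-8 bytes of the literal.
    let block_hash = keccak("1");
    println!("topic = {:02x?}", topic);
    println!("block hash = {:02x?}", block_hash);
}
```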
@@ -291,22 +295,22 @@ mod tests { let round1 = 1; let round3 = 3; // good 1 - random_vote(&collector, H520::random(), round1, Some("0".sha3())); - random_vote(&collector, H520::random(), 0, Some("0".sha3())); + random_vote(&collector, H520::random(), round1, Some(keccak("0"))); + random_vote(&collector, H520::random(), 0, Some(keccak("0"))); // good 3 - random_vote(&collector, H520::random(), round3, Some("0".sha3())); - random_vote(&collector, H520::random(), 2, Some("0".sha3())); + random_vote(&collector, H520::random(), round3, Some(keccak("0"))); + random_vote(&collector, H520::random(), 2, Some(keccak("0"))); // good prevote - random_vote(&collector, H520::random(), round1, Some("1".sha3())); + random_vote(&collector, H520::random(), round1, Some(keccak("1"))); // good prevote let same_sig = H520::random(); - random_vote(&collector, same_sig.clone(), round1, Some("1".sha3())); - random_vote(&collector, same_sig, round1, Some("1".sha3())); + random_vote(&collector, same_sig.clone(), round1, Some(keccak("1"))); + random_vote(&collector, same_sig, round1, Some(keccak("1"))); // good precommit - random_vote(&collector, H520::random(), round3, Some("1".sha3())); + random_vote(&collector, H520::random(), round3, Some(keccak("1"))); // good prevote - random_vote(&collector, H520::random(), round1, Some("0".sha3())); - random_vote(&collector, H520::random(), 4, Some("2".sha3())); + random_vote(&collector, H520::random(), round1, Some(keccak("0"))); + random_vote(&collector, H520::random(), 4, Some(keccak("2"))); assert_eq!(collector.count_round_votes(&round1), 4); assert_eq!(collector.count_round_votes(&round3), 2); @@ -314,7 +318,7 @@ mod tests { let message = TestMessage { signature: H520::default(), step: round1, - block_hash: Some("1".sha3()) + block_hash: Some(keccak("1")) }; assert_eq!(collector.count_aligned_votes(&message), 2); } @@ -325,11 +329,11 @@ mod tests { let vote = |round, hash| { random_vote(&collector, H520::random(), round, hash); }; - vote(6, Some("0".sha3())); - vote(3, Some("0".sha3())); - vote(7, Some("0".sha3())); - vote(8, Some("1".sha3())); - vote(1, Some("1".sha3())); + vote(6, Some(keccak("0"))); + vote(3, Some(keccak("0"))); + vote(7, Some(keccak("0"))); + vote(8, Some(keccak("1"))); + vote(1, Some(keccak("1"))); collector.throw_out_old(&7); assert_eq!(collector.len(), 2); @@ -340,9 +344,9 @@ mod tests { let collector = VoteCollector::default(); let round = 3; // Vote is inserted fine. - assert!(full_vote(&collector, H520::random(), round, Some("0".sha3()), &Address::default())); + assert!(full_vote(&collector, H520::random(), round, Some(keccak("0")), &Address::default())); // Returns the double voting address. - assert!(!full_vote(&collector, H520::random(), round, Some("1".sha3()), &Address::default())); + assert!(!full_vote(&collector, H520::random(), round, Some(keccak("1")), &Address::default())); assert_eq!(collector.count_round_votes(&round), 1); } } diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 172323c0d..8b0554e5a 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -17,7 +17,10 @@ //! General error types for use in ethcore. use std::fmt; +use bigint::prelude::U256; +use bigint::hash::H256; use util::*; +use unexpected::{Mismatch, OutOfBounds}; use io::*; use header::BlockNumber; use basic_types::LogBloom; @@ -80,6 +83,8 @@ pub enum TransactionError { CodeBanned, /// Invalid chain ID given. InvalidChainId, + /// Not enough permissions given by permission contract. 
+ NotAllowed, } impl fmt::Display for TransactionError { @@ -104,6 +109,7 @@ impl fmt::Display for TransactionError { RecipientBanned => "Recipient is temporarily banned.".into(), CodeBanned => "Contract code is temporarily banned.".into(), InvalidChainId => "Transaction of this chain ID is not allowed on this chain.".into(), + NotAllowed => "Sender does not have permissions to execute this type of transaction".into(), }; f.write_fmt(format_args!("Transaction error ({})", msg)) @@ -389,7 +395,7 @@ impl From for Error { impl From<::rlp::DecoderError> for Error { fn from(err: ::rlp::DecoderError) -> Error { - Error::Util(UtilError::Decoder(err)) + Error::Util(UtilError::from(err)) } } @@ -422,7 +428,7 @@ impl From for Error { match err { BlockImportError::Block(e) => Error::Block(e), BlockImportError::Import(e) => Error::Import(e), - BlockImportError::Other(s) => Error::Util(UtilError::SimpleString(s)), + BlockImportError::Other(s) => Error::Util(UtilError::from(s)), } } } diff --git a/ethcore/src/ethereum/denominations.rs b/ethcore/src/ethereum/denominations.rs index 789c803b9..4364b0296 100644 --- a/ethcore/src/ethereum/denominations.rs +++ b/ethcore/src/ethereum/denominations.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::*; +use bigint::prelude::U256; #[inline] /// 1 Ether in Wei diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index ef997f5a8..b11700e09 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -17,9 +17,13 @@ use std::path::Path; use std::cmp; use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; +use std::sync::{Arc, Weak}; +use hash::{KECCAK_EMPTY_LIST_RLP}; use ethash::{quick_get_difficulty, slow_get_seedhash, EthashManager}; +use bigint::prelude::U256; +use bigint::hash::{H256, H64}; use util::*; +use unexpected::{OutOfBounds, Mismatch}; use block::*; use builtin::Builtin; use vm::EnvInfo; @@ -28,12 +32,15 @@ use trace::{Tracer, ExecutiveTracer, RewardType}; use header::{Header, BlockNumber}; use state::CleanupMode; use spec::CommonParams; -use transaction::UnverifiedTransaction; +use transaction::{UnverifiedTransaction, SignedTransaction}; use engines::{self, Engine}; use evm::Schedule; use ethjson; use rlp::{self, UntrustedRlp}; use vm::LastHashes; +use semantic_version::SemanticVersion; +use tx_filter::{TransactionFilter}; +use client::{Client, BlockChainClient}; /// Parity tries to round block.gas_limit to multiple of this constant pub const PARITY_GAS_LIMIT_DETERMINANT: U256 = U256([37, 0, 0, 0]); @@ -139,6 +146,7 @@ pub struct Ethash { ethash_params: EthashParams, builtins: BTreeMap, pow: EthashManager, + tx_filter: Option, } impl Ethash { @@ -150,6 +158,7 @@ impl Ethash { builtins: BTreeMap, ) -> Arc { Arc::new(Ethash { + tx_filter: TransactionFilter::from_params(&params), params, ethash_params, builtins, @@ -435,6 +444,14 @@ impl Engine for Arc { Ok(()) } + fn verify_transaction(&self, t: UnverifiedTransaction, header: &Header) -> Result { + let signed = SignedTransaction::new(t)?; + if !self.tx_filter.as_ref().map_or(true, |filter| filter.transaction_allowed(header.parent_hash(), &signed)) { + return Err(From::from(TransactionError::NotAllowed)); + } + Ok(signed) + } + fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> engines::ConstructedVerifier<'a> { engines::ConstructedVerifier::Trusted(Box::new(self.clone())) } @@ -442,6 +459,13 @@ impl Engine for Arc { fn snapshot_components(&self) -> 
Option> { Some(Box::new(::snapshot::PowSnapshot::new(SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS))) } + + fn register_client(&self, client: Weak) { + if let Some(ref filter) = self.tx_filter { + filter.register_client(client as Weak); + } + } + } // Try to round gas_limit a bit so that: @@ -481,7 +505,7 @@ impl Ethash { panic!("Can't calculate genesis block difficulty"); } - let parent_has_uncles = parent.uncles_hash() != &sha3::SHA3_EMPTY_LIST_RLP; + let parent_has_uncles = parent.uncles_hash() != &KECCAK_EMPTY_LIST_RLP; let min_difficulty = self.ethash_params.minimum_difficulty; @@ -586,6 +610,8 @@ mod tests { use std::str::FromStr; use std::collections::BTreeMap; use std::sync::Arc; + use bigint::prelude::U256; + use bigint::hash::{H64, H256}; use util::*; use block::*; use tests::helpers::*; diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index dee86883b..8cc9446a6 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -92,7 +92,7 @@ pub fn new_metropolis_test() -> Spec { load(None, include_bytes!("../../res/ethe #[cfg(test)] mod tests { - use util::*; + use bigint::prelude::U256; use state::*; use super::*; use tests::helpers::*; @@ -119,7 +119,7 @@ mod tests { assert_eq!(morden.state_root(), "f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9".into()); let genesis = morden.genesis_block(); - assert_eq!(BlockView::new(&genesis).header_view().sha3(), "0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303".into()); + assert_eq!(BlockView::new(&genesis).header_view().hash(), "0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303".into()); let _ = morden.engine; } @@ -130,7 +130,7 @@ mod tests { assert_eq!(frontier.state_root(), "d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544".into()); let genesis = frontier.genesis_block(); - assert_eq!(BlockView::new(&genesis).header_view().sha3(), "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3".into()); + assert_eq!(BlockView::new(&genesis).header_view().hash(), "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3".into()); let _ = frontier.engine; } diff --git a/ethcore/src/executed.rs b/ethcore/src/executed.rs index 3154903ca..909e1b6cb 100644 --- a/ethcore/src/executed.rs +++ b/ethcore/src/executed.rs @@ -16,7 +16,8 @@ //! Transaction execution format module. -use util::{Bytes, U256, Address, U512, trie}; +use bigint::prelude::{U256, U512}; +use util::{Bytes, Address, trie}; use vm; use trace::{VMTrace, FlatTrace}; use log_entry::LogEntry; diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 66a770243..c00046e0e 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -17,6 +17,9 @@ //! Transaction Execution environment. 
use std::cmp; use std::sync::Arc; +use hash::keccak; +use bigint::prelude::{U256, U512}; +use bigint::hash::H256; use util::*; use state::{Backend as StateBackend, State, Substate, CleanupMode}; use engines::Engine; @@ -47,20 +50,20 @@ pub fn contract_address(address_scheme: CreateContractAddress, sender: &Address, let mut stream = RlpStream::new_list(2); stream.append(sender); stream.append(nonce); - (From::from(stream.as_raw().sha3()), None) + (From::from(keccak(stream.as_raw())), None) }, CreateContractAddress::FromCodeHash => { - let code_hash = code.sha3(); + let code_hash = keccak(code); let mut buffer = [0xffu8; 20 + 32]; &mut buffer[20..].copy_from_slice(&code_hash[..]); - (From::from((&buffer[..]).sha3()), Some(code_hash)) + (From::from(keccak(&buffer[..])), Some(code_hash)) }, CreateContractAddress::FromSenderAndCodeHash => { - let code_hash = code.sha3(); + let code_hash = keccak(code); let mut buffer = [0u8; 20 + 32]; &mut buffer[..20].copy_from_slice(&sender[..]); &mut buffer[20..].copy_from_slice(&code_hash[..]); - (From::from((&buffer[..]).sha3()), Some(code_hash)) + (From::from(keccak(&buffer[..])), Some(code_hash)) }, } } @@ -74,6 +77,8 @@ pub struct TransactOptions { pub vm_tracer: V, /// Check transaction nonce before execution. pub check_nonce: bool, + /// Records the output from init contract calls. + pub output_from_init_contract: bool, } impl TransactOptions { @@ -83,6 +88,7 @@ impl TransactOptions { tracer, vm_tracer, check_nonce: true, + output_from_init_contract: false, } } @@ -91,6 +97,12 @@ impl TransactOptions { self.check_nonce = false; self } + + /// Saves the output from contract creation. + pub fn save_output_from_contract(mut self) -> Self { + self.output_from_init_contract = true; + self + } } impl TransactOptions { @@ -100,6 +112,7 @@ impl TransactOptions { tracer: trace::ExecutiveTracer::default(), vm_tracer: trace::ExecutiveVMTracer::toplevel(), check_nonce: true, + output_from_init_contract: false, } } } @@ -111,6 +124,7 @@ impl TransactOptions { tracer: trace::ExecutiveTracer::default(), vm_tracer: trace::NoopVMTracer, check_nonce: true, + output_from_init_contract: false, } } } @@ -122,6 +136,7 @@ impl TransactOptions { tracer: trace::NoopTracer, vm_tracer: trace::ExecutiveVMTracer::toplevel(), check_nonce: true, + output_from_init_contract: false, } } } @@ -133,6 +148,7 @@ impl TransactOptions { tracer: trace::NoopTracer, vm_tracer: trace::NoopVMTracer, check_nonce: true, + output_from_init_contract: false, } } } @@ -201,7 +217,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> { pub fn transact(&'a mut self, t: &SignedTransaction, options: TransactOptions) -> Result where T: Tracer, V: VMTracer, { - self.transact_with_tracer(t, options.check_nonce, options.tracer, options.vm_tracer) + self.transact_with_tracer(t, options.check_nonce, options.output_from_init_contract, options.tracer, options.vm_tracer) } /// Execute a transaction in a "virtual" context. 
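Alongside the `keccak` switch, the `executive.rs` hunks above thread a new `output_from_init_contract` flag through `TransactOptions` so that `transact` can hand `create` an optional buffer for the constructor's return data. Below is a minimal, self-contained sketch of that flag-threading pattern; the names are simplified stand-ins, not the actual ethcore types or signatures.

```rust
// Sketch of the builder-flag pattern used above (simplified stand-in types).
#[derive(Debug)]
struct TransactOptions {
    check_nonce: bool,
    output_from_init_contract: bool,
}

impl TransactOptions {
    fn new() -> Self {
        TransactOptions { check_nonce: true, output_from_init_contract: false }
    }
    fn dont_check_nonce(mut self) -> Self { self.check_nonce = false; self }
    fn save_output_from_contract(mut self) -> Self { self.output_from_init_contract = true; self }
}

// Stand-in for Executive::create: it writes the init-code return data into
// the buffer only if the caller supplied one, so callers that ignore the
// output keep the old zero-allocation path.
fn create(output: &mut Option<Vec<u8>>) {
    if let Some(out) = output.as_mut() {
        out.extend_from_slice(b"constructor return data");
    }
}

fn main() {
    let opts = TransactOptions::new().dont_check_nonce().save_output_from_contract();
    println!("check_nonce = {}", opts.check_nonce);

    // Mirror of the `transact_with_tracer` change: only allocate an output
    // buffer when the flag asks for it.
    let mut out = if opts.output_from_init_contract { Some(Vec::new()) } else { None };
    create(&mut out);
    assert_eq!(out.unwrap_or_else(Vec::new), b"constructor return data".to_vec());
}
```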
@@ -226,6 +242,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> { &'a mut self, t: &SignedTransaction, check_nonce: bool, + output_from_create: bool, mut tracer: T, mut vm_tracer: V ) -> Result where T: Tracer, V: VMTracer { @@ -294,7 +311,8 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> { data: None, call_type: CallType::None, }; - (self.create(params, &mut substate, &mut tracer, &mut vm_tracer), vec![]) + let mut out = if output_from_create { Some(vec![]) } else { None }; + (self.create(params, &mut substate, &mut out, &mut tracer, &mut vm_tracer), out.unwrap_or_else(Vec::new)) }, Action::Call(ref address) => { let params = ActionParams { @@ -487,6 +505,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> { &mut self, params: ActionParams, substate: &mut Substate, + output: &mut Option, tracer: &mut T, vm_tracer: &mut V, ) -> vm::Result<(U256, ReturnData)> where T: Tracer, V: VMTracer { @@ -528,7 +547,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> { let mut subvmtracer = vm_tracer.prepare_subtrace(params.code.as_ref().expect("two ways into create (Externalities::create and Executive::transact_with_tracer); both place `Some(...)` `code` in `params`; qed")); let res = { - self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract(trace_output.as_mut()), &mut subtracer, &mut subvmtracer) + self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract(output.as_mut().or(trace_output.as_mut())), &mut subtracer, &mut subvmtracer) }; vm_tracer.done_subtrace(subvmtracer); @@ -537,7 +556,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> { Ok(ref res) => tracer.trace_create( trace_info, gas - res.gas_left, - trace_output, + trace_output.map(|data| output.as_ref().map(|out| out.to_vec()).unwrap_or(data)), created, subtracer.drain() ), @@ -658,7 +677,9 @@ mod tests { use rustc_hex::FromHex; use ethkey::{Generator, Random}; use super::*; - use util::{H256, U256, U512, Address}; + use bigint::prelude::{U256, U512}; + use bigint::hash::H256; + use util::Address; use util::bytes::BytesRef; use vm::{ActionParams, ActionValue, CallType, EnvInfo, CreateContractAddress}; use evm::{Factory, VMType}; @@ -696,7 +717,7 @@ mod tests { let (gas_left, _) = { let mut ex = Executive::new(&mut state, &info, &engine); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap() }; assert_eq!(gas_left, U256::from(79_975)); @@ -754,7 +775,7 @@ mod tests { let (gas_left, _) = { let mut ex = Executive::new(&mut state, &info, &engine); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap() }; assert_eq!(gas_left, U256::from(62_976)); @@ -921,7 +942,7 @@ mod tests { let (gas_left, _) = { let mut ex = Executive::new(&mut state, &info, &engine); - ex.create(params.clone(), &mut substate, &mut tracer, &mut vm_tracer).unwrap() + ex.create(params.clone(), &mut substate, &mut None, &mut tracer, &mut vm_tracer).unwrap() }; assert_eq!(gas_left, U256::from(96_776)); @@ -1006,7 +1027,7 @@ mod tests { let (gas_left, _) = { let mut ex = Executive::new(&mut state, &info, &engine); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap() }; 
assert_eq!(gas_left, U256::from(62_976)); @@ -1057,7 +1078,7 @@ mod tests { { let mut ex = Executive::new(&mut state, &info, &engine); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap(); + ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap(); } assert_eq!(substate.contracts_created.len(), 1); @@ -1307,8 +1328,8 @@ mod tests { } } - evm_test!{test_sha3: test_sha3_jit, test_sha3_int} - fn test_sha3(factory: Factory) { + evm_test!{test_keccak: test_keccak_jit, test_keccak_int} + fn test_keccak(factory: Factory) { let code = "6064640fffffffff20600055".from_hex().unwrap(); let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); @@ -1330,7 +1351,7 @@ mod tests { let result = { let mut ex = Executive::new(&mut state, &info, &engine); - ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) + ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer) }; match result { diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index eae981f1b..5fc613844 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -17,6 +17,8 @@ //! Transaction Execution environment. use std::cmp; use std::sync::Arc; +use bigint::prelude::U256; +use bigint::hash::H256; use util::*; use state::{Backend as StateBackend, State, Substate, CleanupMode}; use engines::Engine; @@ -222,7 +224,7 @@ impl<'a, T: 'a, V: 'a, B: 'a, E: 'a> Ext for Externalities<'a, T, V, B, E> let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.depth, self.static_flag); // TODO: handle internal error separately - match ex.create(params, self.substate, self.tracer, self.vm_tracer) { + match ex.create(params, self.substate, &mut None, self.tracer, self.vm_tracer) { Ok((gas_left, _)) => { self.substate.contracts_created.push(address.clone()); ContractCreateResult::Created(address, gas_left) diff --git a/ethcore/src/header.rs b/ethcore/src/header.rs index 77cdd54af..aa94db036 100644 --- a/ethcore/src/header.rs +++ b/ethcore/src/header.rs @@ -18,7 +18,10 @@ use std::cmp; use std::cell::RefCell; +use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP, keccak}; use heapsize::HeapSizeOf; +use bigint::prelude::U256; +use bigint::hash::H256; use util::*; use basic_types::{LogBloom, ZERO_LOGBLOOM}; use time::get_time; @@ -101,12 +104,12 @@ impl Default for Header { number: 0, author: Address::default(), - transactions_root: SHA3_NULL_RLP, - uncles_hash: SHA3_EMPTY_LIST_RLP, + transactions_root: KECCAK_NULL_RLP, + uncles_hash: KECCAK_EMPTY_LIST_RLP, extra_data: vec![], - state_root: SHA3_NULL_RLP, - receipts_root: SHA3_NULL_RLP, + state_root: KECCAK_NULL_RLP, + receipts_root: KECCAK_NULL_RLP, log_bloom: ZERO_LOGBLOOM.clone(), gas_used: U256::default(), gas_limit: U256::default(), @@ -195,13 +198,13 @@ impl Header { /// Set the seal field of the header. pub fn set_seal(&mut self, a: Vec) { self.seal = a; self.note_dirty(); } - /// Get the hash of this header (sha3 of the RLP). + /// Get the hash of this header (keccak of the RLP). 
pub fn hash(&self) -> H256 { let mut hash = self.hash.borrow_mut(); match &mut *hash { &mut Some(ref h) => h.clone(), hash @ &mut None => { - let h = self.rlp_sha3(Seal::With); + let h = self.rlp_keccak(Seal::With); *hash = Some(h.clone()); h } @@ -214,7 +217,7 @@ impl Header { match &mut *hash { &mut Some(ref h) => h.clone(), hash @ &mut None => { - let h = self.rlp_sha3(Seal::Without); + let h = self.rlp_keccak(Seal::Without); *hash = Some(h.clone()); h } @@ -258,8 +261,8 @@ impl Header { s.out() } - /// Get the SHA3 (Keccak) of this header, optionally `with_seal`. - pub fn rlp_sha3(&self, with_seal: Seal) -> H256 { self.rlp(with_seal).sha3() } + /// Get the KECCAK (Keccak) of this header, optionally `with_seal`. + pub fn rlp_keccak(&self, with_seal: Seal) -> H256 { keccak(self.rlp(with_seal)) } } impl Decodable for Header { @@ -279,7 +282,7 @@ impl Decodable for Header { timestamp: cmp::min(r.val_at::(11)?, u64::max_value().into()).as_u64(), extra_data: r.val_at(12)?, seal: vec![], - hash: RefCell::new(Some(r.as_raw().sha3())), + hash: RefCell::new(Some(keccak(r.as_raw()))), bare_hash: RefCell::new(None), }; diff --git a/ethcore/src/json_tests/test_common.rs b/ethcore/src/json_tests/test_common.rs index fa1078776..33564cd87 100644 --- a/ethcore/src/json_tests/test_common.rs +++ b/ethcore/src/json_tests/test_common.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +pub use bigint::prelude::U256; +pub use bigint::hash::H256; pub use util::*; use std::collections::HashSet; use std::io::Read; diff --git a/ethcore/src/json_tests/trie.rs b/ethcore/src/json_tests/trie.rs index 0b1fd949a..5d2696893 100644 --- a/ethcore/src/json_tests/trie.rs +++ b/ethcore/src/json_tests/trie.rs @@ -16,7 +16,7 @@ use ethjson; use util::trie::{TrieFactory, TrieSpec}; -use util::hash::H256; +use bigint::hash::H256; use util::memorydb::MemoryDB; fn test_trie(json: &[u8], trie: TrieSpec) -> Vec { diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index ddeb5c73e..2f2329d64 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -85,6 +85,7 @@ extern crate ethcore_bloom_journal as bloom_journal; extern crate ethcore_devtools as devtools; extern crate ethcore_io as io; extern crate ethcore_ipc_nano as nanoipc; +extern crate ethcore_bigint as bigint; extern crate ethcore_logger; extern crate ethcore_stratum; extern crate ethjson; @@ -98,10 +99,16 @@ extern crate lru_cache; extern crate native_contracts; extern crate num_cpus; extern crate num; +extern crate parking_lot; extern crate price_info; extern crate rand; extern crate rlp; +extern crate hash; extern crate heapsize; +extern crate triehash; +extern crate ansi_term; +extern crate semantic_version; +extern crate unexpected; #[macro_use] extern crate rlp_derive; @@ -149,6 +156,7 @@ pub mod service; pub mod snapshot; pub mod spec; pub mod state; +pub mod timer; pub mod trace; pub mod transaction; pub mod verification; @@ -165,6 +173,7 @@ mod executive; mod externalities; mod blockchain; mod factory; +mod tx_filter; #[cfg(test)] mod tests; diff --git a/ethcore/src/migrations/state/v7.rs b/ethcore/src/migrations/state/v7.rs index 9a9a65d7b..64fa5b272 100644 --- a/ethcore/src/migrations/state/v7.rs +++ b/ethcore/src/migrations/state/v7.rs @@ -19,18 +19,18 @@ use std::collections::HashMap; -use util::Bytes; -use util::{Address, H256}; +use bigint::hash::H256; +use util::{Address, Bytes}; use util::kvdb::Database; use util::migration::{Batch, Config, Error, Migration, SimpleMigration, 
Progress}; -use util::sha3::Hashable; +use hash::keccak; use std::sync::Arc; use rlp::{decode, Rlp, RlpStream}; // attempt to migrate a key, value pair. None if migration not possible. fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option { - let val_hash = val.sha3(); + let val_hash = keccak(val); if key_h != val_hash { // this is a key which has been xor'd with an address. @@ -43,7 +43,7 @@ fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option { return None; } - let address_hash = Address::from(address).sha3(); + let address_hash = keccak(Address::from(address)); // create the xor'd key in place. key_h.copy_from_slice(&*val_hash); diff --git a/ethcore/src/migrations/v10.rs b/ethcore/src/migrations/v10.rs index 7982279cf..dd6968bbb 100644 --- a/ethcore/src/migrations/v10.rs +++ b/ethcore/src/migrations/v10.rs @@ -24,7 +24,8 @@ use views::HeaderView; use bloom_journal::Bloom; use util::migration::{Error, Migration, Progress, Batch, Config}; use util::journaldb; -use util::{H256, Trie}; +use bigint::hash::H256; +use util::Trie; use util::{Database, DBTransaction}; /// Account bloom upgrade routine. If bloom already present, does nothing. diff --git a/ethcore/src/miner/banning_queue.rs b/ethcore/src/miner/banning_queue.rs index 46d7e7cf8..a446da29e 100644 --- a/ethcore/src/miner/banning_queue.rs +++ b/ethcore/src/miner/banning_queue.rs @@ -24,7 +24,10 @@ use transient_hashmap::TransientHashMap; use miner::{TransactionQueue, TransactionQueueDetailsProvider, TransactionImportResult, TransactionOrigin}; use miner::transaction_queue::QueuingInstant; use error::{Error, TransactionError}; -use util::{U256, H256, Address, Hashable}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Address; +use hash::keccak; type Count = u16; @@ -103,7 +106,7 @@ impl BanningTransactionQueue { // Check code if let Action::Create = transaction.action { - let code_hash = transaction.data.sha3(); + let code_hash = keccak(&transaction.data); let count = self.codes_bans.direct().get(&code_hash).cloned().unwrap_or(0); if count > threshold { debug!(target: "txqueue", "Ignoring transaction {:?} because code is banned.", transaction.hash()); @@ -131,7 +134,7 @@ impl BanningTransactionQueue { self.ban_recipient(recipient) }, Action::Create => { - self.ban_codehash(transaction.data.sha3()) + self.ban_codehash(keccak(&transaction.data)) }, }; sender_banned || recipient_or_code_banned @@ -210,13 +213,15 @@ impl DerefMut for BanningTransactionQueue { mod tests { use std::time::Duration; use rustc_hex::FromHex; + use hash::keccak; use super::{BanningTransactionQueue, Threshold}; use ethkey::{Random, Generator}; use transaction::{Transaction, SignedTransaction, Action}; use error::{Error, TransactionError}; use client::TransactionImportResult; use miner::{TransactionQueue, TransactionOrigin}; - use util::{U256, Address, Hashable}; + use bigint::prelude::U256; + use util::Address; use miner::transaction_queue::test::DummyTransactionDetailsProvider; fn queue() -> BanningTransactionQueue { @@ -310,7 +315,7 @@ mod tests { fn should_not_accept_transactions_with_banned_code() { // given let tx = transaction(Action::Create); - let codehash = tx.data.sha3(); + let codehash = keccak(&tx.data); let mut txq = queue(); // Banlist once (threshold not reached) let banlist1 = txq.ban_codehash(codehash); diff --git a/ethcore/src/miner/external.rs b/ethcore/src/miner/external.rs index 65f8ee8cc..3ca7a9cc0 100644 --- a/ethcore/src/miner/external.rs +++ b/ethcore/src/miner/external.rs @@ -17,7 +17,9 @@ use 
std::collections::HashMap; use std::sync::Arc; use std::time::{Instant, Duration}; -use util::{Mutex, U256, H256}; +use bigint::prelude::U256; +use bigint::hash::H256; +use parking_lot::Mutex; /// External miner interface. pub trait ExternalMinerService: Send + Sync { @@ -70,7 +72,8 @@ mod tests { use super::*; use std::thread::sleep; use std::time::Duration; - use util::{H256, U256}; + use bigint::prelude::U256; + use bigint::hash::H256; fn miner() -> ExternalMiner { ExternalMiner::default() diff --git a/ethcore/src/miner/local_transactions.rs b/ethcore/src/miner/local_transactions.rs index 59137c3f4..12e14294a 100644 --- a/ethcore/src/miner/local_transactions.rs +++ b/ethcore/src/miner/local_transactions.rs @@ -19,7 +19,8 @@ use linked_hash_map::LinkedHashMap; use transaction::{SignedTransaction, PendingTransaction}; use error::TransactionError; -use util::{U256, H256}; +use bigint::prelude::U256; +use bigint::hash::H256; /// Status of local transaction. /// Can indicate that the transaction is currently part of the queue (`Pending/Future`) @@ -152,7 +153,7 @@ impl LocalTransactionsList { #[cfg(test)] mod tests { - use util::U256; + use bigint::prelude::U256; use ethkey::{Random, Generator}; use transaction::{Action, Transaction, SignedTransaction}; use super::{LocalTransactionsList, Status}; diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 0639c645f..b28288570 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -18,7 +18,11 @@ use std::time::{Instant, Duration}; use std::collections::{BTreeMap, HashSet}; use std::sync::Arc; +use bigint::prelude::U256; +use bigint::hash::H256; +use parking_lot::{Mutex, RwLock}; use util::*; +use timer::PerfTimer; use using_queue::{UsingQueue, GetAction}; use account_provider::{AccountProvider, SignError as AccountError}; use state::State; @@ -40,6 +44,7 @@ use miner::service_transaction_checker::ServiceTransactionChecker; use price_info::{Client as PriceInfoClient, PriceInfo}; use price_info::fetch::Client as FetchClient; use header::{Header, BlockNumber}; +use ansi_term::Colour; /// Different possible definitions for pending transaction set. 
#[derive(Debug, PartialEq)] @@ -1235,10 +1240,11 @@ mod tests { use std::sync::Arc; use std::time::Duration; use rustc_hex::FromHex; + use hash::keccak; use super::super::{MinerService, PrioritizationStrategy}; use super::*; use block::IsBlock; - use util::U256; + use bigint::prelude::U256; use ethkey::{Generator, Random}; use client::{BlockChainClient, TestBlockChainClient, EachBlockWith, TransactionImportResult}; use header::BlockNumber; @@ -1418,7 +1424,7 @@ mod tests { fn should_fail_setting_engine_signer_on_pow() { let spec = Spec::new_pow_test_spec; let tap = Arc::new(AccountProvider::transient_provider()); - let addr = tap.insert_account("1".sha3().into(), "").unwrap(); + let addr = tap.insert_account(keccak("1").into(), "").unwrap(); let client = generate_dummy_client_with_spec_and_accounts(spec, Some(tap.clone())); assert!(match client.miner().set_engine_signer(addr, "".into()) { Err(AccountError::InappropriateChain) => true, _ => false }) } @@ -1427,7 +1433,7 @@ mod tests { fn should_fail_setting_engine_signer_without_account_provider() { let spec = Spec::new_instant; let tap = Arc::new(AccountProvider::transient_provider()); - let addr = tap.insert_account("1".sha3().into(), "").unwrap(); + let addr = tap.insert_account(keccak("1").into(), "").unwrap(); let client = generate_dummy_client_with_spec_and_accounts(spec, None); assert!(match client.miner().set_engine_signer(addr, "".into()) { Err(AccountError::NotFound) => true, _ => false }); } diff --git a/ethcore/src/miner/mod.rs b/ethcore/src/miner/mod.rs index b4cb065fd..b9bf7859e 100644 --- a/ethcore/src/miner/mod.rs +++ b/ethcore/src/miner/mod.rs @@ -61,7 +61,9 @@ pub use self::work_notify::NotifyWork; pub use self::stratum::{Stratum, Error as StratumError, Options as StratumOptions}; use std::collections::BTreeMap; -use util::{H256, U256, Address, Bytes}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{Address, Bytes}; use client::{MiningBlockChainClient}; use block::ClosedBlock; use header::BlockNumber; diff --git a/ethcore/src/miner/service_transaction_checker.rs b/ethcore/src/miner/service_transaction_checker.rs index d21643772..2416a63d0 100644 --- a/ethcore/src/miner/service_transaction_checker.rs +++ b/ethcore/src/miner/service_transaction_checker.rs @@ -20,7 +20,8 @@ use types::ids::BlockId; use futures::{future, Future}; use native_contracts::ServiceTransactionChecker as Contract; -use util::{U256, Mutex}; +use bigint::prelude::U256; +use parking_lot::Mutex; const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker"; diff --git a/ethcore/src/miner/stratum.rs b/ethcore/src/miner/stratum.rs index 0031bb715..f1eeaf2b4 100644 --- a/ethcore/src/miner/stratum.rs +++ b/ethcore/src/miner/stratum.rs @@ -25,10 +25,11 @@ use std::sync::{Arc, Weak}; use std::net::{SocketAddr, AddrParseError}; use std::fmt; -use util::{H256, U256, H64, clean_0x}; +use bigint::prelude::U256; +use bigint::hash::{H64, H256, clean_0x}; use ethereum::ethash::Ethash; use ethash::SeedHashCompute; -use util::Mutex; +use parking_lot::Mutex; use miner::{self, Miner, MinerService}; use client::Client; use block::IsBlock; diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index 5b29a1607..0234af068 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -25,11 +25,13 @@ //! //! ```rust //! extern crate ethcore_util as util; +//! extern crate ethcore_bigint as bigint; //! extern crate ethcore; //! extern crate ethkey; //! 
extern crate rustc_hex; //! -//! use util::{U256, Address}; +//! use bigint::prelude::U256; +//! use util::Address; //! use ethkey::{Random, Generator}; //! use ethcore::miner::{TransactionQueue, RemovalReason, TransactionQueueDetailsProvider, AccountDetails, TransactionOrigin}; //! use ethcore::transaction::*; @@ -106,7 +108,9 @@ use std::cmp; use std::collections::{HashSet, HashMap, BTreeSet, BTreeMap}; use linked_hash_map::LinkedHashMap; use heapsize::HeapSizeOf; -use util::{Address, H256, U256}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Address; use table::Table; use transaction::*; use error::{Error, TransactionError}; diff --git a/ethcore/src/miner/work_notify.rs b/ethcore/src/miner/work_notify.rs index ff330d30c..76bc0a371 100644 --- a/ethcore/src/miner/work_notify.rs +++ b/ethcore/src/miner/work_notify.rs @@ -24,7 +24,9 @@ use hyper::{Next}; use hyper::net::HttpStream; use ethash::SeedHashCompute; use hyper::Url; -use util::*; +use bigint::prelude::U256; +use bigint::hash::H256; +use parking_lot::Mutex; use ethereum::ethash::Ethash; /// Trait for notifying about new mining work diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index 4204b591d..8f21de298 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -17,6 +17,10 @@ use std::fmt; use std::collections::BTreeMap; use itertools::Itertools; +use hash::{keccak}; +use bigint::prelude::U256; +use bigint::hash::H256; +use triehash::sec_trie_root; use util::*; use state::Account; use ethjson; @@ -61,7 +65,7 @@ impl PodAccount { stream.append(&self.nonce); stream.append(&self.balance); stream.append(&sec_trie_root(self.storage.iter().map(|(k, v)| (k.to_vec(), rlp::encode(&U256::from(&**v)).to_vec())).collect())); - stream.append(&self.code.as_ref().unwrap_or(&vec![]).sha3()); + stream.append(&keccak(&self.code.as_ref().unwrap_or(&vec![]))); stream.out() } @@ -117,7 +121,7 @@ impl fmt::Display for PodAccount { self.balance, self.nonce, self.code.as_ref().map_or(0, |c| c.len()), - self.code.as_ref().map_or_else(H256::new, |c| c.sha3()), + self.code.as_ref().map_or_else(H256::new, |c| keccak(c)), self.storage.len(), ) } diff --git a/ethcore/src/pod_state.rs b/ethcore/src/pod_state.rs index 5a0265dc1..abd253e76 100644 --- a/ethcore/src/pod_state.rs +++ b/ethcore/src/pod_state.rs @@ -19,6 +19,8 @@ use std::fmt; use std::collections::BTreeMap; use itertools::Itertools; +use bigint::hash::H256; +use triehash::sec_trie_root; use util::*; use pod_account::{self, PodAccount}; use types::state_diff::StateDiff; diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 5e65a4de8..b8f43f11c 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -18,6 +18,7 @@ use std::sync::Arc; use std::path::Path; +use bigint::hash::H256; use util::*; use io::*; use spec::Spec; @@ -28,6 +29,7 @@ use miner::Miner; use snapshot::ManifestData; use snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams}; use std::sync::atomic::AtomicBool; +use ansi_term::Colour; #[cfg(feature="ipc")] use nanoipc; diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index a9f2ba593..47598c86e 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -19,8 +19,11 @@ use account_db::{AccountDB, AccountDBMut}; use basic_account::BasicAccount; use snapshot::Error; +use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP}; -use util::{U256, H256, Bytes, HashDB, SHA3_EMPTY, SHA3_NULL_RLP}; +use bigint::prelude::U256; +use bigint::hash::H256; 
+use util::{Bytes, HashDB}; use util::trie::{TrieDB, Trie}; use rlp::{RlpStream, UntrustedRlp}; @@ -30,8 +33,8 @@ use std::collections::HashSet; const ACC_EMPTY: BasicAccount = BasicAccount { nonce: U256([0, 0, 0, 0]), balance: U256([0, 0, 0, 0]), - storage_root: SHA3_NULL_RLP, - code_hash: SHA3_EMPTY, + storage_root: KECCAK_NULL_RLP, + code_hash: KECCAK_EMPTY, }; // whether an encoded account has code and how it is referred to. @@ -78,7 +81,7 @@ pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB, .append(&acc.balance); // [has_code, code_hash]. - if acc.code_hash == SHA3_EMPTY { + if acc.code_hash == KECCAK_EMPTY { account_stream.append(&CodeState::Empty.raw()).append_empty_data(); } else if used_code.contains(&acc.code_hash) { account_stream.append(&CodeState::Hash.raw()).append(&acc.code_hash); @@ -164,7 +167,7 @@ pub fn from_fat_rlp( // load the code if it exists. let (code_hash, new_code) = match code_state { - CodeState::Empty => (SHA3_EMPTY, None), + CodeState::Empty => (KECCAK_EMPTY, None), CodeState::Inline => { let code: Bytes = rlp.val_at(3)?; let code_hash = acct_db.insert(&code); @@ -210,8 +213,9 @@ mod tests { use tests::helpers::get_temp_state_db; use snapshot::tests::helpers::fill_storage; - use util::sha3::{SHA3_EMPTY, SHA3_NULL_RLP}; - use util::{Address, H256, HashDB, DBValue, Hashable}; + use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak}; + use bigint::hash::H256; + use util::{Address, HashDB, DBValue}; use rlp::UntrustedRlp; use std::collections::HashSet; @@ -226,14 +230,14 @@ mod tests { let account = BasicAccount { nonce: 50.into(), balance: 123456789.into(), - storage_root: SHA3_NULL_RLP, - code_hash: SHA3_EMPTY, + storage_root: KECCAK_NULL_RLP, + code_hash: KECCAK_EMPTY, }; let thin_rlp = ::rlp::encode(&account); assert_eq!(::rlp::decode::(&thin_rlp), account); - let fat_rlps = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap(); + let fat_rlps = to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap(); let fat_rlp = UntrustedRlp::new(&fat_rlps[0]).at(1).unwrap(); assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account); } @@ -245,20 +249,20 @@ mod tests { let account = { let acct_db = AccountDBMut::new(db.as_hashdb_mut(), &addr); - let mut root = SHA3_NULL_RLP; + let mut root = KECCAK_NULL_RLP; fill_storage(acct_db, &mut root, &mut H256::zero()); BasicAccount { nonce: 25.into(), balance: 987654321.into(), storage_root: root, - code_hash: SHA3_EMPTY, + code_hash: KECCAK_EMPTY, } }; let thin_rlp = ::rlp::encode(&account); assert_eq!(::rlp::decode::(&thin_rlp), account); - let fat_rlp = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap(); + let fat_rlp = to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap(); let fat_rlp = UntrustedRlp::new(&fat_rlp[0]).at(1).unwrap(); assert_eq!(from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, H256::zero()).unwrap().0, account); } @@ -270,21 +274,21 @@ mod tests { let account = { let acct_db = AccountDBMut::new(db.as_hashdb_mut(), &addr); - let mut root = SHA3_NULL_RLP; + let mut root = KECCAK_NULL_RLP; fill_storage(acct_db, &mut root, 
&mut H256::zero()); BasicAccount { nonce: 25.into(), balance: 987654321.into(), storage_root: root, - code_hash: SHA3_EMPTY, + code_hash: KECCAK_EMPTY, } }; let thin_rlp = ::rlp::encode(&account); assert_eq!(::rlp::decode::(&thin_rlp), account); - let fat_rlps = to_fat_rlps(&addr.sha3(), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 500, 1000).unwrap(); - let mut root = SHA3_NULL_RLP; + let fat_rlps = to_fat_rlps(&keccak(addr), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 500, 1000).unwrap(); + let mut root = KECCAK_NULL_RLP; let mut restored_account = None; for rlp in fat_rlps { let fat_rlp = UntrustedRlp::new(&rlp).at(1).unwrap(); @@ -314,21 +318,21 @@ mod tests { let account1 = BasicAccount { nonce: 50.into(), balance: 123456789.into(), - storage_root: SHA3_NULL_RLP, + storage_root: KECCAK_NULL_RLP, code_hash: code_hash, }; let account2 = BasicAccount { nonce: 400.into(), balance: 98765432123456789usize.into(), - storage_root: SHA3_NULL_RLP, + storage_root: KECCAK_NULL_RLP, code_hash: code_hash, }; let mut used_code = HashSet::new(); - let fat_rlp1 = to_fat_rlps(&addr1.sha3(), &account1, &AccountDB::new(db.as_hashdb(), &addr1), &mut used_code, usize::max_value(), usize::max_value()).unwrap(); - let fat_rlp2 = to_fat_rlps(&addr2.sha3(), &account2, &AccountDB::new(db.as_hashdb(), &addr2), &mut used_code, usize::max_value(), usize::max_value()).unwrap(); + let fat_rlp1 = to_fat_rlps(&keccak(&addr1), &account1, &AccountDB::new(db.as_hashdb(), &addr1), &mut used_code, usize::max_value(), usize::max_value()).unwrap(); + let fat_rlp2 = to_fat_rlps(&keccak(&addr2), &account2, &AccountDB::new(db.as_hashdb(), &addr2), &mut used_code, usize::max_value(), usize::max_value()).unwrap(); assert_eq!(used_code.len(), 1); let fat_rlp1 = UntrustedRlp::new(&fat_rlp1[0]).at(1).unwrap(); diff --git a/ethcore/src/snapshot/block.rs b/ethcore/src/snapshot/block.rs index e7b100f69..ed8c130d4 100644 --- a/ethcore/src/snapshot/block.rs +++ b/ethcore/src/snapshot/block.rs @@ -18,11 +18,13 @@ use block::Block; use header::Header; +use hash::keccak; use views::BlockView; use rlp::{DecoderError, RlpStream, UntrustedRlp}; -use util::{Bytes, Hashable, H256}; -use util::triehash::ordered_trie_root; +use bigint::hash::H256; +use util::Bytes; +use triehash::ordered_trie_root; const HEADER_FIELDS: usize = 8; const BLOCK_FIELDS: usize = 2; @@ -111,7 +113,7 @@ impl AbridgedBlock { let mut uncles_rlp = RlpStream::new(); uncles_rlp.append_list(&uncles); - header.set_uncles_hash(uncles_rlp.as_raw().sha3()); + header.set_uncles_hash(keccak(uncles_rlp.as_raw())); let mut seal_fields = Vec::new(); for i in (HEADER_FIELDS + BLOCK_FIELDS)..rlp.item_count()? 
{ @@ -136,7 +138,9 @@ mod tests { use super::AbridgedBlock; use transaction::{Action, Transaction}; - use util::{Address, H256, U256, Bytes}; + use bigint::prelude::U256; + use bigint::hash::H256; + use util::{Address, Bytes}; fn encode_block(b: &Block) -> Bytes { b.rlp_bytes(::basic_types::Seal::With) @@ -189,7 +193,7 @@ mod tests { b.transactions.push(t2.into()); let receipts_root = b.header.receipts_root().clone(); - b.header.set_transactions_root(::util::triehash::ordered_trie_root( + b.header.set_transactions_root(::triehash::ordered_trie_root( b.transactions.iter().map(::rlp::encode).map(|out| out.into_vec()) )); diff --git a/ethcore/src/snapshot/consensus/authority.rs b/ethcore/src/snapshot/consensus/authority.rs index 1a1215522..28222bfb5 100644 --- a/ethcore/src/snapshot/consensus/authority.rs +++ b/ethcore/src/snapshot/consensus/authority.rs @@ -33,7 +33,8 @@ use snapshot::{Error, ManifestData}; use itertools::{Position, Itertools}; use rlp::{RlpStream, UntrustedRlp}; -use util::{Bytes, H256, KeyValueDB}; +use bigint::hash::H256; +use util::{Bytes, KeyValueDB}; /// Snapshot creation and restoration for PoA chains. /// Chunk format: @@ -332,7 +333,7 @@ impl Rebuilder for ChunkRebuilder { } } - let parent_td: ::util::U256 = last_rlp.val_at(4)?; + let parent_td: ::bigint::prelude::U256 = last_rlp.val_at(4)?; let mut batch = self.db.transaction(); self.chain.insert_unordered_block(&mut batch, &block_data, receipts, Some(parent_td), true, false); diff --git a/ethcore/src/snapshot/consensus/mod.rs b/ethcore/src/snapshot/consensus/mod.rs index 3f583893b..b24c67be2 100644 --- a/ethcore/src/snapshot/consensus/mod.rs +++ b/ethcore/src/snapshot/consensus/mod.rs @@ -24,7 +24,7 @@ use blockchain::BlockChain; use engines::Engine; use snapshot::{Error, ManifestData}; -use util::H256; +use bigint::hash::H256; use util::kvdb::KeyValueDB; mod authority; diff --git a/ethcore/src/snapshot/consensus/work.rs b/ethcore/src/snapshot/consensus/work.rs index 2bf154fc4..16b8a5c18 100644 --- a/ethcore/src/snapshot/consensus/work.rs +++ b/ethcore/src/snapshot/consensus/work.rs @@ -30,7 +30,8 @@ use blockchain::{BlockChain, BlockProvider}; use engines::Engine; use snapshot::{Error, ManifestData}; use snapshot::block::AbridgedBlock; -use util::{Bytes, H256, KeyValueDB}; +use bigint::hash::H256; +use util::{Bytes, KeyValueDB}; use rlp::{RlpStream, UntrustedRlp}; use rand::OsRng; @@ -221,8 +222,8 @@ impl Rebuilder for PowRebuilder { use basic_types::Seal::With; use views::BlockView; use snapshot::verify_old_block; - use util::U256; - use util::triehash::ordered_trie_root; + use bigint::prelude::U256; + use triehash::ordered_trie_root; let rlp = UntrustedRlp::new(chunk); let item_count = rlp.item_count()?; diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index 56be84c96..b7a310c0d 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -20,7 +20,7 @@ use std::fmt; use ids::BlockId; -use util::H256; +use bigint::hash::H256; use util::trie::TrieError; use rlp::DecoderError; diff --git a/ethcore/src/snapshot/io.rs b/ethcore/src/snapshot/io.rs index f28adcf7f..dabb86283 100644 --- a/ethcore/src/snapshot/io.rs +++ b/ethcore/src/snapshot/io.rs @@ -26,7 +26,7 @@ use std::fs::{self, File}; use std::path::{Path, PathBuf}; use util::Bytes; -use util::hash::H256; +use bigint::hash::H256; use rlp::{RlpStream, UntrustedRlp}; use super::ManifestData; @@ -342,7 +342,7 @@ impl SnapshotReader for LooseReader { #[cfg(test)] mod tests { use devtools::RandomTempPath; - use 
util::sha3::Hashable; + use hash::keccak; use snapshot::ManifestData; use super::{SnapshotWriter, SnapshotReader, PackedWriter, PackedReader, LooseWriter, LooseReader, SNAPSHOT_VERSION}; @@ -359,24 +359,24 @@ mod tests { let mut block_hashes = Vec::new(); for chunk in STATE_CHUNKS { - let hash = chunk.sha3(); + let hash = keccak(&chunk); state_hashes.push(hash.clone()); writer.write_state_chunk(hash, chunk).unwrap(); } for chunk in BLOCK_CHUNKS { - let hash = chunk.sha3(); + let hash = keccak(&chunk); block_hashes.push(hash.clone()); - writer.write_block_chunk(chunk.sha3(), chunk).unwrap(); + writer.write_block_chunk(keccak(&chunk), chunk).unwrap(); } let manifest = ManifestData { version: SNAPSHOT_VERSION, state_hashes: state_hashes, block_hashes: block_hashes, - state_root: b"notarealroot".sha3(), + state_root: keccak(b"notarealroot"), block_number: 12345678987654321, - block_hash: b"notarealblock".sha3(), + block_hash: keccak(b"notarealblock"), }; writer.finish(manifest.clone()).unwrap(); @@ -398,24 +398,24 @@ mod tests { let mut block_hashes = Vec::new(); for chunk in STATE_CHUNKS { - let hash = chunk.sha3(); + let hash = keccak(&chunk); state_hashes.push(hash.clone()); writer.write_state_chunk(hash, chunk).unwrap(); } for chunk in BLOCK_CHUNKS { - let hash = chunk.sha3(); + let hash = keccak(&chunk); block_hashes.push(hash.clone()); - writer.write_block_chunk(chunk.sha3(), chunk).unwrap(); + writer.write_block_chunk(keccak(&chunk), chunk).unwrap(); } let manifest = ManifestData { version: SNAPSHOT_VERSION, state_hashes: state_hashes, block_hashes: block_hashes, - state_root: b"notarealroot".sha3(), + state_root: keccak(b"notarealroot"), block_number: 12345678987654321, - block_hash: b"notarealblock".sha3(), + block_hash: keccak(b"notarealblock)"), }; writer.finish(manifest.clone()).unwrap(); diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index a15c15566..bd9672caf 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -22,6 +22,7 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY}; use account_db::{AccountDB, AccountDBMut}; use blockchain::{BlockChain, BlockProvider}; @@ -29,13 +30,13 @@ use engines::Engine; use header::Header; use ids::BlockId; -use util::{Bytes, Hashable, HashDB, DBValue, snappy, U256}; -use util::Mutex; -use util::hash::{H256}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{Bytes, HashDB, DBValue, snappy}; +use parking_lot::Mutex; use util::journaldb::{self, Algorithm, JournalDB}; use util::kvdb::KeyValueDB; use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut}; -use util::sha3::SHA3_NULL_RLP; use rlp::{RlpStream, UntrustedRlp}; use bloom_journal::Bloom; @@ -183,7 +184,7 @@ pub fn chunk_secondary<'a>(mut chunker: Box, chain: &'a Bloc let mut chunk_sink = |raw_data: &[u8]| { let compressed_size = snappy::compress_into(raw_data, &mut snappy_buffer); let compressed = &snappy_buffer[..compressed_size]; - let hash = compressed.sha3(); + let hash = keccak(&compressed); let size = compressed.len(); writer.lock().write_block_chunk(hash, compressed)?; @@ -240,7 +241,7 @@ impl<'a> StateChunker<'a> { let compressed_size = snappy::compress_into(&raw_data, &mut self.snappy_buffer); let compressed = &self.snappy_buffer[..compressed_size]; - let hash = compressed.sha3(); + let hash = keccak(&compressed); self.writer.lock().write_state_chunk(hash, compressed)?; trace!(target: "snapshot", "wrote 
state chunk. size: {}, uncompressed size: {}", compressed_size, raw_data.len()); @@ -318,7 +319,7 @@ impl StateRebuilder { pub fn new(db: Arc, pruning: Algorithm) -> Self { StateRebuilder { db: journaldb::new(db.clone(), pruning, ::db::COL_STATE), - state_root: SHA3_NULL_RLP, + state_root: KECCAK_NULL_RLP, known_code: HashMap::new(), missing_code: HashMap::new(), bloom: StateDB::load_bloom(&*db), @@ -362,7 +363,7 @@ impl StateRebuilder { // batch trie writes { - let mut account_trie = if self.state_root != SHA3_NULL_RLP { + let mut account_trie = if self.state_root != KECCAK_NULL_RLP { TrieDBMut::from_existing(self.db.as_hashdb_mut(), &mut self.state_root)? } else { TrieDBMut::new(self.db.as_hashdb_mut(), &mut self.state_root) @@ -443,7 +444,7 @@ fn rebuild_accounts( // new inline code Some(code) => status.new_code.push((code_hash, code, hash)), None => { - if code_hash != ::util::SHA3_EMPTY { + if code_hash != KECCAK_EMPTY { // see if this code has already been included inline match known_code.get(&code_hash) { Some(&first_with) => { diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 47bbb5997..72ef8ccc1 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -35,7 +35,9 @@ use service::ClientIoMessage; use io::IoChannel; -use util::{Bytes, H256, Mutex, RwLock, RwLockReadGuard, UtilError}; +use bigint::hash::H256; +use parking_lot::{Mutex, RwLock, RwLockReadGuard}; +use util::{Bytes, UtilError}; use util::journaldb::Algorithm; use util::kvdb::{Database, DatabaseConfig}; use util::snappy; @@ -100,7 +102,7 @@ impl Restoration { let block_chunks = manifest.block_hashes.iter().cloned().collect(); let raw_db = Arc::new(Database::open(params.db_config, &*params.db_path.to_string_lossy()) - .map_err(UtilError::SimpleString)?); + .map_err(UtilError::from)?); let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone()); let components = params.engine.snapshot_components() @@ -517,7 +519,7 @@ impl Service { match is_done { true => { - db.flush().map_err(::util::UtilError::SimpleString)?; + db.flush().map_err(UtilError::from)?; drop(db); return self.finalize_restoration(&mut *restoration); }, @@ -530,7 +532,7 @@ impl Service { } } }; - result.and_then(|_| db.flush().map_err(|e| ::util::UtilError::SimpleString(e).into())) + result.and_then(|_| db.flush().map_err(|e| UtilError::from(e).into())) } /// Feed a state chunk to be processed synchronously. @@ -678,7 +680,7 @@ mod tests { #[test] fn cannot_finish_with_invalid_chunks() { - use util::H256; + use bigint::hash::H256; use util::kvdb::DatabaseConfig; let spec = get_test_spec(); diff --git a/ethcore/src/snapshot/snapshot_service_trait.rs b/ethcore/src/snapshot/snapshot_service_trait.rs index 9df366250..9ef7706b4 100644 --- a/ethcore/src/snapshot/snapshot_service_trait.rs +++ b/ethcore/src/snapshot/snapshot_service_trait.rs @@ -15,7 +15,8 @@ // along with Parity. If not, see . use super::{ManifestData, RestorationStatus}; -use util::{Bytes, H256}; +use bigint::hash::H256; +use util::Bytes; use ipc::IpcConfig; /// The interface for a snapshot network service. diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index 6429667f5..c93409c51 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -18,6 +18,7 @@ //! which can be queried before and after a full snapshot/restore cycle. 
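// Editor's sketch, not part of the patch: the pattern these hunks apply throughout
// the snapshot code -- the `Hashable::sha3()` method call becomes the free function
// `hash::keccak`, and the `SHA3_*` constants become their `KECCAK_*` counterparts.
// The digest itself is still Keccak-256, so chunk hashes and state roots are unchanged.
use bigint::hash::H256;
use hash::keccak;

fn chunk_hash_sketch(compressed: &[u8]) -> H256 {
    // previously written as `compressed.sha3()`
    keccak(compressed)
}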
use std::sync::Arc; +use hash::{KECCAK_NULL_RLP}; use account_db::AccountDBMut; use basic_account::BasicAccount; @@ -31,12 +32,11 @@ use devtools::{RandomTempPath, GuardedTempResult}; use rand::Rng; use util::{DBValue, KeyValueDB}; -use util::hash::H256; +use bigint::hash::H256; use util::hashdb::HashDB; use util::journaldb; use util::trie::{Alphabet, StandardMap, SecTrieDBMut, TrieMut, ValueMode}; use util::trie::{TrieDB, TrieDBMut, Trie}; -use util::sha3::SHA3_NULL_RLP; // the proportion of accounts we will alter each tick. const ACCOUNT_CHURN: f32 = 0.01; @@ -51,7 +51,7 @@ impl StateProducer { /// Create a new `StateProducer`. pub fn new() -> Self { StateProducer { - state_root: SHA3_NULL_RLP, + state_root: KECCAK_NULL_RLP, storage_seed: H256::zero(), } } @@ -115,7 +115,7 @@ pub fn fill_storage(mut db: AccountDBMut, root: &mut H256, seed: &mut H256) { count: 100, }; { - let mut trie = if *root == SHA3_NULL_RLP { + let mut trie = if *root == KECCAK_NULL_RLP { SecTrieDBMut::new(&mut db, root) } else { SecTrieDBMut::from_existing(&mut db, root).unwrap() diff --git a/ethcore/src/snapshot/tests/proof_of_authority.rs b/ethcore/src/snapshot/tests/proof_of_authority.rs index 7c44ec20a..509ccb8fd 100644 --- a/ethcore/src/snapshot/tests/proof_of_authority.rs +++ b/ethcore/src/snapshot/tests/proof_of_authority.rs @@ -30,7 +30,7 @@ use spec::Spec; use tests::helpers; use transaction::{Transaction, Action, SignedTransaction}; -use util::{Address, Hashable}; +use util::Address; use util::kvdb; const PASS: &'static str = ""; @@ -38,14 +38,14 @@ const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes acti const TRANSITION_BLOCK_2: usize = 10; // block at which the second contract activates. macro_rules! secret { - ($e: expr) => { Secret::from_slice(&$e.sha3()) } + ($e: expr) => { Secret::from_slice(&$crate::hash::keccak($e)) } } lazy_static! { // contract addresses. static ref CONTRACT_ADDR_1: Address = Address::from_str("0000000000000000000000000000000000000005").unwrap(); static ref CONTRACT_ADDR_2: Address = Address::from_str("0000000000000000000000000000000000000006").unwrap(); - // secret: `sha3(1)`, and initial validator. + // secret: `keccak(1)`, and initial validator. static ref RICH_ADDR: Address = Address::from_str("7d577a597b2742b498cb5cf0c26cdcd726d39e6e").unwrap(); // rich address' secret. static ref RICH_SECRET: Secret = secret!("1"); @@ -53,7 +53,7 @@ lazy_static! { /// Contract code used here: https://gist.github.com/anonymous/2a43783647e0f0dfcc359bd6fd81d6d9 -/// Account with secrets "1".sha3() is initially the validator. +/// Account with secrets keccak("1") is initially the validator. /// Transitions to the contract at block 2, initially same validator set. /// Create a new Spec with AuthorityRound which uses a contract at address 5 to determine the current validators using `getValidators`. /// `native_contracts::test_contracts::ValidatorSet` provides a native wrapper for the ABi. 
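// Editor's sketch, not part of the patch: what the reworked `secret!` macro above
// expands to. A test secret is just the keccak hash of a short string, and the
// resulting key pair's address is derived from the keccak hash of its public key,
// which is how keccak("1") maps to the RICH_ADDR listed in the test.
use ethkey::{KeyPair, Secret};
use hash::keccak;

fn rich_keypair_sketch() -> KeyPair {
    let secret = Secret::from_slice(&keccak("1"));
    // address() == 7d577a597b2742b498cb5cf0c26cdcd726d39e6e, per the comment in the test above
    KeyPair::from_secret(secret).expect("keccak(\"1\") is a valid secp256k1 secret")
}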
diff --git a/ethcore/src/snapshot/tests/proof_of_work.rs b/ethcore/src/snapshot/tests/proof_of_work.rs index a6c0166f5..806ebf424 100644 --- a/ethcore/src/snapshot/tests/proof_of_work.rs +++ b/ethcore/src/snapshot/tests/proof_of_work.rs @@ -24,7 +24,8 @@ use blockchain::BlockChain; use snapshot::{chunk_secondary, Error as SnapshotError, Progress, SnapshotComponents}; use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}; -use util::{Mutex, snappy}; +use parking_lot::Mutex; +use util::snappy; use util::kvdb::{self, KeyValueDB, DBTransaction}; use std::sync::Arc; @@ -71,7 +72,7 @@ fn chunk_and_restore(amount: u64) { version: 2, state_hashes: Vec::new(), block_hashes: block_hashes, - state_root: ::util::sha3::SHA3_NULL_RLP, + state_root: ::hash::KECCAK_NULL_RLP, block_number: amount, block_hash: best_hash, }; @@ -108,7 +109,7 @@ fn chunk_and_restore_40k() { chunk_and_restore(40000) } #[test] fn checks_flag() { use rlp::RlpStream; - use util::H256; + use bigint::hash::H256; let mut stream = RlpStream::new_list(5); @@ -134,7 +135,7 @@ fn checks_flag() { version: 2, state_hashes: Vec::new(), block_hashes: Vec::new(), - state_root: ::util::sha3::SHA3_NULL_RLP, + state_root: ::hash::KECCAK_NULL_RLP, block_number: 102, block_hash: H256::default(), }; diff --git a/ethcore/src/snapshot/tests/state.rs b/ethcore/src/snapshot/tests/state.rs index 744b86577..d3bea08dc 100644 --- a/ethcore/src/snapshot/tests/state.rs +++ b/ethcore/src/snapshot/tests/state.rs @@ -25,17 +25,16 @@ use super::helpers::{compare_dbs, StateProducer}; use error::Error; use rand::{XorShiftRng, SeedableRng}; -use util::hash::H256; +use bigint::hash::H256; use util::journaldb::{self, Algorithm}; use util::kvdb::{Database, DatabaseConfig}; use util::memorydb::MemoryDB; -use util::Mutex; +use parking_lot::Mutex; use devtools::RandomTempPath; -use util::sha3::SHA3_NULL_RLP; - use std::sync::Arc; use std::sync::atomic::AtomicBool; +use hash::{KECCAK_NULL_RLP, keccak}; #[test] fn snap_and_restore() { @@ -98,7 +97,9 @@ fn snap_and_restore() { fn get_code_from_prev_chunk() { use std::collections::HashSet; use rlp::RlpStream; - use util::{HashDB, H256, U256, Hashable}; + use bigint::prelude::U256; + use bigint::hash::H256; + use util::HashDB; use account_db::{AccountDBMut, AccountDB}; @@ -107,8 +108,8 @@ fn get_code_from_prev_chunk() { let mut acc_stream = RlpStream::new_list(4); acc_stream.append(&U256::default()) .append(&U256::default()) - .append(&SHA3_NULL_RLP) - .append(&code.sha3()); + .append(&KECCAK_NULL_RLP) + .append(&keccak(code)); let (h1, h2) = (H256::random(), H256::random()); diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs index cff9be18c..e4b236b88 100644 --- a/ethcore/src/snapshot/watcher.rs +++ b/ethcore/src/snapshot/watcher.rs @@ -16,13 +16,14 @@ //! Watcher for snapshot-related chain events. 
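// Editor's sketch, not part of the patch: the other mechanical substitution in these
// hunks. Lock types now come from `parking_lot` directly; its `lock()` returns the
// guard itself rather than a poisoning `Result`, so call sites such as
// `writer.lock().write_block_chunk(...)` keep exactly the same shape.
use parking_lot::Mutex;

fn lock_sketch(buffer: &Mutex<Vec<u8>>, chunk: &[u8]) {
    buffer.lock().extend_from_slice(chunk); // no `.unwrap()` needed on the guard
}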
-use util::Mutex; +use parking_lot::Mutex; use client::{BlockChainClient, Client, ChainNotify}; use ids::BlockId; use service::ClientIoMessage; use io::IoChannel; -use util::{H256, Bytes}; +use bigint::hash::H256; +use util::Bytes; use std::sync::Arc; @@ -133,7 +134,8 @@ mod tests { use client::ChainNotify; - use util::{H256, U256}; + use bigint::prelude::U256; + use bigint::hash::H256; use std::collections::HashMap; diff --git a/ethcore/src/spec/genesis.rs b/ethcore/src/spec/genesis.rs index b7db9aa90..0e810f29c 100644 --- a/ethcore/src/spec/genesis.rs +++ b/ethcore/src/spec/genesis.rs @@ -14,8 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::{Address, H256, U256}; -use util::sha3::SHA3_NULL_RLP; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Address; +use hash::KECCAK_NULL_RLP; use ethjson; use super::seal::Seal; @@ -54,8 +56,8 @@ impl From for Genesis { timestamp: g.timestamp.map_or(0, Into::into), parent_hash: g.parent_hash.map_or_else(H256::zero, Into::into), gas_limit: g.gas_limit.into(), - transactions_root: g.transactions_root.map_or_else(|| SHA3_NULL_RLP.clone(), Into::into), - receipts_root: g.receipts_root.map_or_else(|| SHA3_NULL_RLP.clone(), Into::into), + transactions_root: g.transactions_root.map_or_else(|| KECCAK_NULL_RLP.clone(), Into::into), + receipts_root: g.receipts_root.map_or_else(|| KECCAK_NULL_RLP.clone(), Into::into), state_root: g.state_root.map(Into::into), gas_used: g.gas_used.map_or_else(U256::zero, Into::into), extra_data: g.extra_data.map_or_else(Vec::new, Into::into), diff --git a/ethcore/src/spec/seal.rs b/ethcore/src/spec/seal.rs index a1e929604..7dac32983 100644 --- a/ethcore/src/spec/seal.rs +++ b/ethcore/src/spec/seal.rs @@ -17,7 +17,7 @@ //! Spec seal. use rlp::*; -use util::hash::{H64, H256, H520}; +use bigint::hash::{H64, H256, H520}; use ethjson; /// Classic ethereum seal. diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 3fbfb66ef..202daba13 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -21,6 +21,7 @@ use std::collections::BTreeMap; use std::path::Path; use std::sync::Arc; use rustc_hex::FromHex; +use hash::{KECCAK_NULL_RLP, keccak}; use super::genesis::Genesis; use super::seal::Generic as GenericSeal; @@ -39,6 +40,9 @@ use state_db::StateDB; use state::{Backend, State, Substate}; use state::backend::Basic as BasicBackend; use trace::{NoopTracer, NoopVMTracer}; +use bigint::prelude::U256; +use bigint::hash::{H256, H2048}; +use parking_lot::RwLock; use util::*; /// Parameters common to ethereum-like blockchains. @@ -102,6 +106,8 @@ pub struct CommonParams { pub registrar: Address, /// Node permission managing contract address. pub node_permission_contract: Option
<Address>, + /// Transaction permission managing contract address. + pub transaction_permission_contract: Option<Address>
, } impl CommonParams { @@ -174,6 +180,7 @@ impl From for CommonParams { block_reward: p.block_reward.map_or_else(U256::zero, Into::into), registrar: p.registrar.map_or_else(Address::new, Into::into), node_permission_contract: p.node_permission_contract.map(Into::into), + transaction_permission_contract: p.transaction_permission_contract.map(Into::into), } } } @@ -203,9 +210,9 @@ pub struct Spec { pub gas_used: U256, /// The genesis block's timestamp field. pub timestamp: u64, - /// Transactions root of the genesis block. Should be SHA3_NULL_RLP. + /// Transactions root of the genesis block. Should be KECCAK_NULL_RLP. pub transactions_root: H256, - /// Receipts root of the genesis block. Should be SHA3_NULL_RLP. + /// Receipts root of the genesis block. Should be KECCAK_NULL_RLP. pub receipts_root: H256, /// The genesis block's extra data field. pub extra_data: Bytes, @@ -287,7 +294,7 @@ impl Spec { // given a pre-constructor state, run all the given constructors and produce a new state and state root. fn run_constructors(&self, factories: &Factories, mut db: T) -> Result { - let mut root = SHA3_NULL_RLP; + let mut root = KECCAK_NULL_RLP; // basic accounts in spec. { @@ -301,7 +308,7 @@ impl Spec { for (address, account) in self.genesis_state.get().iter() { db.note_non_null_account(address); account.insert_additional( - &mut *factories.accountdb.create(db.as_hashdb_mut(), address.sha3()), + &mut *factories.accountdb.create(db.as_hashdb_mut(), keccak(address)), &factories.trie ); } @@ -333,7 +340,7 @@ impl Spec { trace!(target: "spec", " .. root before = {}", state.root()); let params = ActionParams { code_address: address.clone(), - code_hash: Some(constructor.sha3()), + code_hash: Some(keccak(constructor)), address: address.clone(), sender: from.clone(), origin: from.clone(), @@ -349,7 +356,7 @@ impl Spec { { let mut exec = Executive::new(&mut state, &env_info, self.engine.as_ref()); - if let Err(e) = exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { + if let Err(e) = exec.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer) { warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e); } } @@ -399,7 +406,7 @@ impl Spec { header.set_number(0); header.set_author(self.author.clone()); header.set_transactions_root(self.transactions_root.clone()); - header.set_uncles_hash(RlpStream::new_list(0).out().sha3()); + header.set_uncles_hash(keccak(RlpStream::new_list(0).out())); header.set_extra_data(self.extra_data.clone()); header.set_state_root(self.state_root()); header.set_receipts_root(self.receipts_root.clone()); @@ -486,7 +493,7 @@ impl Spec { /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus with applying reward on block close. pub fn new_test_with_reward() -> Spec { load_bundled!("null_morden_with_reward") } - /// Create a new Spec which is a NullEngine consensus with a premine of address whose secret is sha3(''). + /// Create a new Spec which is a NullEngine consensus with a premine of address whose secret is keccak(''). pub fn new_null() -> Spec { load_bundled!("null") } /// Create a new Spec which constructs a contract at address 5 with storage at 0 equal to 1. @@ -496,15 +503,15 @@ impl Spec { pub fn new_instant() -> Spec { load_bundled!("instant_seal") } /// Create a new Spec with AuthorityRound consensus which does internal sealing (not requiring work). - /// Accounts with secrets "0".sha3() and "1".sha3() are the validators. 
+ /// Accounts with secrets keccak("0") and keccak("1") are the validators. pub fn new_test_round() -> Self { load_bundled!("authority_round") } /// Create a new Spec with Tendermint consensus which does internal sealing (not requiring work). - /// Account "0".sha3() and "1".sha3() are a authorities. + /// Account keccak("0") and keccak("1") are a authorities. pub fn new_test_tendermint() -> Self { load_bundled!("tendermint") } /// TestList.sol used in both specs: https://github.com/paritytech/contracts/pull/30/files - /// Accounts with secrets "0".sha3() and "1".sha3() are initially the validators. + /// Accounts with secrets keccak("0") and keccak("1") are initially the validators. /// Create a new Spec with BasicAuthority which uses a contract at address 5 to determine the current validators using `getValidators`. /// Second validator can be removed with "0xbfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1" and added back in using "0x4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1". pub fn new_validator_safe_contract() -> Self { load_bundled!("validator_safe_contract") } @@ -515,7 +522,7 @@ impl Spec { pub fn new_validator_contract() -> Self { load_bundled!("validator_contract") } /// Create a new Spec with BasicAuthority which uses multiple validator sets changing with height. - /// Account with secrets "0".sha3() is the validator for block 1 and with "1".sha3() onwards. + /// Account with secrets keccak("0") is the validator for block 1 and with keccak("1") onwards. pub fn new_validator_multi() -> Self { load_bundled!("validator_multi") } /// Create a new spec for a PoW chain @@ -543,7 +550,7 @@ mod tests { assert_eq!(test_spec.state_root(), H256::from_str("f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9").unwrap()); let genesis = test_spec.genesis_block(); - assert_eq!(BlockView::new(&genesis).header_view().sha3(), H256::from_str("0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303").unwrap()); + assert_eq!(BlockView::new(&genesis).header_view().hash(), H256::from_str("0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303").unwrap()); } #[test] diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index 1235fd289..b46b5ec4c 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -19,6 +19,9 @@ use std::fmt; use std::sync::Arc; use std::collections::HashMap; +use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak}; +use bigint::prelude::U256; +use bigint::hash::H256; use util::*; use pod_account::*; use rlp::*; @@ -81,10 +84,10 @@ impl Account { Account { balance: balance, nonce: nonce, - storage_root: SHA3_NULL_RLP, + storage_root: KECCAK_NULL_RLP, storage_cache: Self::empty_storage_cache(), storage_changes: storage, - code_hash: code.sha3(), + code_hash: keccak(&code), code_size: Some(code.len()), code_cache: Arc::new(code), code_filth: Filth::Dirty, @@ -101,10 +104,10 @@ impl Account { Account { balance: pod.balance, nonce: pod.nonce, - storage_root: SHA3_NULL_RLP, + storage_root: KECCAK_NULL_RLP, storage_cache: Self::empty_storage_cache(), storage_changes: pod.storage.into_iter().collect(), - code_hash: pod.code.as_ref().map_or(SHA3_EMPTY, |c| c.sha3()), + code_hash: pod.code.as_ref().map_or(KECCAK_EMPTY, |c| keccak(c)), code_filth: Filth::Dirty, code_size: Some(pod.code.as_ref().map_or(0, |c| c.len())), code_cache: Arc::new(pod.code.map_or_else(|| { warn!("POD account with unknown code is being created! 
Assuming no code."); vec![] }, |c| c)), @@ -117,10 +120,10 @@ impl Account { Account { balance: balance, nonce: nonce, - storage_root: SHA3_NULL_RLP, + storage_root: KECCAK_NULL_RLP, storage_cache: Self::empty_storage_cache(), storage_changes: HashMap::new(), - code_hash: SHA3_EMPTY, + code_hash: KECCAK_EMPTY, code_cache: Arc::new(vec![]), code_size: Some(0), code_filth: Filth::Clean, @@ -140,10 +143,10 @@ impl Account { Account { balance: balance, nonce: nonce, - storage_root: SHA3_NULL_RLP, + storage_root: KECCAK_NULL_RLP, storage_cache: Self::empty_storage_cache(), storage_changes: HashMap::new(), - code_hash: SHA3_EMPTY, + code_hash: KECCAK_EMPTY, code_cache: Arc::new(vec![]), code_size: None, code_filth: Filth::Clean, @@ -154,7 +157,7 @@ impl Account { /// Set this account's code to the given code. /// NOTE: Account should have been created with `new_contract()` pub fn init_code(&mut self, code: Bytes) { - self.code_hash = code.sha3(); + self.code_hash = keccak(&code); self.code_cache = Arc::new(code); self.code_size = Some(self.code_cache.len()); self.code_filth = Filth::Dirty; @@ -211,7 +214,7 @@ impl Account { pub fn address_hash(&self, address: &Address) -> H256 { let hash = self.address_hash.get(); hash.unwrap_or_else(|| { - let hash = address.sha3(); + let hash = keccak(address); self.address_hash.set(Some(hash.clone())); hash }) @@ -220,7 +223,7 @@ impl Account { /// returns the account's code. If `None` then the code cache isn't available - /// get someone who knows to call `note_code`. pub fn code(&self) -> Option> { - if self.code_hash != SHA3_EMPTY && self.code_cache.is_empty() { + if self.code_hash != KECCAK_EMPTY && self.code_cache.is_empty() { return None; } Some(self.code_cache.clone()) @@ -235,7 +238,7 @@ impl Account { #[cfg(test)] /// Provide a byte array which hashes to the `code_hash`. returns the hash as a result. pub fn note_code(&mut self, code: Bytes) -> Result<(), H256> { - let h = code.sha3(); + let h = keccak(&code); if self.code_hash == h { self.code_cache = Arc::new(code); self.code_size = Some(self.code_cache.len()); @@ -247,7 +250,7 @@ impl Account { /// Is `code_cache` valid; such that code is going to return Some? pub fn is_cached(&self) -> bool { - !self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == SHA3_EMPTY) + !self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == KECCAK_EMPTY) } /// Provide a database to get `code_hash`. Should not be called if it is a contract without code. @@ -284,7 +287,7 @@ impl Account { // TODO: fill out self.code_cache; trace!("Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); self.code_size.is_some() || - if self.code_hash != SHA3_EMPTY { + if self.code_hash != KECCAK_EMPTY { match db.get(&self.code_hash) { Some(x) => { self.code_size = Some(x.len()); @@ -308,19 +311,19 @@ impl Account { /// NOTE: Will panic if `!self.storage_is_clean()` pub fn is_empty(&self) -> bool { assert!(self.storage_is_clean(), "Account::is_empty() may only legally be called when storage is clean."); - self.is_null() && self.storage_root == SHA3_NULL_RLP + self.is_null() && self.storage_root == KECCAK_NULL_RLP } /// Check if account has zero nonce, balance, no code. pub fn is_null(&self) -> bool { self.balance.is_zero() && self.nonce.is_zero() && - self.code_hash == SHA3_EMPTY + self.code_hash == KECCAK_EMPTY } /// Check if account is basic (Has no code). 
pub fn is_basic(&self) -> bool { - self.code_hash == SHA3_EMPTY + self.code_hash == KECCAK_EMPTY } /// Return the storage root associated with this account or None if it has been altered via the overlay. @@ -592,8 +595,8 @@ mod tests { assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); assert_eq!(a.balance(), &U256::from(69u8)); assert_eq!(a.nonce(), &U256::from(0u8)); - assert_eq!(a.code_hash(), SHA3_EMPTY); - assert_eq!(a.storage_root().unwrap(), &SHA3_NULL_RLP); + assert_eq!(a.code_hash(), KECCAK_EMPTY); + assert_eq!(a.storage_root().unwrap(), &KECCAK_NULL_RLP); } #[test] diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index ea172b5af..c0d0380e7 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -25,7 +25,9 @@ use std::collections::{HashSet, HashMap}; use std::sync::Arc; use state::Account; -use util::{Address, MemoryDB, Mutex, H256}; +use bigint::hash::H256; +use parking_lot::Mutex; +use util::{Address, MemoryDB}; use util::hashdb::{AsHashDB, HashDB, DBValue}; /// State backend. See module docs for more details. diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 1b36c91ec..eb6deac23 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -24,6 +24,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, BTreeMap, HashSet}; use std::fmt; use std::sync::Arc; +use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY}; use receipt::Receipt; use engines::Engine; @@ -41,6 +42,8 @@ use transaction::SignedTransaction; use state_db::StateDB; use evm::{Factory as EvmFactory}; +use bigint::prelude::U256; +use bigint::hash::H256; use util::*; use util::trie; @@ -209,13 +212,52 @@ pub fn check_proof( Err(_) => return ProvedExecution::BadProof, }; - match state.execute(env_info, engine, transaction, TransactOptions::with_no_tracing(), true) { + let options = TransactOptions::with_no_tracing().save_output_from_contract(); + match state.execute(env_info, engine, transaction, options, true) { Ok(executed) => ProvedExecution::Complete(executed), Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof, Err(e) => ProvedExecution::Failed(e), } } +/// Prove a transaction on the given state. +/// Returns `None` when the transacion could not be proved, +/// and a proof otherwise. +pub fn prove_transaction( + db: H, + root: H256, + transaction: &SignedTransaction, + engine: &Engine, + env_info: &EnvInfo, + factories: Factories, + virt: bool, +) -> Option<(Bytes, Vec)> { + use self::backend::Proving; + + let backend = Proving::new(db); + let res = State::from_existing( + backend, + root, + engine.account_start_nonce(env_info.number), + factories, + ); + + let mut state = match res { + Ok(state) => state, + Err(_) => return None, + }; + + let options = TransactOptions::with_no_tracing().dont_check_nonce().save_output_from_contract(); + match state.execute(env_info, engine, transaction, options, virt) { + Err(ExecutionError::Internal(_)) => None, + Err(e) => { + trace!(target: "state", "Proved call failed: {}", e); + Some((Vec::new(), state.drop().1.extract_proof())) + } + Ok(res) => Some((res.output, state.drop().1.extract_proof())), + } +} + /// Representation of the entire state of all accounts in the system. /// /// `State` can work together with `StateDB` to share account cache. @@ -552,7 +594,7 @@ impl State { /// Get an account's code hash. 
pub fn code_hash(&self, a: &Address) -> trie::Result { self.ensure_cached(a, RequireCache::None, true, - |a| a.as_ref().map_or(SHA3_EMPTY, |a| a.code_hash())) + |a| a.as_ref().map_or(KECCAK_EMPTY, |a| a.code_hash())) } /// Get accounts' code size. @@ -938,7 +980,7 @@ impl State { /// Returns a merkle proof of the account's trie node omitted or an encountered trie error. /// If the account doesn't exist in the trie, prove that and return defaults. /// Requires a secure trie to be used for accurate results. - /// `account_key` == sha3(address) + /// `account_key` == keccak(address) pub fn prove_account(&self, account_key: H256) -> trie::Result<(Vec, BasicAccount)> { let mut recorder = Recorder::new(); let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; @@ -949,8 +991,8 @@ impl State { let account = maybe_account.unwrap_or_else(|| BasicAccount { balance: 0.into(), nonce: self.account_start_nonce, - code_hash: SHA3_EMPTY, - storage_root: ::util::sha3::SHA3_NULL_RLP, + code_hash: KECCAK_EMPTY, + storage_root: KECCAK_NULL_RLP, }); Ok((recorder.drain().into_iter().map(|r| r.data).collect(), account)) @@ -959,11 +1001,11 @@ impl State { /// Prove an account's storage key's existence or nonexistence in the state. /// Returns a merkle proof of the account's storage trie. /// Requires a secure trie to be used for correctness. - /// `account_key` == sha3(address) - /// `storage_key` == sha3(key) + /// `account_key` == keccak(address) + /// `storage_key` == keccak(key) pub fn prove_storage(&self, account_key: H256, storage_key: H256) -> trie::Result<(Vec, H256)> { // TODO: probably could look into cache somehow but it's keyed by - // address, not sha3(address). + // address, not keccak(address). let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; let acc = match trie.get_with(&account_key, Account::from_rlp)? { Some(acc) => acc, @@ -1008,13 +1050,15 @@ impl Clone for State { #[cfg(test)] mod tests { - use std::sync::Arc; use std::str::FromStr; use rustc_hex::FromHex; + use hash::keccak; use super::*; use ethkey::Secret; - use util::{U256, H256, Address, Hashable}; + use bigint::prelude::U256; + use bigint::hash::H256; + use util::Address; use tests::helpers::*; use vm::EnvInfo; use spec::*; @@ -1024,7 +1068,7 @@ mod tests { use evm::CallType; fn secret() -> Secret { - "".sha3().into() + keccak("").into() } #[test] diff --git a/ethcore/src/state/substate.rs b/ethcore/src/state/substate.rs index 76f6eaed7..4acc54114 100644 --- a/ethcore/src/state/substate.rs +++ b/ethcore/src/state/substate.rs @@ -16,7 +16,8 @@ //! Execution environment substate. 
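// Editor's sketch, not part of the patch: how a caller derives the keys that the
// proving helpers above expect. The state trie is a secure trie, so accounts and
// storage slots are addressed by the keccak hash of the address / slot key rather
// than the raw value -- hence `account_key == keccak(address)` in the doc comments.
use bigint::hash::H256;
use hash::keccak;
use util::Address;

fn proof_keys_sketch(address: &Address, slot: &H256) -> (H256, H256) {
    let account_key = keccak(address); // key passed to `prove_account`
    let storage_key = keccak(slot);    // key passed to `prove_storage`
    (account_key, storage_key)
}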
use std::collections::HashSet; -use util::{Address, U256}; +use bigint::prelude::U256; +use util::Address; use log_entry::LogEntry; use evm::{Schedule, CleanDustMode}; use super::CleanupMode; diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs index e2f6fdaf0..346312c3b 100644 --- a/ethcore/src/state_db.rs +++ b/ethcore/src/state_db.rs @@ -20,11 +20,13 @@ use lru_cache::LruCache; use util::cache::MemoryLruCache; use util::journaldb::JournalDB; use util::kvdb::KeyValueDB; -use util::hash::{H256}; +use bigint::hash::H256; use util::hashdb::HashDB; use state::{self, Account}; use header::BlockNumber; -use util::{Address, DBTransaction, UtilError, Mutex, Hashable}; +use hash::keccak; +use parking_lot::Mutex; +use util::{Address, DBTransaction, UtilError}; use bloom_journal::{Bloom, BloomJournal}; use db::COL_ACCOUNT_BLOOM; use byteorder::{LittleEndian, ByteOrder}; @@ -443,20 +445,22 @@ impl state::Backend for StateDB { fn note_non_null_account(&self, address: &Address) { trace!(target: "account_bloom", "Note account bloom: {:?}", address); let mut bloom = self.account_bloom.lock(); - bloom.set(&*address.sha3()); + bloom.set(&*keccak(address)); } fn is_known_null(&self, address: &Address) -> bool { trace!(target: "account_bloom", "Check account bloom: {:?}", address); let bloom = self.account_bloom.lock(); - let is_null = !bloom.check(&*address.sha3()); + let is_null = !bloom.check(&*keccak(address)); is_null } } #[cfg(test)] mod tests { - use util::{U256, H256, Address, DBTransaction}; + use bigint::prelude::U256; + use bigint::hash::H256; + use util::{Address, DBTransaction}; use tests::helpers::*; use state::{Account, Backend}; use ethcore_logger::init_log; diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index ea7dd32f5..25955679c 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -16,6 +16,7 @@ use std::str::FromStr; use std::sync::Arc; +use hash::keccak; use io::IoChannel; use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockId}; use state::{self, State, CleanupMode}; @@ -24,6 +25,7 @@ use ethereum; use block::IsBlock; use tests::helpers::*; use types::filter::Filter; +use bigint::prelude::U256; use util::*; use devtools::*; use miner::Miner; @@ -254,7 +256,7 @@ fn can_mine() { let b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]).close(); - assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3()); + assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().hash()); } #[test] @@ -298,7 +300,7 @@ fn change_history_size() { #[test] fn does_not_propagate_delayed_transactions() { - let key = KeyPair::from_secret("test".sha3().into()).unwrap(); + let key = KeyPair::from_secret(keccak("test").into()).unwrap(); let secret = key.secret(); let tx0 = PendingTransaction::new(Transaction { nonce: 0.into(), diff --git a/ethcore/src/tests/evm.rs b/ethcore/src/tests/evm.rs index 7b0e03d24..8b51c3487 100644 --- a/ethcore/src/tests/evm.rs +++ b/ethcore/src/tests/evm.rs @@ -1,6 +1,7 @@ //! Tests of EVM integration with transaction execution. 
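// Editor's sketch, not part of the patch: the two constants the account and state
// code above keeps comparing against. KECCAK_EMPTY is the hash of zero bytes (an
// account with no code); KECCAK_NULL_RLP is the hash of 0x80, the RLP of the empty
// string, i.e. the root of an empty trie. Both hex values appear verbatim in the
// account RLP test earlier in this diff.
use hash::{keccak, KECCAK_EMPTY, KECCAK_NULL_RLP};

fn constants_sketch() {
    assert_eq!(keccak(""), KECCAK_EMPTY);               // account has no code
    assert_eq!(keccak(&[0x80u8][..]), KECCAK_NULL_RLP); // empty trie root
}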
use std::sync::Arc; +use hash::keccak; use vm::{EnvInfo, ActionParams, ActionValue, CallType}; use evm::{Factory, VMType}; use executive::Executive; @@ -11,16 +12,17 @@ use transaction::SYSTEM_ADDRESS; use rustc_hex::FromHex; +use bigint::hash::H256; use util::*; evm_test!{test_blockhash_eip210: test_blockhash_eip210_jit, test_blockhash_eip210_int} fn test_blockhash_eip210(factory: Factory) { let get_prev_hash_code = Arc::new("600143034060205260206020f3".from_hex().unwrap()); // this returns previous block hash - let get_prev_hash_code_hash = get_prev_hash_code.sha3(); + let get_prev_hash_code_hash = keccak(get_prev_hash_code.as_ref()); // This is same as DEFAULT_BLOCKHASH_CONTRACT except for metropolis transition block check removed. let test_blockhash_contract = "73fffffffffffffffffffffffffffffffffffffffe33141561007a57600143036020526000356101006020510755600061010060205107141561005057600035610100610100602051050761010001555b6000620100006020510714156100755760003561010062010000602051050761020001555b61014a565b4360003512151561009057600060405260206040f35b610100600035430312156100b357610100600035075460605260206060f3610149565b62010000600035430312156100d157600061010060003507146100d4565b60005b156100f6576101006101006000350507610100015460805260206080f3610148565b630100000060003543031215610116576000620100006000350714610119565b60005b1561013c57610100620100006000350507610200015460a052602060a0f3610147565b600060c052602060c0f35b5b5b5b5b"; let blockhash_contract_code = Arc::new(test_blockhash_contract.from_hex().unwrap()); - let blockhash_contract_code_hash = blockhash_contract_code.sha3(); + let blockhash_contract_code_hash = keccak(blockhash_contract_code.as_ref()); let engine = TestEngine::new_metropolis(); let mut env_info = EnvInfo::default(); diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index d8ba8313e..ac5669da7 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -16,9 +16,12 @@ use std::collections::BTreeMap; use std::sync::Arc; +use hash::keccak; use ethkey::KeyPair; use io::*; use client::{BlockChainClient, Client, ClientConfig}; +use bigint::prelude::U256; +use bigint::hash::H256; use util::*; use spec::*; use account_provider::AccountProvider; @@ -178,7 +181,7 @@ pub fn generate_dummy_client_with_spec_accounts_and_data(get_test_spec: F, ac let mut last_hashes = vec![]; let mut last_header = genesis_header.clone(); - let kp = KeyPair::from_secret_slice(&"".sha3()).unwrap(); + let kp = KeyPair::from_secret_slice(&keccak("")).unwrap(); let author = kp.address(); let mut n = 0; diff --git a/ethcore/src/tests/trace.rs b/ethcore/src/tests/trace.rs index 61377daee..ed434d8e8 100644 --- a/ethcore/src/tests/trace.rs +++ b/ethcore/src/tests/trace.rs @@ -17,7 +17,9 @@ //! 
Client tests of tracing use ethkey::KeyPair; +use hash::keccak; use block::*; +use bigint::prelude::U256; use util::*; use io::*; use spec::*; @@ -62,14 +64,14 @@ fn can_trace_block_and_uncle_reward() { // | // block with transaction and uncle - let genesis_header = spec.genesis_header(); + let genesis_header = spec.genesis_header(); let mut db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let mut rolling_timestamp = 40; let mut last_hashes = vec![]; let mut last_header = genesis_header.clone(); last_hashes.push(last_header.hash()); - let kp = KeyPair::from_secret_slice(&"".sha3()).unwrap(); + let kp = KeyPair::from_secret_slice(&keccak("")).unwrap(); let author = kp.address(); // Add root block first @@ -89,7 +91,7 @@ fn can_trace_block_and_uncle_reward() { rolling_timestamp += 10; root_block.set_timestamp(rolling_timestamp); - let root_block = root_block.close_and_lock().seal(engine, vec![]).unwrap(); + let root_block = root_block.close_and_lock().seal(engine, vec![]).unwrap(); if let Err(e) = client.import_block(root_block.rlp_bytes()) { panic!("error importing block which is valid by definition: {:?}", e); @@ -118,7 +120,7 @@ fn can_trace_block_and_uncle_reward() { rolling_timestamp += 10; parent_block.set_timestamp(rolling_timestamp); - let parent_block = parent_block.close_and_lock().seal(engine, vec![]).unwrap(); + let parent_block = parent_block.close_and_lock().seal(engine, vec![]).unwrap(); if let Err(e) = client.import_block(parent_block.rlp_bytes()) { panic!("error importing block which is valid by definition: {:?}", e); @@ -131,14 +133,14 @@ fn can_trace_block_and_uncle_reward() { // Add testing block with transaction and uncle let mut block = OpenBlock::new( - engine, - Default::default(), - true, - db, - &last_header, + engine, + Default::default(), + true, + db, + &last_header, Arc::new(last_hashes.clone()), author.clone(), - (3141562.into(), 31415620.into()), + (3141562.into(), 31415620.into()), vec![], false ).unwrap(); @@ -172,7 +174,7 @@ fn can_trace_block_and_uncle_reward() { let res = client.import_block(block.rlp_bytes()); if res.is_err() { - panic!("error importing block: {:#?}", res.err().unwrap()); + panic!("error importing block: {:#?}", res.err().unwrap()); } block.drain(); @@ -202,5 +204,5 @@ fn can_trace_block_and_uncle_reward() { // Test1. 
Check block filter let traces = client.block_traces(BlockId::Number(3)); - assert_eq!(traces.unwrap().len(), 3); + assert_eq!(traces.unwrap().len(), 3); } diff --git a/util/src/timer.rs b/ethcore/src/timer.rs similarity index 100% rename from util/src/timer.rs rename to ethcore/src/timer.rs diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index 0fe057a64..267acc45a 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -21,7 +21,9 @@ use std::sync::Arc; use bloomchain::{Number, Config as BloomConfig}; use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup}; use heapsize::HeapSizeOf; -use util::{H256, H264, KeyValueDB, DBTransaction, RwLock}; +use bigint::hash::{H256, H264}; +use util::{KeyValueDB, DBTransaction}; +use parking_lot::RwLock; use header::BlockNumber; use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras}; use db::{self, Key, Writable, Readable, CacheUpdatePolicy}; @@ -411,7 +413,9 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { mod tests { use std::collections::HashMap; use std::sync::Arc; - use util::{Address, U256, H256, DBTransaction}; + use bigint::prelude::U256; + use bigint::hash::H256; + use util::{Address, DBTransaction}; use header::BlockNumber; use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest}; use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError}; diff --git a/ethcore/src/trace/executive_tracer.rs b/ethcore/src/trace/executive_tracer.rs index 57e8eabb8..fbc8eaee1 100644 --- a/ethcore/src/trace/executive_tracer.rs +++ b/ethcore/src/trace/executive_tracer.rs @@ -16,7 +16,8 @@ //! Simple executive tracer. -use util::{Bytes, Address, U256}; +use bigint::prelude::U256; +use util::{Bytes, Address}; use vm::ActionParams; use trace::trace::{Call, Create, Action, Res, CreateResult, CallResult, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, Suicide, Reward, RewardType}; use trace::{Tracer, VMTracer, FlatTrace, TraceError}; @@ -158,7 +159,7 @@ impl Tracer for ExecutiveTracer { debug!(target: "trace", "Traced suicide {:?}", trace); self.traces.push(trace); } - + fn trace_reward(&mut self, author: Address, value: U256, reward_type: RewardType) { let trace = FlatTrace { subtraces: 0, diff --git a/ethcore/src/trace/import.rs b/ethcore/src/trace/import.rs index 4bcc376f9..e74e03eea 100644 --- a/ethcore/src/trace/import.rs +++ b/ethcore/src/trace/import.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . //! Traces import request. -use util::H256; +use bigint::hash::H256; use header::BlockNumber; use trace::FlatBlockTraces; diff --git a/ethcore/src/trace/mod.rs b/ethcore/src/trace/mod.rs index cbec8a149..06f1afb0b 100644 --- a/ethcore/src/trace/mod.rs +++ b/ethcore/src/trace/mod.rs @@ -37,7 +37,9 @@ pub use self::types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDi pub use self::types::flat::{FlatTrace, FlatTransactionTraces, FlatBlockTraces}; pub use self::types::filter::{Filter, AddressesFilter}; -use util::{Bytes, Address, U256, H256, DBTransaction}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{Bytes, Address, DBTransaction}; use self::trace::{Call, Create}; use vm::ActionParams; use header::BlockNumber; diff --git a/ethcore/src/trace/noop_tracer.rs b/ethcore/src/trace/noop_tracer.rs index c9bd5f2e8..c39c805fb 100644 --- a/ethcore/src/trace/noop_tracer.rs +++ b/ethcore/src/trace/noop_tracer.rs @@ -16,7 +16,8 @@ //! Nonoperative tracer. 
-use util::{Bytes, Address, U256}; +use bigint::prelude::U256; +use util::{Bytes, Address}; use vm::ActionParams; use trace::{Tracer, VMTracer, FlatTrace, TraceError}; use trace::trace::{Call, Create, VMTrace, RewardType}; diff --git a/ethcore/src/trace/types/filter.rs b/ethcore/src/trace/types/filter.rs index 3abdb7143..f7e2d2140 100644 --- a/ethcore/src/trace/types/filter.rs +++ b/ethcore/src/trace/types/filter.rs @@ -18,8 +18,8 @@ use std::ops::Range; use bloomchain::{Filter as BloomFilter, Bloom, Number}; +use hash::keccak; use util::Address; -use util::sha3::Hashable; use bloomable::Bloomable; use basic_types::LogBloom; use trace::flat::FlatTrace; @@ -55,7 +55,7 @@ impl AddressesFilter { match self.list.is_empty() { true => vec![LogBloom::default()], false => self.list.iter() - .map(|address| LogBloom::from_bloomed(&address.sha3())) + .map(|address| LogBloom::from_bloomed(&keccak(address))) .collect(), } } @@ -67,7 +67,7 @@ impl AddressesFilter { false => blooms .into_iter() .flat_map(|bloom| self.list.iter() - .map(|address| bloom.with_bloomed(&address.sha3())) + .map(|address| bloom.with_bloomed(&keccak(address))) .collect::>()) .collect(), } @@ -139,7 +139,7 @@ impl Filter { #[cfg(test)] mod tests { use util::Address; - use util::sha3::Hashable; + use hash::keccak; use bloomable::Bloomable; use trace::trace::{Action, Call, Res, Create, CreateResult, Suicide, Reward}; use trace::flat::FlatTrace; @@ -169,9 +169,9 @@ mod tests { let blooms = filter.bloom_possibilities(); assert_eq!(blooms.len(), 1); - assert!(blooms[0].contains_bloomed(&Address::from(1).sha3())); - assert!(blooms[0].contains_bloomed(&Address::from(2).sha3())); - assert!(!blooms[0].contains_bloomed(&Address::from(3).sha3())); + assert!(blooms[0].contains_bloomed(&keccak(Address::from(1)))); + assert!(blooms[0].contains_bloomed(&keccak(Address::from(2)))); + assert!(!blooms[0].contains_bloomed(&keccak(Address::from(3)))); } #[test] @@ -185,8 +185,8 @@ mod tests { let blooms = filter.bloom_possibilities(); assert_eq!(blooms.len(), 1); - assert!(blooms[0].contains_bloomed(&Address::from(1).sha3())); - assert!(!blooms[0].contains_bloomed(&Address::from(2).sha3())); + assert!(blooms[0].contains_bloomed(&keccak(Address::from(1)))); + assert!(!blooms[0].contains_bloomed(&keccak(Address::from(2)))); } #[test] @@ -200,8 +200,8 @@ mod tests { let blooms = filter.bloom_possibilities(); assert_eq!(blooms.len(), 1); - assert!(blooms[0].contains_bloomed(&Address::from(1).sha3())); - assert!(!blooms[0].contains_bloomed(&Address::from(2).sha3())); + assert!(blooms[0].contains_bloomed(&keccak(Address::from(1)))); + assert!(!blooms[0].contains_bloomed(&keccak(Address::from(2)))); } #[test] @@ -215,25 +215,25 @@ mod tests { let blooms = filter.bloom_possibilities(); assert_eq!(blooms.len(), 4); - assert!(blooms[0].contains_bloomed(&Address::from(1).sha3())); - assert!(blooms[0].contains_bloomed(&Address::from(2).sha3())); - assert!(!blooms[0].contains_bloomed(&Address::from(3).sha3())); - assert!(!blooms[0].contains_bloomed(&Address::from(4).sha3())); + assert!(blooms[0].contains_bloomed(&keccak(Address::from(1)))); + assert!(blooms[0].contains_bloomed(&keccak(Address::from(2)))); + assert!(!blooms[0].contains_bloomed(&keccak(Address::from(3)))); + assert!(!blooms[0].contains_bloomed(&keccak(Address::from(4)))); - assert!(blooms[1].contains_bloomed(&Address::from(1).sha3())); - assert!(blooms[1].contains_bloomed(&Address::from(4).sha3())); - assert!(!blooms[1].contains_bloomed(&Address::from(2).sha3())); - 
assert!(!blooms[1].contains_bloomed(&Address::from(3).sha3())); + assert!(blooms[1].contains_bloomed(&keccak(Address::from(1)))); + assert!(blooms[1].contains_bloomed(&keccak(Address::from(4)))); + assert!(!blooms[1].contains_bloomed(&keccak(Address::from(2)))); + assert!(!blooms[1].contains_bloomed(&keccak(Address::from(3)))); - assert!(blooms[2].contains_bloomed(&Address::from(2).sha3())); - assert!(blooms[2].contains_bloomed(&Address::from(3).sha3())); - assert!(!blooms[2].contains_bloomed(&Address::from(1).sha3())); - assert!(!blooms[2].contains_bloomed(&Address::from(4).sha3())); + assert!(blooms[2].contains_bloomed(&keccak(Address::from(2)))); + assert!(blooms[2].contains_bloomed(&keccak(Address::from(3)))); + assert!(!blooms[2].contains_bloomed(&keccak(Address::from(1)))); + assert!(!blooms[2].contains_bloomed(&keccak(Address::from(4)))); - assert!(blooms[3].contains_bloomed(&Address::from(3).sha3())); - assert!(blooms[3].contains_bloomed(&Address::from(4).sha3())); - assert!(!blooms[3].contains_bloomed(&Address::from(1).sha3())); - assert!(!blooms[3].contains_bloomed(&Address::from(2).sha3())); + assert!(blooms[3].contains_bloomed(&keccak(Address::from(3)))); + assert!(blooms[3].contains_bloomed(&keccak(Address::from(4)))); + assert!(!blooms[3].contains_bloomed(&keccak(Address::from(1)))); + assert!(!blooms[3].contains_bloomed(&keccak(Address::from(2)))); } #[test] diff --git a/ethcore/src/trace/types/localized.rs b/ethcore/src/trace/types/localized.rs index 2d4850a8a..f82d710c1 100644 --- a/ethcore/src/trace/types/localized.rs +++ b/ethcore/src/trace/types/localized.rs @@ -16,7 +16,7 @@ //! Localized traces type definitions -use util::H256; +use bigint::hash::H256; use super::trace::{Action, Res}; use header::BlockNumber; diff --git a/ethcore/src/trace/types/trace.rs b/ethcore/src/trace/types/trace.rs index 01c5c4b43..a4efcee0e 100644 --- a/ethcore/src/trace/types/trace.rs +++ b/ethcore/src/trace/types/trace.rs @@ -16,8 +16,9 @@ //! Tracing datatypes. -use util::{U256, Bytes, Address}; -use util::sha3::Hashable; +use bigint::prelude::U256; +use util::{Bytes, Address}; +use hash::keccak; use bloomable::Bloomable; use rlp::*; @@ -51,7 +52,7 @@ pub struct CreateResult { impl CreateResult { /// Returns bloom. pub fn bloom(&self) -> LogBloom { - LogBloom::from_bloomed(&self.address.sha3()) + LogBloom::from_bloomed(&keccak(&self.address)) } } @@ -90,8 +91,8 @@ impl Call { /// Returns call action bloom. /// The bloom contains from and to addresses. pub fn bloom(&self) -> LogBloom { - LogBloom::from_bloomed(&self.from.sha3()) - .with_bloomed(&self.to.sha3()) + LogBloom::from_bloomed(&keccak(&self.from)) + .with_bloomed(&keccak(&self.to)) } } @@ -124,7 +125,7 @@ impl Create { /// Returns bloom create action bloom. /// The bloom contains only from address. pub fn bloom(&self) -> LogBloom { - LogBloom::from_bloomed(&self.from.sha3()) + LogBloom::from_bloomed(&keccak(&self.from)) } } @@ -173,7 +174,7 @@ pub struct Reward { impl Reward { /// Return reward action bloom. pub fn bloom(&self) -> LogBloom { - LogBloom::from_bloomed(&self.author.sha3()) + LogBloom::from_bloomed(&keccak(&self.author)) } } @@ -214,8 +215,8 @@ pub struct Suicide { impl Suicide { /// Return suicide action bloom. 
pub fn bloom(&self) -> LogBloom { - LogBloom::from_bloomed(&self.address.sha3()) - .with_bloomed(&self.refund_address.sha3()) + LogBloom::from_bloomed(&keccak(self.address)) + .with_bloomed(&keccak(self.refund_address)) } } diff --git a/ethcore/src/transaction.rs b/ethcore/src/transaction.rs index 636fb89d0..ce767cd0d 100644 --- a/ethcore/src/transaction.rs +++ b/ethcore/src/transaction.rs @@ -18,9 +18,11 @@ use std::ops::Deref; use rlp::*; -use util::sha3::Hashable; +use hash::keccak; use heapsize::HeapSizeOf; -use util::{H256, Address, U256, Bytes}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{Address, Bytes}; use ethkey::{Signature, Secret, Public, recover, public_to_address, Error as EthkeyError}; use error::*; use evm::Schedule; @@ -28,10 +30,10 @@ use header::BlockNumber; use ethjson; /// Fake address for unsigned transactions as defined by EIP-86. -pub const UNSIGNED_SENDER: Address = ::util::H160([0xff; 20]); +pub const UNSIGNED_SENDER: Address = ::bigint::hash::H160([0xff; 20]); /// System sender address for internal state updates. -pub const SYSTEM_ADDRESS: Address = ::util::H160([0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,0xff, 0xff, 0xff, 0xff,0xff, 0xff, 0xff, 0xff,0xff, 0xff, 0xff, 0xfe]); +pub const SYSTEM_ADDRESS: Address = ::bigint::hash::H160([0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,0xff, 0xff, 0xff, 0xff,0xff, 0xff, 0xff, 0xff,0xff, 0xff, 0xff, 0xfe]); /// Transaction action type. #[derive(Debug, Clone, PartialEq, Eq)] @@ -167,7 +169,7 @@ impl Transaction { pub fn hash(&self, chain_id: Option) -> H256 { let mut stream = RlpStream::new(); self.rlp_append_unsigned_transaction(&mut stream, chain_id); - stream.as_raw().sha3() + keccak(stream.as_raw()) } /// Signs the transaction as coming from `sender`. @@ -274,7 +276,7 @@ impl Decodable for UnverifiedTransaction { if d.item_count()? != 9 { return Err(DecoderError::RlpIncorrectListLen); } - let hash = d.as_raw().sha3(); + let hash = keccak(d.as_raw()); Ok(UnverifiedTransaction { unsigned: Transaction { nonce: d.val_at(0)?, @@ -299,7 +301,7 @@ impl Encodable for UnverifiedTransaction { impl UnverifiedTransaction { /// Used to compute hash of created transactions fn compute_hash(mut self) -> UnverifiedTransaction { - let hash = (&*self.rlp_bytes()).sha3(); + let hash = keccak(&*self.rlp_bytes()); self.hash = hash; self } @@ -357,7 +359,7 @@ impl UnverifiedTransaction { } } - /// Get the hash of this header (sha3 of the RLP). + /// Get the hash of this header (keccak of the RLP). 
pub fn hash(&self) -> H256 { self.hash } @@ -378,7 +380,7 @@ impl UnverifiedTransaction { self.recover_public()?; } if self.gas < U256::from(self.gas_required(&schedule)) { - return Err(TransactionError::InvalidGasLimit(::util::OutOfBounds{min: Some(U256::from(self.gas_required(&schedule))), max: None, found: self.gas}).into()) + return Err(TransactionError::InvalidGasLimit(::unexpected::OutOfBounds{min: Some(U256::from(self.gas_required(&schedule))), max: None, found: self.gas}).into()) } Ok(self) } @@ -545,7 +547,8 @@ impl From for PendingTransaction { #[cfg(test)] mod tests { use super::*; - use util::{Hashable, U256}; + use bigint::prelude::U256; + use hash::keccak; #[test] fn sender_test() { @@ -575,7 +578,7 @@ mod tests { value: U256::from(1), data: b"Hello!".to_vec() }.sign(&key.secret(), None); - assert_eq!(Address::from(key.public().sha3()), t.sender()); + assert_eq!(Address::from(keccak(key.public())), t.sender()); assert_eq!(t.chain_id(), None); } @@ -609,7 +612,7 @@ mod tests { value: U256::from(1), data: b"Hello!".to_vec() }.sign(&key.secret(), Some(69)); - assert_eq!(Address::from(key.public().sha3()), t.sender()); + assert_eq!(Address::from(keccak(key.public())), t.sender()); assert_eq!(t.chain_id(), Some(69)); } diff --git a/ethcore/src/tx_filter.rs b/ethcore/src/tx_filter.rs new file mode 100644 index 000000000..0ba986608 --- /dev/null +++ b/ethcore/src/tx_filter.rs @@ -0,0 +1,234 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Smart contract based transaction filter. + +use std::sync::Weak; +use std::collections::HashMap; +use std::collections::hash_map::Entry; +use native_contracts::TransactAcl as Contract; +use client::{BlockChainClient, BlockId, ChainNotify}; +use util::{Address, Bytes}; +use bigint::hash::H256; +use parking_lot::{Mutex, RwLock}; +use futures::{self, Future}; +use spec::CommonParams; +use transaction::{Action, SignedTransaction}; +use hash::KECCAK_EMPTY; + +const MAX_CACHE_SIZE: usize = 4096; + +mod tx_permissions { + pub const _ALL: u32 = 0xffffffff; + pub const NONE: u32 = 0x0; + pub const BASIC: u32 = 0b00000001; + pub const CALL: u32 = 0b00000010; + pub const CREATE: u32 = 0b00000100; + pub const _PRIVATE: u32 = 0b00001000; +} + +/// Connection filter that uses a contract to manage permissions. +pub struct TransactionFilter { + contract: Mutex>, + client: RwLock>>, + contract_address: Address, + permission_cache: Mutex>, +} + +impl TransactionFilter { + /// Create a new instance if address is specified in params. + pub fn from_params(params: &CommonParams) -> Option { + params.transaction_permission_contract.map(|address| + TransactionFilter { + contract: Mutex::new(None), + client: RwLock::new(None), + contract_address: address, + permission_cache: Mutex::new(HashMap::new()), + } + ) + } + + /// Clear cached permissions. 
+ pub fn clear_cache(&self) { + self.permission_cache.lock().clear(); + } + + /// Set client reference to be used for contract call. + pub fn register_client(&self, client: Weak<BlockChainClient>) { + *self.client.write() = Some(client); + } + + /// Check if transaction is allowed at given block. + pub fn transaction_allowed(&self, parent_hash: &H256, transaction: &SignedTransaction) -> bool { + self.client.read().as_ref().map_or(false, |client| { + let mut cache = self.permission_cache.lock(); let len = cache.len(); + let client = match client.upgrade() { + Some(client) => client, + _ => return false, + }; + let tx_type = match transaction.action { + Action::Create => tx_permissions::CREATE, + Action::Call(address) => if client.code_hash(&address, BlockId::Hash(*parent_hash)).map_or(false, |c| c != KECCAK_EMPTY) { + tx_permissions::CALL + } else { + tx_permissions::BASIC + } + }; + let sender = transaction.sender(); + match cache.entry((*parent_hash, sender)) { + Entry::Occupied(entry) => *entry.get() & tx_type != 0, + Entry::Vacant(entry) => { + let mut contract = self.contract.lock(); + if contract.is_none() { + *contract = Some(Contract::new(self.contract_address)); + } + + let permissions = match &*contract { + &Some(ref contract) => { + contract.allowed_tx_types( + |addr, data| futures::done(client.call_contract(BlockId::Hash(*parent_hash), addr, data)), + sender, + ).wait().unwrap_or_else(|e| { + debug!("Error calling tx permissions contract: {:?}", e); + tx_permissions::NONE + }) + } + _ => tx_permissions::NONE, + }; + + if len < MAX_CACHE_SIZE { + entry.insert(permissions); + } + trace!("Permissions required: {}, got: {}", tx_type, permissions); + permissions & tx_type != 0 + } + } + }) + } +} + +impl ChainNotify for TransactionFilter { + fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: u64) { + if !imported.is_empty() { + self.clear_cache(); + } + } +} + +#[cfg(test)] +mod test { + use std::sync::{Arc, Weak}; + use spec::Spec; + use client::{BlockChainClient, Client, ClientConfig, BlockId}; + use miner::Miner; + use util::{Address}; + use io::IoChannel; + use ethkey::{Secret, KeyPair}; + use super::TransactionFilter; + use transaction::{Transaction, Action}; + + /// Contract code: https://gist.github.com/arkpar/38a87cb50165b7e683585eec71acb05a + #[test] + fn transaction_filter() { + let spec_data = r#" + { + "name": "TestNodeFilterContract", + "engine": { + "authorityRound": { + "params": { + "stepDuration": 1, + "startStep": 2, + "validators": { + "contract": "0x0000000000000000000000000000000000000000" + } + } + } + }, + "params": { + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID" : "0x69", + "gasLimitBoundDivisor": "0x0400", + "transactionPermissionContract": "0x0000000000000000000000000000000000000005" + }, + "genesis": { + "seal": { + "generic": "0xc180" + }, + "difficulty": "0x20000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x", + "gasLimit": "0x222222" + }, + "accounts": { + "0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0000000000000000000000000000000000000003":
{ "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0000000000000000000000000000000000000005": { + "balance": "1", + "constructor": "6060604052341561000f57600080fd5b5b6101868061001f6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063e17512211461003e575b600080fd5b341561004957600080fd5b610075600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610097565b604051808263ffffffff1663ffffffff16815260200191505060405180910390f35b6000737e5f4552091a69125d5dfcb7b8c2659029395bdf8273ffffffffffffffffffffffffffffffffffffffff1614156100d75763ffffffff9050610155565b732b5ad5c4795c026514f8317c7a215e218dccd6cf8273ffffffffffffffffffffffffffffffffffffffff1614156101155760026001179050610155565b736813eb9362372eef6200f3b1dbc3f819671cba698273ffffffffffffffffffffffffffffffffffffffff1614156101505760019050610155565b600090505b9190505600a165627a7a72305820f1f21cb978925a8a92c6e30c8c81adf598adff6d1ef941cf5ed6c0ec7ad1ae3d0029" + } + } + } + "#; + + let spec = Spec::load(::std::env::temp_dir(), spec_data.as_bytes()).unwrap(); + let client_db = Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0))); + + let client = Client::new( + ClientConfig::default(), + &spec, + client_db, + Arc::new(Miner::with_spec(&spec)), + IoChannel::disconnected(), + ).unwrap(); + let key1 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000001")).unwrap(); + let key2 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000002")).unwrap(); + let key3 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000003")).unwrap(); + let key4 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000004")).unwrap(); + + let filter = TransactionFilter::from_params(spec.params()).unwrap(); + filter.register_client(Arc::downgrade(&client) as Weak); + let mut basic_tx = Transaction::default(); + basic_tx.action = Action::Call(Address::from("000000000000000000000000000000000000032")); + let create_tx = Transaction::default(); + let mut call_tx = Transaction::default(); + call_tx.action = Action::Call(Address::from("0000000000000000000000000000000000000005")); + + let genesis = client.block_hash(BlockId::Latest).unwrap(); + + assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key1.secret(), None))); + assert!(filter.transaction_allowed(&genesis, &create_tx.clone().sign(key1.secret(), None))); + assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key1.secret(), None))); + + assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key2.secret(), None))); + assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key2.secret(), None))); + assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key2.secret(), None))); + + assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key3.secret(), None))); + assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key3.secret(), None))); + assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key3.secret(), None))); + + assert!(!filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key4.secret(), None))); + assert!(!filter.transaction_allowed(&genesis, 
&create_tx.clone().sign(key4.secret(), None))); + assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key4.secret(), None))); + } +} + diff --git a/ethcore/src/verification/queue/kind.rs b/ethcore/src/verification/queue/kind.rs index 4b547c1b7..686311579 100644 --- a/ethcore/src/verification/queue/kind.rs +++ b/ethcore/src/verification/queue/kind.rs @@ -20,7 +20,8 @@ use engines::Engine; use error::Error; use heapsize::HeapSizeOf; -use util::{H256, U256}; +use bigint::prelude::U256; +use bigint::hash::H256; pub use self::blocks::Blocks; pub use self::headers::Headers; @@ -74,7 +75,9 @@ pub mod blocks { use verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered}; use heapsize::HeapSizeOf; - use util::{Bytes, H256, U256}; + use bigint::prelude::U256; + use bigint::hash::H256; + use util::Bytes; /// A mode for verifying blocks. pub struct Blocks; @@ -169,8 +172,8 @@ pub mod headers { use header::Header; use verification::verify_header_params; - use util::hash::H256; - use util::U256; + use bigint::prelude::U256; + use bigint::hash::H256; impl BlockLike for Header { fn hash(&self) -> H256 { self.hash() } diff --git a/ethcore/src/verification/queue/mod.rs b/ethcore/src/verification/queue/mod.rs index 8fc4a5919..6e2e2f128 100644 --- a/ethcore/src/verification/queue/mod.rs +++ b/ethcore/src/verification/queue/mod.rs @@ -23,7 +23,9 @@ use std::sync::{Condvar as SCondvar, Mutex as SMutex, Arc}; use std::cmp; use std::collections::{VecDeque, HashSet, HashMap}; use heapsize::HeapSizeOf; -use util::*; +use bigint::prelude::U256; +use bigint::hash::H256; +use parking_lot::{Condvar, Mutex, RwLock}; use io::*; use error::*; use engines::Engine; diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index 5e86fba9d..62639e849 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -22,8 +22,12 @@ //! 3. Final verification against the blockchain done before enactment. 
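The transaction filter added above reduces to a bitmask test: the ACL contract reports a u32 per sender, the transaction's kind selects a required bit (CREATE for contract creation, CALL when the callee has code, BASIC for a plain transfer), and the transaction passes only if that bit is set. A simplified, self-contained sketch of just that decision, with the contract call and the per-block cache stubbed out:

// Simplified sketch of the permission check in the new tx_filter module.
// The on-chain lookup and the (parent_hash, sender) cache are omitted; only
// the bitmask decision is shown.
const NONE: u32 = 0x0;
const BASIC: u32 = 0b0001;
const CALL: u32 = 0b0010;
const CREATE: u32 = 0b0100;

enum Action<'a> {
    Create,
    // The callee's code; empty means "no contract at that address".
    Call { callee_code: &'a [u8] },
}

// Required permission bit, mirroring the match on transaction.action above.
fn required_bit(action: &Action) -> u32 {
    match *action {
        Action::Create => CREATE,
        Action::Call { callee_code } if !callee_code.is_empty() => CALL,
        Action::Call { .. } => BASIC,
    }
}

// `permissions` is what the ACL contract would report for the sender.
fn transaction_allowed(permissions: u32, action: &Action) -> bool {
    permissions & required_bit(action) != 0
}

fn main() {
    // A sender allowed to call and transfer but not to create contracts
    // (the shape of one of the keys exercised in the test spec above).
    let mask = BASIC | CALL;
    assert!(transaction_allowed(mask, &Action::Call { callee_code: &[0x60u8, 0x60] }));
    assert!(transaction_allowed(mask, &Action::Call { callee_code: &[] }));
    assert!(!transaction_allowed(mask, &Action::Create));
    assert!(!transaction_allowed(NONE, &Action::Create));
}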
use std::collections::HashSet; +use hash::keccak; +use triehash::ordered_trie_root; use heapsize::HeapSizeOf; +use bigint::hash::H256; use util::*; +use unexpected::{Mismatch, OutOfBounds}; use engines::Engine; use error::{BlockError, Error}; use blockchain::*; @@ -257,7 +261,7 @@ fn verify_block_integrity(block: &[u8], transactions_root: &H256, uncles_hash: & if expected_root != transactions_root { return Err(From::from(BlockError::InvalidTransactionsRoot(Mismatch { expected: expected_root.clone(), found: transactions_root.clone() }))) } - let expected_uncles = &block.at(2)?.as_raw().sha3(); + let expected_uncles = &keccak(block.at(2)?.as_raw()); if expected_uncles != uncles_hash { return Err(From::from(BlockError::InvalidUnclesHash(Mismatch { expected: expected_uncles.clone(), found: uncles_hash.clone() }))) } @@ -267,6 +271,11 @@ fn verify_block_integrity(block: &[u8], transactions_root: &H256, uncles_hash: & #[cfg(test)] mod tests { use std::collections::{BTreeMap, HashMap}; + use hash::keccak; + use bigint::prelude::U256; + use bigint::hash::{H256, H2048}; + use triehash::ordered_trie_root; + use unexpected::{Mismatch, OutOfBounds}; use util::*; use ethkey::{Random, Generator}; use header::*; @@ -325,7 +334,7 @@ mod tests { pub fn insert(&mut self, bytes: Bytes) { let number = BlockView::new(&bytes).header_view().number(); - let hash = BlockView::new(&bytes).header_view().sha3(); + let hash = BlockView::new(&bytes).header_view().hash(); self.blocks.insert(hash.clone(), bytes); self.numbers.insert(number, hash.clone()); } @@ -483,7 +492,7 @@ mod tests { let good_uncles = vec![ good_uncle1.clone(), good_uncle2.clone() ]; let mut uncles_rlp = RlpStream::new(); uncles_rlp.append_list(&good_uncles); - let good_uncles_hash = uncles_rlp.as_raw().sha3(); + let good_uncles_hash = keccak(uncles_rlp.as_raw()); let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::(t).into_vec())); let mut parent = good.clone(); diff --git a/ethcore/src/views/block.rs b/ethcore/src/views/block.rs index f00ca548f..27906c6c5 100644 --- a/ethcore/src/views/block.rs +++ b/ethcore/src/views/block.rs @@ -16,6 +16,8 @@ //! View onto block rlp. +use hash::keccak; +use bigint::hash::H256; use util::*; use header::*; use transaction::*; @@ -44,7 +46,7 @@ impl<'a> BlockView<'a> { /// Block header hash. pub fn hash(&self) -> H256 { - self.sha3() + self.header_view().hash() } /// Return reference to underlaying rlp. @@ -75,7 +77,7 @@ impl<'a> BlockView<'a> { /// Return List of transactions with additional localization info. pub fn localized_transactions(&self) -> Vec { let header = self.header_view(); - let block_hash = header.sha3(); + let block_hash = header.hash(); let block_number = header.number(); self.transactions() .into_iter() @@ -101,7 +103,7 @@ impl<'a> BlockView<'a> { /// Return transaction hashes. pub fn transaction_hashes(&self) -> Vec { - self.rlp.at(1).iter().map(|rlp| rlp.as_raw().sha3()).collect() + self.rlp.at(1).iter().map(|rlp| keccak(rlp.as_raw())).collect() } /// Returns transaction at given index without deserializing unnecessary data. @@ -112,7 +114,7 @@ impl<'a> BlockView<'a> { /// Returns localized transaction at given index. 
pub fn localized_transaction_at(&self, index: usize) -> Option { let header = self.header_view(); - let block_hash = header.sha3(); + let block_hash = header.hash(); let block_number = header.number(); self.transaction_at(index).map(|t| LocalizedTransaction { signed: t, @@ -140,7 +142,7 @@ impl<'a> BlockView<'a> { /// Return list of uncle hashes of given block. pub fn uncle_hashes(&self) -> Vec { - self.rlp.at(2).iter().map(|rlp| rlp.as_raw().sha3()).collect() + self.rlp.at(2).iter().map(|rlp| keccak(rlp.as_raw())).collect() } /// Return nth uncle. @@ -154,17 +156,11 @@ impl<'a> BlockView<'a> { } } -impl<'a> Hashable for BlockView<'a> { - fn sha3(&self) -> H256 { - self.header_view().sha3() - } -} - #[cfg(test)] mod tests { use std::str::FromStr; use rustc_hex::FromHex; - use util::H256; + use bigint::hash::H256; use super::BlockView; #[test] diff --git a/ethcore/src/views/body.rs b/ethcore/src/views/body.rs index 038e152b0..c9f78fc1c 100644 --- a/ethcore/src/views/body.rs +++ b/ethcore/src/views/body.rs @@ -16,6 +16,8 @@ //! View onto block body rlp. +use hash::keccak; +use bigint::hash::H256; use util::*; use header::*; use transaction::*; @@ -78,7 +80,7 @@ impl<'a> BodyView<'a> { /// Return transaction hashes. pub fn transaction_hashes(&self) -> Vec { - self.rlp.at(0).iter().map(|rlp| rlp.as_raw().sha3()).collect() + self.rlp.at(0).iter().map(|rlp| keccak(rlp.as_raw())).collect() } /// Returns transaction at given index without deserializing unnecessary data. @@ -114,7 +116,7 @@ impl<'a> BodyView<'a> { /// Return list of uncle hashes of given block. pub fn uncle_hashes(&self) -> Vec { - self.rlp.at(1).iter().map(|rlp| rlp.as_raw().sha3()).collect() + self.rlp.at(1).iter().map(|rlp| keccak(rlp.as_raw())).collect() } /// Return nth uncle. diff --git a/ethcore/src/views/header.rs b/ethcore/src/views/header.rs index 4506d98ec..c3c446e19 100644 --- a/ethcore/src/views/header.rs +++ b/ethcore/src/views/header.rs @@ -16,7 +16,10 @@ //! View onto block header rlp -use util::{U256, Bytes, Hashable, H256, Address, H2048}; +use hash::keccak; +use bigint::prelude::U256; +use bigint::hash::{H256, H2048}; +use util::{Bytes, Address}; use rlp::Rlp; use header::BlockNumber; @@ -41,7 +44,9 @@ impl<'a> HeaderView<'a> { } /// Returns header hash. - pub fn hash(&self) -> H256 { self.sha3() } + pub fn hash(&self) -> H256 { + keccak(self.rlp.as_raw()) + } /// Returns raw rlp. pub fn rlp(&self) -> &Rlp<'a> { &self.rlp } @@ -95,17 +100,13 @@ impl<'a> HeaderView<'a> { } } -impl<'a> Hashable for HeaderView<'a> { - fn sha3(&self) -> H256 { - self.rlp.as_raw().sha3() - } -} - #[cfg(test)] mod tests { use std::str::FromStr; use rustc_hex::FromHex; - use util::{H256, Address, H2048, U256}; + use bigint::prelude::U256; + use bigint::hash::{H256, H2048}; + use util::Address; use super::HeaderView; #[test] diff --git a/ethcore/src/views/transaction.rs b/ethcore/src/views/transaction.rs index 11c7fc2f1..25a6fde3a 100644 --- a/ethcore/src/views/transaction.rs +++ b/ethcore/src/views/transaction.rs @@ -15,7 +15,10 @@ // along with Parity. If not, see . //! View onto transaction rlp -use util::{U256, Bytes, Hashable, H256}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Bytes; +use hash::keccak; use rlp::Rlp; /// View onto transaction rlp. @@ -43,6 +46,11 @@ impl<'a> TransactionView<'a> { &self.rlp } + /// Returns transaction hash. + pub fn hash(&self) -> H256 { + keccak(self.rlp.as_raw()) + } + /// Get the nonce field of the transaction. 
pub fn nonce(&self) -> U256 { self.rlp.val_at(0) } @@ -68,17 +76,11 @@ impl<'a> TransactionView<'a> { pub fn s(&self) -> U256 { self.rlp.val_at(8) } } -impl<'a> Hashable for TransactionView<'a> { - fn sha3(&self) -> H256 { - self.rlp.as_raw().sha3() - } -} - #[cfg(test)] mod tests { use std::str::FromStr; use rustc_hex::FromHex; - use util::U256; + use bigint::prelude::U256; use super::TransactionView; #[test] diff --git a/ethcore/types/Cargo.toml b/ethcore/types/Cargo.toml index 85a5dfa98..8e156f416 100644 --- a/ethcore/types/Cargo.toml +++ b/ethcore/types/Cargo.toml @@ -8,8 +8,10 @@ authors = ["Parity Technologies "] rlp = { path = "../../util/rlp" } rlp_derive = { path = "../../util/rlp_derive" } ethcore-util = { path = "../../util" } +ethcore-bigint = { path = "../../util/bigint" } ethjson = { path = "../../json" } bloomable = { path = "../../util/bloomable" } +hash = { path = "../../util/hash" } heapsize = "0.4" [dev-dependencies] diff --git a/ethcore/types/src/account_diff.rs b/ethcore/types/src/account_diff.rs index 337c5df1b..b862fb220 100644 --- a/ethcore/types/src/account_diff.rs +++ b/ethcore/types/src/account_diff.rs @@ -19,7 +19,9 @@ use std::cmp::*; use std::fmt; use std::collections::BTreeMap; -use util::{U256, H256, Bytes}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Bytes; #[derive(Debug, PartialEq, Eq, Clone)] /// Diff type for specifying a change (or not). diff --git a/ethcore/types/src/basic_account.rs b/ethcore/types/src/basic_account.rs index f30872f6b..0e9040fc3 100644 --- a/ethcore/types/src/basic_account.rs +++ b/ethcore/types/src/basic_account.rs @@ -16,7 +16,8 @@ //! Basic account type -- the decoded RLP from the state trie. -use util::{U256, H256}; +use bigint::prelude::U256; +use bigint::prelude::H256; /// Basic account type. #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] diff --git a/ethcore/types/src/blockchain_info.rs b/ethcore/types/src/blockchain_info.rs index 4ab1432b7..87c581508 100644 --- a/ethcore/types/src/blockchain_info.rs +++ b/ethcore/types/src/blockchain_info.rs @@ -18,7 +18,8 @@ use std::fmt; -use util::{U256, H256}; +use bigint::prelude::U256; +use bigint::hash::H256; use security_level::SecurityLevel; use {BlockNumber}; diff --git a/ethcore/types/src/filter.rs b/ethcore/types/src/filter.rs index 6e344b4ef..807ea92cd 100644 --- a/ethcore/types/src/filter.rs +++ b/ethcore/types/src/filter.rs @@ -16,10 +16,12 @@ //! Blockchain filter -use util::{Address, H256, Hashable, H2048}; +use util::Address; +use bigint::hash::{H256, H2048}; use bloomable::Bloomable; use ids::BlockId; use log_entry::LogEntry; +use hash::keccak; /// Blockchain Filter. #[derive(Debug, PartialEq)] @@ -78,7 +80,7 @@ impl Filter { Some(ref addresses) if !addresses.is_empty() => addresses.iter().map(|ref address| { let mut bloom = H2048::default(); - bloom.shift_bloomed(&address.sha3()); + bloom.shift_bloomed(&keccak(address)); bloom }).collect(), _ => vec![H2048::default()] @@ -89,7 +91,7 @@ impl Filter { Some(ref topics) => bs.into_iter().flat_map(|bloom| { topics.into_iter().map(|topic| { let mut b = bloom.clone(); - b.shift_bloomed(&topic.sha3()); + b.shift_bloomed(&keccak(topic)); b }).collect::>() }).collect() diff --git a/ethcore/types/src/ids.rs b/ethcore/types/src/ids.rs index feda18307..150bb77b1 100644 --- a/ethcore/types/src/ids.rs +++ b/ethcore/types/src/ids.rs @@ -16,7 +16,7 @@ //! Unique identifiers. -use util::hash::H256; +use bigint::hash::H256; use {BlockNumber}; /// Uniquely identifies block. 
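The bloom changes in the filter and log-entry code above (from_bloomed, with_bloomed, shift_bloomed over keccak(address) and keccak(topic)) all implement the standard Ethereum 2048-bit log bloom: each keccak hash contributes three bit positions, taken as 11-bit values from its first three byte pairs. A dependency-free sketch of that rule, operating on an already-computed 32-byte hash:

// Sketch of the Ethereum log-bloom rule behind shift_bloomed(&keccak(..)):
// bytes (0,1), (2,3) and (4,5) of the keccak hash each give an 11-bit index
// (0..2047), and the matching bits are set in a 256-byte (2048-bit) filter.
struct Bloom([u8; 256]);

impl Bloom {
    fn new() -> Bloom {
        Bloom([0u8; 256])
    }

    // The three bit indices contributed by one 32-byte keccak hash.
    fn bits(hash: &[u8; 32]) -> [usize; 3] {
        let mut out = [0usize; 3];
        for i in 0..3 {
            let hi = hash[2 * i] as usize;
            let lo = hash[2 * i + 1] as usize;
            out[i] = ((hi << 8) | lo) & 0x7ff;
        }
        out
    }

    // Equivalent of with_bloomed / shift_bloomed: OR the three bits in.
    fn accrue(&mut self, hash: &[u8; 32]) {
        for &bit in Self::bits(hash).iter() {
            // Bit 0 lives in the last byte, matching H2048's big-endian layout.
            self.0[255 - bit / 8] |= 1 << (bit % 8);
        }
    }

    // Membership test: a hash may be present only if all three bits are set.
    fn contains(&self, hash: &[u8; 32]) -> bool {
        Self::bits(hash).iter().all(|&bit| self.0[255 - bit / 8] & (1 << (bit % 8)) != 0)
    }
}

fn main() {
    // Stand-in for keccak(address); any 32-byte value demonstrates the rule.
    let hash = [0xabu8; 32];
    let mut bloom = Bloom::new();
    assert!(!bloom.contains(&hash));
    bloom.accrue(&hash);
    assert!(bloom.contains(&hash));
}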
diff --git a/ethcore/types/src/lib.rs b/ethcore/types/src/lib.rs index 1143b0d90..a3af37a69 100644 --- a/ethcore/types/src/lib.rs +++ b/ethcore/types/src/lib.rs @@ -17,11 +17,13 @@ //! Types used in the public API extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate ethjson; extern crate rlp; #[macro_use] extern crate rlp_derive; extern crate bloomable; +extern crate hash; extern crate heapsize; #[cfg(test)] diff --git a/ethcore/types/src/log_entry.rs b/ethcore/types/src/log_entry.rs index 058174d8b..1878c66b9 100644 --- a/ethcore/types/src/log_entry.rs +++ b/ethcore/types/src/log_entry.rs @@ -17,14 +17,16 @@ //! Log entry type definition. use std::ops::Deref; +use hash::keccak; use heapsize::HeapSizeOf; -use util::{H256, Address, Bytes, Hashable}; +use util::{Address, Bytes}; +use bigint::hash::H256; use bloomable::Bloomable; use {BlockNumber}; use ethjson; -pub type LogBloom = ::util::H2048; +pub type LogBloom = ::bigint::hash::H2048; /// A record of execution for a `LOG` operation. #[derive(Default, Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] @@ -46,7 +48,7 @@ impl HeapSizeOf for LogEntry { impl LogEntry { /// Calculates the bloom of this log entry. pub fn bloom(&self) -> LogBloom { - self.topics.iter().fold(LogBloom::from_bloomed(&self.address.sha3()), |b, t| b.with_bloomed(&t.sha3())) + self.topics.iter().fold(LogBloom::from_bloomed(&keccak(&self.address)), |b, t| b.with_bloomed(&keccak(t))) } } @@ -94,7 +96,7 @@ mod tests { #[test] fn test_empty_log_bloom() { - let bloom = "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".parse::().unwrap(); + let bloom = "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".parse::<::bigint::hash::H2048>().unwrap(); let address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse::
().unwrap(); let log = LogEntry { address: address, diff --git a/ethcore/types/src/receipt.rs b/ethcore/types/src/receipt.rs index 63a122179..759f67050 100644 --- a/ethcore/types/src/receipt.rs +++ b/ethcore/types/src/receipt.rs @@ -16,7 +16,9 @@ //! Receipt -use util::{H256, U256, Address}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Address; use heapsize::HeapSizeOf; use rlp::*; diff --git a/ethcore/types/src/snapshot_manifest.rs b/ethcore/types/src/snapshot_manifest.rs index 2dcce2904..8b7503bef 100644 --- a/ethcore/types/src/snapshot_manifest.rs +++ b/ethcore/types/src/snapshot_manifest.rs @@ -16,7 +16,7 @@ //! Snapshot manifest type definition -use util::hash::H256; +use bigint::hash::H256; use rlp::*; use util::Bytes; diff --git a/ethcore/types/src/tree_route.rs b/ethcore/types/src/tree_route.rs index a47e94ee5..f21e089a5 100644 --- a/ethcore/types/src/tree_route.rs +++ b/ethcore/types/src/tree_route.rs @@ -16,7 +16,7 @@ //! Tree route info type definition -use util::H256; +use bigint::hash::H256; /// Represents a tree route between `from` block and `to` block: #[derive(Debug)] diff --git a/ethcore/vm/Cargo.toml b/ethcore/vm/Cargo.toml index 50efe936c..6e5c7d3f2 100644 --- a/ethcore/vm/Cargo.toml +++ b/ethcore/vm/Cargo.toml @@ -6,9 +6,11 @@ authors = ["Parity Technologies "] [dependencies] byteorder = "1.0" ethcore-util = { path = "../../util" } +ethcore-bigint = { path = "../../util/bigint" } log = "0.3" common-types = { path = "../types" } evmjit = { path = "../../evmjit", optional = true } ethjson = { path = "../../json" } lazy_static = "0.2" -rlp = { path = "../../util/rlp" } \ No newline at end of file +rlp = { path = "../../util/rlp" } +hash = { path = "../../util/hash" } diff --git a/ethcore/vm/src/action_params.rs b/ethcore/vm/src/action_params.rs index 401d7ee57..89e5da801 100644 --- a/ethcore/vm/src/action_params.rs +++ b/ethcore/vm/src/action_params.rs @@ -15,9 +15,10 @@ // along with Parity. If not, see . //! Evm input params. 
-use util::{Address, Bytes, U256}; -use util::hash::{H256}; -use util::sha3::{Hashable, SHA3_EMPTY}; +use bigint::prelude::U256; +use bigint::hash::{H256}; +use util::{Address, Bytes}; +use hash::{keccak, KECCAK_EMPTY}; use ethjson; use call_type::CallType; @@ -87,7 +88,7 @@ impl Default for ActionParams { fn default() -> ActionParams { ActionParams { code_address: Address::new(), - code_hash: Some(SHA3_EMPTY), + code_hash: Some(KECCAK_EMPTY), address: Address::new(), sender: Address::new(), origin: Address::new(), @@ -106,7 +107,7 @@ impl From for ActionParams { let address: Address = t.address.into(); ActionParams { code_address: Address::new(), - code_hash: Some((&*t.code).sha3()), + code_hash: Some(keccak(&*t.code)), address: address, sender: t.sender.into(), origin: t.origin.into(), diff --git a/ethcore/vm/src/env_info.rs b/ethcore/vm/src/env_info.rs index 8634d9c75..cb8ff2241 100644 --- a/ethcore/vm/src/env_info.rs +++ b/ethcore/vm/src/env_info.rs @@ -18,7 +18,10 @@ use std::cmp; use std::sync::Arc; -use util::{U256, Address, H256, Hashable}; +use hash::keccak; +use bigint::hash::H256; +use bigint::prelude::U256; +use util::Address; use types::BlockNumber; use ethjson; @@ -68,7 +71,7 @@ impl From for EnvInfo { difficulty: e.difficulty.into(), gas_limit: e.gas_limit.into(), timestamp: e.timestamp.into(), - last_hashes: Arc::new((1..cmp::min(number + 1, 257)).map(|i| format!("{}", number - i).as_bytes().sha3()).collect()), + last_hashes: Arc::new((1..cmp::min(number + 1, 257)).map(|i| keccak(format!("{}", number - i).as_bytes())).collect()), gas_used: U256::default(), } } @@ -78,7 +81,8 @@ impl From for EnvInfo { mod tests { use std::str::FromStr; use super::*; - use util::{U256, Address}; + use bigint::prelude::U256; + use util::Address; use ethjson; #[test] diff --git a/ethcore/vm/src/ext.rs b/ethcore/vm/src/ext.rs index 54871e511..d292af8d8 100644 --- a/ethcore/vm/src/ext.rs +++ b/ethcore/vm/src/ext.rs @@ -17,6 +17,8 @@ //! Interface for Evm externalities. use std::sync::Arc; +use bigint::prelude::U256; +use bigint::hash::H256; use util::*; use call_type::CallType; use env_info::EnvInfo; diff --git a/ethcore/vm/src/lib.rs b/ethcore/vm/src/lib.rs index 0c9e32dc6..ee8873b01 100644 --- a/ethcore/vm/src/lib.rs +++ b/ethcore/vm/src/lib.rs @@ -17,9 +17,11 @@ //! Virtual machines support library extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate common_types as types; extern crate ethjson; extern crate rlp; +extern crate hash; mod action_params; mod call_type; @@ -45,4 +47,4 @@ pub trait Vm { /// It returns either an error, a known amount of gas left, or parameters to be used /// to compute the final gas left. fn exec(&mut self, params: ActionParams, ext: &mut Ext) -> Result; -} \ No newline at end of file +} diff --git a/ethcore/vm/src/return_data.rs b/ethcore/vm/src/return_data.rs index 3c8bd182f..c7d5f39dc 100644 --- a/ethcore/vm/src/return_data.rs +++ b/ethcore/vm/src/return_data.rs @@ -14,7 +14,7 @@ //! Return data structures -use util::U256; +use bigint::prelude::U256; /// Return data buffer. Holds memory from a previous call and a slice into that memory. #[derive(Debug)] @@ -65,4 +65,4 @@ pub enum GasLeft { /// Apply or revert state changes on revert. 
apply_state: bool }, -} \ No newline at end of file +} diff --git a/ethcore/vm/src/tests.rs b/ethcore/vm/src/tests.rs index c3a290d41..923216114 100644 --- a/ethcore/vm/src/tests.rs +++ b/ethcore/vm/src/tests.rs @@ -17,7 +17,9 @@ use std::sync::Arc; use std::collections::{HashMap, HashSet}; -use util::{H256, U256, Address, Bytes}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{Address, Bytes}; use { CallType, Schedule, EnvInfo, ReturnData, Ext, ContractCreateResult, MessageCallResult, diff --git a/ethcore/wasm/Cargo.toml b/ethcore/wasm/Cargo.toml index bbeeeffc5..7f3c41917 100644 --- a/ethcore/wasm/Cargo.toml +++ b/ethcore/wasm/Cargo.toml @@ -6,8 +6,9 @@ authors = ["Parity Technologies "] [dependencies] byteorder = "1.0" ethcore-util = { path = "../../util" } +ethcore-bigint = { path = "../../util/bigint" } log = "0.3" parity-wasm = "0.12" wasm-utils = { git = "https://github.com/paritytech/wasm-utils" } vm = { path = "../vm" } -ethcore-logger = { path = "../../logger" } \ No newline at end of file +ethcore-logger = { path = "../../logger" } diff --git a/ethcore/wasm/src/call_args.rs b/ethcore/wasm/src/call_args.rs index b4cce4982..7fb50bff3 100644 --- a/ethcore/wasm/src/call_args.rs +++ b/ethcore/wasm/src/call_args.rs @@ -16,7 +16,8 @@ //! Wasm evm call arguments helper -use util::{U256, H160}; +use bigint::prelude::U256; +use bigint::hash::H160; /// Input part of the wasm call descriptor pub struct CallArgs { @@ -59,4 +60,4 @@ impl CallArgs { pub fn len(&self) -> u32 { self.data.len() as u32 + 92 } -} \ No newline at end of file +} diff --git a/ethcore/wasm/src/lib.rs b/ethcore/wasm/src/lib.rs index 3fec0f781..fec270be2 100644 --- a/ethcore/wasm/src/lib.rs +++ b/ethcore/wasm/src/lib.rs @@ -18,6 +18,7 @@ extern crate vm; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; #[macro_use] extern crate log; extern crate ethcore_logger; extern crate byteorder; @@ -153,4 +154,4 @@ impl From for vm::Error { fn from(err: runtime::Error) -> vm::Error { vm::Error::Wasm(format!("WASM runtime-error: {:?}", err)) } -} \ No newline at end of file +} diff --git a/ethcore/wasm/src/runtime.rs b/ethcore/wasm/src/runtime.rs index e1def857e..f7fb07473 100644 --- a/ethcore/wasm/src/runtime.rs +++ b/ethcore/wasm/src/runtime.rs @@ -22,7 +22,9 @@ use byteorder::{LittleEndian, ByteOrder}; use vm; use parity_wasm::interpreter; -use util::{Address, H256, U256}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Address; use vm::CallType; use super::ptr::{WasmPtr, Error as PtrError}; diff --git a/ethcore/wasm/src/tests.rs b/ethcore/wasm/src/tests.rs index de0b528a1..f0a828394 100644 --- a/ethcore/wasm/src/tests.rs +++ b/ethcore/wasm/src/tests.rs @@ -16,7 +16,9 @@ use std::sync::Arc; use byteorder::{LittleEndian, ByteOrder}; -use util::{U256, H256, Address}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Address; use super::WasmInterpreter; use vm::{self, Vm, GasLeft, ActionParams, ActionValue}; diff --git a/evmbin/Cargo.toml b/evmbin/Cargo.toml index 5538e10cc..310a2de25 100644 --- a/evmbin/Cargo.toml +++ b/evmbin/Cargo.toml @@ -16,6 +16,7 @@ serde_derive = "1.0" ethcore = { path = "../ethcore" } ethjson = { path = "../json" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } evm = { path = "../ethcore/evm" } vm = { path = "../ethcore/vm" } panic_hook = { path = "../panic_hook" } diff --git a/evmbin/benches/mod.rs b/evmbin/benches/mod.rs index f4deda88e..6b6746e74 100644 --- a/evmbin/benches/mod.rs +++ 
b/evmbin/benches/mod.rs @@ -26,13 +26,14 @@ extern crate test; extern crate ethcore; extern crate evm; extern crate ethcore_util; +extern crate ethcore_bigint; extern crate rustc_hex; use self::test::{Bencher, black_box}; use evm::run_vm; use ethcore::vm::ActionParams; -use ethcore_util::U256; +use ethcore_bigint::prelude::U256; use rustc_hex::FromHex; #[bench] diff --git a/evmbin/src/display/json.rs b/evmbin/src/display/json.rs index 39147ffa3..47e9ccfe5 100644 --- a/evmbin/src/display/json.rs +++ b/evmbin/src/display/json.rs @@ -18,7 +18,9 @@ use ethcore::trace; use std::collections::HashMap; -use util::{U256, H256, ToPretty}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::ToPretty; use display; use info as vm; diff --git a/evmbin/src/display/mod.rs b/evmbin/src/display/mod.rs index 2c25fe721..50acac20c 100644 --- a/evmbin/src/display/mod.rs +++ b/evmbin/src/display/mod.rs @@ -17,7 +17,7 @@ //! VM Output display utils. use std::time::Duration; -use util::U256; +use bigint::prelude::U256; pub mod json; pub mod simple; diff --git a/evmbin/src/info.rs b/evmbin/src/info.rs index 3392cb441..4af30db71 100644 --- a/evmbin/src/info.rs +++ b/evmbin/src/info.rs @@ -17,7 +17,8 @@ //! VM runner. use std::time::{Instant, Duration}; -use util::{U256, H256}; +use bigint::prelude::U256; +use bigint::hash::H256; use ethcore::{trace, spec, transaction, pod_state}; use ethcore::client::{self, EvmTestClient, EvmTestError, TransactResult}; use ethjson; diff --git a/evmbin/src/main.rs b/evmbin/src/main.rs index 5eb43ed61..bb6435ee0 100644 --- a/evmbin/src/main.rs +++ b/evmbin/src/main.rs @@ -26,6 +26,7 @@ extern crate serde; extern crate serde_derive; extern crate docopt; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate vm; extern crate evm; extern crate panic_hook; @@ -35,7 +36,8 @@ use std::{fmt, fs}; use std::path::PathBuf; use docopt::Docopt; use rustc_hex::FromHex; -use util::{U256, Bytes, Address}; +use bigint::prelude::U256; +use util::{Bytes, Address}; use ethcore::spec; use vm::{ActionParams, CallType}; diff --git a/hash-fetch/Cargo.toml b/hash-fetch/Cargo.toml index 502723bbb..d5cec4d47 100644 --- a/hash-fetch/Cargo.toml +++ b/hash-fetch/Cargo.toml @@ -14,7 +14,10 @@ mime = "0.2" mime_guess = "1.6.1" rand = "0.3" rustc-hex = "1.0" +parking_lot = "0.4" fetch = { path = "../util/fetch" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } parity-reactor = { path = "../util/reactor" } native-contracts = { path = "../ethcore/native_contracts" } +hash = { path = "../util/hash" } diff --git a/hash-fetch/src/client.rs b/hash-fetch/src/client.rs index bd773e9b3..6a756aa82 100644 --- a/hash-fetch/src/client.rs +++ b/hash-fetch/src/client.rs @@ -21,11 +21,12 @@ use std::io::Write; use std::sync::Arc; use std::path::PathBuf; +use hash::keccak_buffer; use fetch::{Fetch, Response, Error as FetchError, Client as FetchClient}; use futures::Future; use parity_reactor::Remote; use urlhint::{ContractClient, URLHintContract, URLHint, URLHintResult}; -use util::{H256, sha3}; +use bigint::hash::H256; /// API for fetching by hash. 
pub trait HashFetch: Send + Sync + 'static { @@ -101,7 +102,7 @@ fn validate_hash(path: PathBuf, hash: H256, result: Result // And validate the hash let mut file_reader = io::BufReader::new(fs::File::open(&path)?); - let content_hash = sha3(&mut file_reader)?; + let content_hash = keccak_buffer(&mut file_reader)?; if content_hash != hash { Err(Error::HashMismatch{ got: content_hash, expected: hash }) } else { @@ -190,7 +191,7 @@ fn random_temp_path() -> PathBuf { mod tests { use rustc_hex::FromHex; use std::sync::{Arc, mpsc}; - use util::Mutex; + use parking_lot::Mutex; use futures::future; use fetch::{self, Fetch}; use parity_reactor::Remote; diff --git a/hash-fetch/src/lib.rs b/hash-fetch/src/lib.rs index 4952ecea2..ad048f3d9 100644 --- a/hash-fetch/src/lib.rs +++ b/hash-fetch/src/lib.rs @@ -25,12 +25,15 @@ extern crate mime; extern crate ethabi; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate futures; extern crate mime_guess; extern crate native_contracts; extern crate parity_reactor; +extern crate parking_lot; extern crate rand; extern crate rustc_hex; +extern crate hash; pub extern crate fetch; diff --git a/hash-fetch/src/urlhint.rs b/hash-fetch/src/urlhint.rs index bc4b63cf2..c41b0b960 100644 --- a/hash-fetch/src/urlhint.rs +++ b/hash-fetch/src/urlhint.rs @@ -20,10 +20,11 @@ use std::sync::Arc; use rustc_hex::ToHex; use mime::Mime; use mime_guess; +use hash::keccak; use futures::{future, BoxFuture, Future}; use native_contracts::{Registry, Urlhint}; -use util::{Address, Bytes, Hashable}; +use util::{Address, Bytes}; const COMMIT_LEN: usize = 20; @@ -116,7 +117,7 @@ impl URLHintContract { } } -fn decode_urlhint_output(output: (String, ::util::H160, Address)) -> Option { +fn decode_urlhint_output(output: (String, ::bigint::hash::H160, Address)) -> Option { let (account_slash_repo, commit, owner) = output; if owner == Address::default() { @@ -164,7 +165,7 @@ impl URLHint for URLHintContract { let urlhint = self.urlhint.clone(); let client = self.client.clone(); - self.registrar.get_address(do_call, "githubhint".sha3(), "A".into()) + self.registrar.get_address(do_call, keccak("githubhint"), "A".into()) .map(|addr| if addr == Address::default() { None } else { Some(addr) }) .and_then(move |address| { let mut fixed_id = [0; 32]; @@ -175,7 +176,7 @@ impl URLHint for URLHintContract { None => Either::A(future::ok(None)), Some(address) => { let do_call = move |_, data| client.call(address, data); - Either::B(urlhint.entries(do_call, ::util::H256(fixed_id)).map(decode_urlhint_output)) + Either::B(urlhint.entries(do_call, ::bigint::hash::H256(fixed_id)).map(decode_urlhint_output)) } } }).boxed() @@ -215,7 +216,8 @@ pub mod tests { use super::*; use super::guess_mime_type; - use util::{Bytes, Address, Mutex, ToPretty}; + use parking_lot::Mutex; + use util::{Bytes, Address, ToPretty}; pub struct FakeRegistrar { pub calls: Arc>>, diff --git a/hw/src/lib.rs b/hw/src/lib.rs index cda6c2241..e4b5c9f54 100644 --- a/hw/src/lib.rs +++ b/hw/src/lib.rs @@ -155,7 +155,6 @@ impl HardwareWalletManager { self.ledger.lock().set_key_path(key_path); } - /// List connected wallets. This only returns wallets that are ready to be used. 
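The validate_hash change above now runs the downloaded file through keccak_buffer and rejects the content on a mismatch. A hedged sketch of what such a streaming check boils down to, again assuming the tiny-keccak 1.x crate in place of the workspace's hash crate:

// Illustrative only: stream a reader through keccak-256 in fixed-size chunks
// and compare the digest with an expected value, as validate_hash does.
extern crate tiny_keccak;

use std::io::{self, Read};
use tiny_keccak::Keccak;

fn keccak_buffer<R: Read>(reader: &mut R) -> io::Result<[u8; 32]> {
    let mut hasher = Keccak::new_keccak256();
    let mut chunk = [0u8; 4096];
    loop {
        let n = reader.read(&mut chunk)?;
        if n == 0 {
            break;
        }
        hasher.update(&chunk[..n]);
    }
    let mut out = [0u8; 32];
    hasher.finalize(&mut out);
    Ok(out)
}

fn main() {
    // Stand-in for the fetched file; a real caller would open the saved path.
    let expected = keccak_buffer(&mut &b"fetched bytes"[..]).unwrap();
    let got = keccak_buffer(&mut &b"fetched bytes"[..]).unwrap();
    // Mirrors the HashMismatch check: accept only when the digests agree.
    assert_eq!(got, expected);
}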
pub fn list_wallets(&self) -> Vec { self.ledger.lock().list_devices() diff --git a/ipc-common-types/Cargo.toml b/ipc-common-types/Cargo.toml index ec9b157d5..a9efe1439 100644 --- a/ipc-common-types/Cargo.toml +++ b/ipc-common-types/Cargo.toml @@ -13,3 +13,4 @@ ethcore-ipc-codegen = { path = "../ipc/codegen" } semver = "0.6" ethcore-ipc = { path = "../ipc/rpc" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } diff --git a/ipc-common-types/src/lib.rs b/ipc-common-types/src/lib.rs index 2038c9116..c1d18a8d7 100644 --- a/ipc-common-types/src/lib.rs +++ b/ipc-common-types/src/lib.rs @@ -18,6 +18,7 @@ extern crate semver; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate ethcore_ipc as ipc; mod types; diff --git a/ipc-common-types/src/types/version_info.rs b/ipc-common-types/src/types/version_info.rs index 1f0e58452..ef386824d 100644 --- a/ipc-common-types/src/types/version_info.rs +++ b/ipc-common-types/src/types/version_info.rs @@ -19,7 +19,7 @@ use std::fmt; use std::str::FromStr; use semver::{Version}; -use util::H160; +use bigint::hash::H160; use util::misc::raw_package_info; use release_track::ReleaseTrack; diff --git a/ipc/rpc/Cargo.toml b/ipc/rpc/Cargo.toml index edfcdf103..12d73a118 100644 --- a/ipc/rpc/Cargo.toml +++ b/ipc/rpc/Cargo.toml @@ -9,5 +9,6 @@ license = "GPL-3.0" [dependencies] ethcore-devtools = { path = "../../devtools" } nanomsg = { git = "https://github.com/paritytech/nanomsg.rs.git", branch = "parity-1.7" } +ethcore-bigint = { path = "../../util/bigint"} ethcore-util = { path = "../../util" } semver = "0.6" diff --git a/ipc/rpc/src/binary.rs b/ipc/rpc/src/binary.rs index b2fc4bb4b..6466acdb1 100644 --- a/ipc/rpc/src/binary.rs +++ b/ipc/rpc/src/binary.rs @@ -16,7 +16,9 @@ //! 
Binary representation of types -use util::{U256, U512, H256, H512, H2048, Address}; +use bigint::prelude::{U256, U512}; +use bigint::hash::{H256, H512, H2048}; +use util::{Address}; use std::mem; use std::collections::{VecDeque, BTreeMap}; use std::ops::Range; diff --git a/ipc/rpc/src/lib.rs b/ipc/rpc/src/lib.rs index 61493ea36..c6aff23b5 100644 --- a/ipc/rpc/src/lib.rs +++ b/ipc/rpc/src/lib.rs @@ -20,6 +20,7 @@ extern crate ethcore_devtools as devtools; extern crate semver; extern crate nanomsg; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; pub mod interface; pub mod binary; diff --git a/ipfs/Cargo.toml b/ipfs/Cargo.toml index 4b438efe7..f56159399 100644 --- a/ipfs/Cargo.toml +++ b/ipfs/Cargo.toml @@ -8,6 +8,7 @@ authors = ["Parity Technologies "] [dependencies] ethcore = { path = "../ethcore" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } rlp = { path = "../util/rlp" } mime = "0.2" diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index 104c7db19..ec065fa48 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -22,6 +22,7 @@ extern crate cid; extern crate rlp; extern crate ethcore; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate jsonrpc_http_server as http; pub mod error; diff --git a/ipfs/src/route.rs b/ipfs/src/route.rs index c72ca53c1..e95552404 100644 --- a/ipfs/src/route.rs +++ b/ipfs/src/route.rs @@ -19,7 +19,8 @@ use error::{Error, Result}; use cid::{ToCid, Codec}; use multihash::Hash; -use util::{Bytes, H256}; +use bigint::hash::H256; +use util::Bytes; use ethcore::client::{BlockId, TransactionId}; type Reason = &'static str; diff --git a/js/i18n/_default/account.js b/js/i18n/_default/account.js index 7c9d25520..3783ff368 100644 --- a/js/i18n/_default/account.js +++ b/js/i18n/_default/account.js @@ -18,12 +18,29 @@ export default { button: { delete: `delete`, edit: `edit`, + export: `export`, faucet: `Kovan ETH`, + forget: `forget`, password: `password`, shapeshift: `shapeshift`, transfer: `transfer`, verify: `verify` }, + export: { + info: `Export your account as a JSON file. 
Please enter the password linked with this account.`, + password: { + hint: `The password specified when creating this account`, + label: `Account password` + }, + setPassword: { + hint: `Enter password Here`, + label: `Password` + }, + title: `Export Account` + }, + external: { + confirmDelete: `Are you sure you want to remove the following external address from your account list?` + }, hardware: { confirmDelete: `Are you sure you want to remove the following hardware address from your account list?` }, diff --git a/js/i18n/_default/accounts.js b/js/i18n/_default/accounts.js index 2db3d5fd9..e90f0984f 100644 --- a/js/i18n/_default/accounts.js +++ b/js/i18n/_default/accounts.js @@ -16,10 +16,19 @@ export default { button: { + export: `export`, newAccount: `account`, newWallet: `wallet`, + restoreAccount: `restore`, vaults: `vaults` }, + export: { + button: { + cancel: `Cancel`, + export: `Export` + }, + title: `Export an Account` + }, summary: { minedBlock: `Mined at block #{blockNumber}` }, diff --git a/js/i18n/_default/application.js b/js/i18n/_default/application.js index f2e6be1ee..8f0de0dc1 100644 --- a/js/i18n/_default/application.js +++ b/js/i18n/_default/application.js @@ -22,8 +22,7 @@ export default { consensus: { capable: `Upgrade not required.`, capableUntil: `Upgrade required before #{blockNumber}`, - incapableSince: `Upgrade required since #{blockNumber}`, - unknown: `Upgrade status is unknown.` + incapableSince: `Upgrade required since #{blockNumber}` }, upgrade: `Upgrade` } diff --git a/js/i18n/_default/connection.js b/js/i18n/_default/connection.js index e51943178..4f38162e4 100644 --- a/js/i18n/_default/connection.js +++ b/js/i18n/_default/connection.js @@ -19,6 +19,7 @@ export default { connectingNode: `Connecting to the Parity Node. If this informational message persists, please ensure that your Parity node is running and reachable on the network.`, invalidToken: `invalid signer token`, noConnection: `Unable to make a connection to the Parity Secure API. To update your secure token or to generate a new one, run {newToken} and paste the generated token into the space below.`, + timestamp: `Ensure that both the Parity node and this machine connecting have computer clocks in-sync with each other and with a timestamp server, ensuring both successful token validation and block operations.`, token: { hint: `a generated token from Parity`, label: `secure token` diff --git a/js/i18n/_default/createAccount.js b/js/i18n/_default/createAccount.js index 8930fc7b4..791ee796b 100644 --- a/js/i18n/_default/createAccount.js +++ b/js/i18n/_default/createAccount.js @@ -21,8 +21,11 @@ export default { label: `address` }, phrase: { + backedUp: `Type "I have written down the phrase" below to confirm it is backed up.`, + backup: `Please back up the recovery phrase now. 
Make sure to keep it private and secure, it allows full and unlimited access to the account.`, + backupConfirm: `Type your recovery phrase now.`, hint: `the account recovery phrase`, - label: `owner recovery phrase (keep private and secure, it allows full and unlimited access to the account)` + label: `owner recovery phrase` } }, accountDetailsGeth: { @@ -50,14 +53,14 @@ export default { description: `Selecting your identity icon and specifying the password`, label: `New Account` }, - fromPhrase: { - description: `Recover using a previously stored recovery phrase and new password`, - label: `Recovery phrase` - }, fromPresale: { description: `Import an Ethereum presale wallet file with the original password`, label: `Presale wallet` }, + fromQr: { + description: `Attach an externally managed account via QR code`, + label: `External Account` + }, fromRaw: { description: `Enter a previously created raw private key with a new password`, label: `Private key` @@ -104,6 +107,21 @@ export default { label: `password` } }, + newQr: { + address: { + hint: `the network address for the account`, + label: `address` + }, + description: { + hint: `a description for the account`, + label: `account description` + }, + name: { + hint: `a descriptive name for the account`, + label: `account name` + }, + summary: `Use the built-in machine camera to scan to QR code of the account you wish to attach as an external account. External accounts are signed on the external device.` + }, rawKey: { hint: { hint: `(optional) a hint to help with remembering the password`, @@ -135,6 +153,9 @@ export default { hint: `a descriptive name for the account`, label: `account name` }, + passPhrase: { + error: `enter a recovery phrase` + }, password: { hint: `a strong, unique password`, label: `password` @@ -147,14 +168,27 @@ export default { hint: `the account recovery phrase`, label: `account recovery phrase` }, + warning: { + emptyPhrase: `The recovery phrase is empty. + This account can be recovered by anyone.`, + shortPhrase: `The recovery phrase is less than 11 words. + This account has not been generated by Parity and might be insecure. + Proceed with caution.`, + testnetEmptyPhrase: `The recovery phrase is empty. + This account can be recovered by anyone. + Proceed with caution.` + }, windowsKey: { label: `Key was created with Parity <1.4.5 on Windows` } }, title: { accountInfo: `account information`, + backupPhrase: `confirm recovery phrase`, createAccount: `create account`, createType: `creation type`, - importWallet: `import wallet` + importAccount: `import account`, + qr: `external account`, + restoreAccount: `restore account` } }; diff --git a/js/i18n/_default/createWallet.js b/js/i18n/_default/createWallet.js index eeb9e9a98..f32e95248 100644 --- a/js/i18n/_default/createWallet.js +++ b/js/i18n/_default/createWallet.js @@ -18,14 +18,9 @@ export default { button: { add: `Add`, cancel: `Cancel`, - close: `Close`, create: `Create`, done: `Done`, - next: `Next`, - sending: `Sending...` - }, - deployment: { - message: `The deployment is currently in progress` + next: `Next` }, details: { address: { @@ -73,21 +68,7 @@ export default { numOwners: `{numOwners} owners are required to confirm a transaction.`, owners: `The following are wallet owners` }, - rejected: { - message: `The deployment has been rejected`, - state: `The wallet will not be created. 
You can safely close this window.`, - title: `rejected` - }, - states: { - completed: `The contract deployment has been completed`, - confirmationNeeded: `The contract deployment needs confirmations from other owners of the Wallet`, - preparing: `Preparing transaction for network transmission`, - validatingCode: `Validating the deployed contract code`, - waitingConfirm: `Waiting for confirmation of the transaction in the Parity Secure Signer`, - waitingReceipt: `Waiting for the contract deployment transaction receipt` - }, steps: { - deployment: `wallet deployment`, details: `wallet details`, info: `wallet informaton`, type: `wallet type` diff --git a/js/i18n/_default/dapps.js b/js/i18n/_default/dapps.js index 9ed3415d1..514ef59c0 100644 --- a/js/i18n/_default/dapps.js +++ b/js/i18n/_default/dapps.js @@ -31,6 +31,9 @@ export default { } }, button: { + dapp: { + refresh: `refresh` + }, edit: `edit`, permissions: `permissions` }, diff --git a/js/i18n/_default/deployContract.js b/js/i18n/_default/deployContract.js index 0b5a05503..2436d540f 100644 --- a/js/i18n/_default/deployContract.js +++ b/js/i18n/_default/deployContract.js @@ -15,19 +15,12 @@ // along with Parity. If not, see . export default { - busy: { - title: `The deployment is currently in progress` - }, button: { cancel: `Cancel`, close: `Close`, create: `Create`, - done: `Done`, next: `Next` }, - completed: { - description: `Your contract has been deployed at` - }, details: { abi: { hint: `the abi of the contract to deploy or solc combined-output`, @@ -66,25 +59,9 @@ export default { parameters: { choose: `Choose the contract parameters` }, - rejected: { - description: `You can safely close this window, the contract deployment will not occur.`, - title: `The deployment has been rejected` - }, - state: { - completed: `The contract deployment has been completed`, - confirmationNeeded: `The operation needs confirmations from the other owners of the contract`, - preparing: `Preparing transaction for network transmission`, - validatingCode: `Validating the deployed contract code`, - waitReceipt: `Waiting for the contract deployment transaction receipt`, - waitSigner: `Waiting for confirmation of the transaction in the Parity Secure Signer` - }, title: { - completed: `completed`, - deployment: `deployment`, details: `contract details`, extras: `extra information`, - failed: `deployment failed`, - parameters: `contract parameters`, - rejected: `rejected` + parameters: `contract parameters` } }; diff --git a/js/i18n/_default/errors.js b/js/i18n/_default/errors.js index 76fed24cd..1a9c3516a 100644 --- a/js/i18n/_default/errors.js +++ b/js/i18n/_default/errors.js @@ -19,6 +19,8 @@ export default { invalidKey: `the raw key needs to be hex, 64 characters in length and contain the prefix "0x"`, noFile: `select a valid wallet file to import`, noKey: `you need to provide the raw private key`, + noMatchBackupPhrase: `the supplied recovery phrase does not match`, noMatchPassword: `the supplied passwords does not match`, + noMatchPhraseBackedUp: `type "I have written down the phrase"`, noName: `you need to specify a valid name` }; diff --git a/js/i18n/_default/executeContract.js b/js/i18n/_default/executeContract.js index 011264d3f..205a1d4dc 100644 --- a/js/i18n/_default/executeContract.js +++ b/js/i18n/_default/executeContract.js @@ -15,14 +15,8 @@ // along with Parity. If not, see . 
export default { - busy: { - posted: `Your transaction has been posted to the network`, - title: `The function execution is in progress`, - waitAuth: `Waiting for authorization in the Parity Signer` - }, button: { cancel: `cancel`, - done: `done`, next: `next`, post: `post transaction`, prev: `prev` @@ -44,15 +38,8 @@ export default { label: `function to execute` } }, - rejected: { - state: `You can safely close this window, the function execution will not occur.`, - title: `The execution has been rejected` - }, steps: { advanced: `advanced options`, - complete: `complete`, - rejected: `rejected`, - sending: `sending`, transfer: `function details` } }; diff --git a/js/i18n/_default/firstRun.js b/js/i18n/_default/firstRun.js index 5f41fa9c4..c6e6a640e 100644 --- a/js/i18n/_default/firstRun.js +++ b/js/i18n/_default/firstRun.js @@ -20,6 +20,7 @@ export default { create: `Create`, next: `Next`, print: `Print Phrase`, + restart: `Start Over`, skip: `Skip` }, completed: { @@ -28,6 +29,7 @@ export default { }, title: { completed: `completed`, + confirmation: `confirmation`, newAccount: `new account`, recovery: `recovery`, terms: `terms`, diff --git a/js/i18n/_default/index.js b/js/i18n/_default/index.js index 687e558dc..341663c92 100644 --- a/js/i18n/_default/index.js +++ b/js/i18n/_default/index.js @@ -41,11 +41,14 @@ export home from './home'; export loadContract from './loadContract'; export parityBar from './parityBar'; export passwordChange from './passwordChange'; +export peers from './peers'; +export requests from './requests'; export saveContract from './saveContract'; export settings from './settings'; export shapeshift from './shapeshift'; export signer from './signer'; export status from './status'; +export syncWarning from './syncWarning'; export tabBar from './tabBar'; export transfer from './transfer'; export txEditor from './txEditor'; diff --git a/js/i18n/_default/peers.js b/js/i18n/_default/peers.js new file mode 100644 index 000000000..2e66aaff0 --- /dev/null +++ b/js/i18n/_default/peers.js @@ -0,0 +1,46 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +export default { + acceptNonReserved: { + label: `Accept non-reserved` + }, + acceptNonReservedPeers: { + success: `Accepting non-reserved peers` + }, + addReserved: { + label: `Add reserved` + }, + dropNonReserved: { + label: `Drop non-reserved` + }, + dropNonReservedPeers: { + success: `Dropping non-reserved peers` + }, + form: { + action: { + label: `{add, select, true {Add} false {}}{remove, select, true {Remove} false {}}`, + success: `Successfully {add, select, true {added} false {}}{remove, select, true {removed} false {}} a reserved peer` + }, + cancel: { + label: `Cancel` + }, + label: `Peer enode URL` + }, + removeReserved: { + label: `Remove reserved` + } +}; diff --git a/js/i18n/_default/requests.js b/js/i18n/_default/requests.js new file mode 100644 index 000000000..328d52156 --- /dev/null +++ b/js/i18n/_default/requests.js @@ -0,0 +1,24 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +export default { + status: { + error: `An error occured:`, + transactionMined: `Transaction mined at block #{blockNumber} ({blockHeight} blocks ago)`, + transactionSent: `Transaction sent to network with hash`, + waitingForSigner: `Waiting for authorization in the Parity Signer` + } +}; diff --git a/js/i18n/_default/settings.js b/js/i18n/_default/settings.js index ea447e4a9..aef412b48 100644 --- a/js/i18n/_default/settings.js +++ b/js/i18n/_default/settings.js @@ -36,7 +36,7 @@ export default { }, languages: { hint: `the language this interface is displayed with`, - label: `UI language` + label: `language` }, loglevels: `Choose the different logs level.`, modes: { @@ -51,7 +51,7 @@ export default { label: `parity` }, proxy: { - details_0: `Instead of accessing Parity via the IP address and port, you will be able to access it via the .parity subdomain, by visiting {homeProxy}. To setup subdomain-based routing, you need to add the relevant proxy entries to your browser,`, + details_0: `Instead of accessing Parity via the IP address and port, you will be able to access it via the .web3.site subdomain, by visiting {homeProxy}. 
To setup subdomain-based routing, you need to add the relevant proxy entries to your browser,`, details_1: `To learn how to configure the proxy, instructions are provided for {windowsLink}, {macOSLink} or {ubuntuLink}.`, details_macos: `macOS`, details_ubuntu: `Ubuntu`, @@ -88,13 +88,12 @@ export default { description: `The secure transaction management area of the application where you can approve any outgoing transactions made from the application as well as those placed into the queue by decentralized applications.`, label: `Signer` }, - status: { - description: `See how the Parity node is performing in terms of connections to the network, logs from the actual running instance and details of mining (if enabled and configured).`, - label: `Status` - }, label: `views`, home: { label: `Home` + }, + status: { + label: `Status` } }, label: `settings` diff --git a/js/i18n/_default/signer.js b/js/i18n/_default/signer.js index 3f8615c52..6ba8fde46 100644 --- a/js/i18n/_default/signer.js +++ b/js/i18n/_default/signer.js @@ -15,6 +15,13 @@ // along with Parity. If not, see . export default { + decryptRequest: { + request: `A request to decrypt data using your account:`, + state: { + confirmed: `Confirmed`, + rejected: `Rejected` + } + }, embedded: { noPending: `There are currently no pending requests awaiting your confirmation` }, @@ -29,7 +36,7 @@ export default { requestOrigin: { dapp: `by a dapp at {url}`, ipc: `via IPC session`, - rpc: `via RPC {rpc}`, + rpc: `via RPC {url}`, signerCurrent: `via current tab`, signerUI: `via UI session`, unknownInterface: `via unknown interface`, @@ -38,10 +45,14 @@ export default { }, requestsPage: { noPending: `There are no requests requiring your confirmation.`, - pendingTitle: `Pending Requests`, + pendingTitle: `Pending Signature Authorization`, queueTitle: `Local Transactions` }, sending: { + external: { + scanSigned: `Scan the QR code of the signed transaction from your external device`, + scanTx: `Please scan the transaction QR on your external device` + }, hardware: { confirm: `Please confirm the transaction on your attached hardware device`, connect: `Please attach your hardware device before confirming the transaction` @@ -53,6 +64,10 @@ export default { confirmed: `Confirmed`, rejected: `Rejected` }, + tooltip: { + data: `Data: {data}`, + hash: `Hash to be signed: {hashToSign}` + }, unknownBinary: `(Unknown binary data)`, warning: `WARNING: This consequences of doing this may be grave. 
Confirm the request only if you are sure.` }, @@ -65,7 +80,8 @@ export default { txPendingConfirm: { buttons: { confirmBusy: `Confirming...`, - confirmRequest: `Confirm Request` + confirmRequest: `Confirm Request`, + scanSigned: `Scan Signed QR` }, errors: { invalidWallet: `Given wallet file is invalid.` diff --git a/js/i18n/_default/status.js b/js/i18n/_default/status.js index 536c0ff90..1874a4044 100644 --- a/js/i18n/_default/status.js +++ b/js/i18n/_default/status.js @@ -20,6 +20,14 @@ export default { stopped: `Refresh and display of logs from Parity is currently stopped via the UI, start it to see the latest updates.`, title: `Node Logs` }, + health: { + no: `no`, + peers: `Connected Peers`, + sync: `Chain Synchronized`, + time: `Time Synchronized`, + title: `Node Health`, + yes: `yes` + }, miningSettings: { input: { author: { @@ -41,13 +49,25 @@ export default { }, title: `mining settings` }, + peers: { + table: { + header: { + caps: `Capabilities`, + ethDiff: `Difficulty (ETH)`, + ethHeader: `Header (ETH)`, + id: `ID`, + name: `Name`, + remoteAddress: `Remote Address` + } + }, + title: `network peers` + }, status: { hashrate: `{hashrate} H/s`, input: { chain: `chain`, enode: `enode`, no: `no`, - peers: `peers`, port: `network port`, rpcEnabled: `rpc enabled`, rpcInterface: `rpc interface`, diff --git a/js/i18n/_default/syncWarning.js b/js/i18n/_default/syncWarning.js new file mode 100644 index 000000000..109a7d6ca --- /dev/null +++ b/js/i18n/_default/syncWarning.js @@ -0,0 +1,24 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +export default { + dontShowAgain: { + label: `Do not show this warning again` + }, + understandBtn: { + label: `I understand` + } +}; diff --git a/js/i18n/_default/transfer.js b/js/i18n/_default/transfer.js index 3207f5b06..16c7c27a2 100644 --- a/js/i18n/_default/transfer.js +++ b/js/i18n/_default/transfer.js @@ -24,7 +24,6 @@ export default { buttons: { back: `Back`, cancel: `Cancel`, - close: `Close`, next: `Next`, send: `Send` }, @@ -51,10 +50,6 @@ export default { label: `total transaction amount` } }, - wallet: { - confirmation: `This transaction needs confirmation from other owners.`, - operationHash: `operation hash` - }, warning: { wallet_spent_limit: `This transaction value is above the remaining daily limit. 
It will need to be confirmed by other owners.` } diff --git a/js/i18n/_default/ui.js b/js/i18n/_default/ui.js index d84e7bd13..5d04208ad 100644 --- a/js/i18n/_default/ui.js +++ b/js/i18n/_default/ui.js @@ -66,6 +66,18 @@ export default { errors: { close: `close` }, + features: { + defaults: { + i18n: { + desc: `Allows changing the default interface language`, + name: `Language Selection` + }, + logging: { + desc: `Allows changing of the log levels for various components`, + name: `Logging Level Selection` + } + } + }, fileSelect: { defaultLabel: `Drop a file here, or click to select a file to upload` }, @@ -80,8 +92,8 @@ export default { }, methodDecoding: { condition: { - block: `, {historic, select, true {Submitted} false {Submission}} at block {blockNumber}`, - time: `, {historic, select, true {Submitted} false {Submission}} at {timestamp}` + block: `{historic, select, true {Will be submitted} false {To be submitted}} at block {blockNumber}`, + time: `{historic, select, true {Will be submitted} false {To be submitted}} {timestamp}` }, deploy: { address: `Deployed a contract at address`, @@ -101,7 +113,7 @@ export default { info: `{historic, select, true {Received} false {Will receive}} {valueEth} from {aContract}{address}` }, signature: { - info: `{historic, select, true {Executed} false {Will execute}} the {method} function on the contract {address} trsansferring {ethValue}{inputLength, plural, zero {,} other {passing the following {inputLength, plural, one {parameter} other {parameters}}}}` + info: `{historic, select, true {Executed} false {Will execute}} the {method} function on the contract {address} {showEth, select, true {transferring {ethValue}} false {}} {showInputs, select, false {} true {passing the following {inputLength, plural, one {parameter} other {parameters}}}}` }, token: { transfer: `{historic, select, true {Transferred} false {Will transfer}} {value} to {address}` @@ -131,6 +143,27 @@ export default { posted: `The transaction has been posted to the network with a hash of {hashLink}`, waiting: `waiting for confirmations` }, + txList: { + txRow: { + cancel: `Cancel`, + cancelWarning: `Warning: Editing or Canceling the transaction may not succeed!`, + canceled: `Canceled`, + edit: `Edit`, + editing: `Editing`, + pendingStatus: { + blocksLeft: `{blockNumber} blocks left`, + time: `{time} left` + }, + scheduled: `Scheduled`, + submitting: `Pending`, + verify: { + cancelEditCancel: `Cancel`, + cancelEditEdit: `Edit`, + confirm: `Are you sure?`, + nevermind: `Nevermind` + } + } + }, vaultSelect: { hint: `the vault this account is attached to`, label: `associated vault` diff --git a/js/i18n/_default/walletSettings.js b/js/i18n/_default/walletSettings.js index ddeae3798..e3cae3e3f 100644 --- a/js/i18n/_default/walletSettings.js +++ b/js/i18n/_default/walletSettings.js @@ -22,8 +22,12 @@ export default { cancel: `Cancel`, close: `Close`, next: `Next`, - send: `Send`, - sending: `Sending...` + send: `Send` + }, + changeOwner: { + labelFrom: `From`, + labelTo: `To`, + title: `Change Owner` }, changes: { modificationString: `For your modifications to be taken into account, @@ -62,7 +66,6 @@ export default { details: `from {from} to {to}`, title: `Change Required Owners` }, - rejected: `The transaction #{txid} has been rejected`, removeOwner: { title: `Remove Owner` } diff --git a/js/i18n/_default/writeContract.js b/js/i18n/_default/writeContract.js index fc1100b77..62f83dd4d 100644 --- a/js/i18n/_default/writeContract.js +++ b/js/i18n/_default/writeContract.js @@ -37,7 +37,7 @@ 
export default { params: `An error occurred with the following description` }, input: { - abi: `ABI Interface`, + abi: `ABI Definition`, code: `Bytecode`, metadata: `Metadata`, swarm: `Swarm Metadata Hash` diff --git a/js/i18n/store.js b/js/i18n/store.js index f2506950e..79bc48fe6 100644 --- a/js/i18n/store.js +++ b/js/i18n/store.js @@ -33,8 +33,8 @@ import zhHantTWMessages from './zh-Hant-TW'; let instance = null; -const LANGUAGES = flatten({ languages }); -const MESSAGES = { +export const LANGUAGES = flatten({ languages }); +export const MESSAGES = { de: Object.assign(flatten(deMessages), LANGUAGES), en: Object.assign(flatten(enMessages), LANGUAGES), nl: Object.assign(flatten(nlMessages), LANGUAGES), @@ -75,8 +75,3 @@ export default class Store { return instance; } } - -export { - LANGUAGES, - MESSAGES -}; diff --git a/js/package-lock.json b/js/package-lock.json index bc40b0ebc..b94920fc4 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -1,6 +1,6 @@ { "name": "Parity", - "version": "1.8.17", + "version": "1.8.19", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/js/package.json b/js/package.json index cc7d801c6..031ce3459 100644 --- a/js/package.json +++ b/js/package.json @@ -1,8 +1,8 @@ { "name": "Parity", - "version": "1.8.17", + "version": "1.8.19", "main": "src/index.electron.js", - "jsnext:main": "src/index.js", + "jsnext:main": "src/index.electron.js", "author": "Parity Team ", "maintainers": [ "Jaco Greeff", diff --git a/js/scripts/lint-i18n.js b/js/scripts/lint-i18n.js index 540c1e803..3afb19862 100644 --- a/js/scripts/lint-i18n.js +++ b/js/scripts/lint-i18n.js @@ -20,34 +20,57 @@ import * as defaults from '../i18n/_default'; import { LANGUAGES, MESSAGES } from '../i18n/store'; const SKIP_LANG = ['en']; -const defaultKeys = Object.keys(flatten(Object.assign({}, defaults, LANGUAGES))); +const defaultValues = flatten(Object.assign({}, defaults, LANGUAGES)); +const defaultKeys = Object.keys(defaultValues); +const results = {}; Object .keys(MESSAGES) .filter((lang) => !SKIP_LANG.includes(lang)) .forEach((lang) => { const messageKeys = Object.keys(MESSAGES[lang]); - let extra = 0; - let found = 0; - let missing = 0; + const langResults = { found: [], missing: [], extras: [] }; - console.log(`*** Checking translations for ${lang}`); + console.warn(`*** Checking translations for ${lang}`); defaultKeys.forEach((key) => { if (messageKeys.includes(key)) { - found++; + langResults.found.push(key); } else { - missing++; - console.log(` Missing ${key}`); + langResults.missing.push(key); } }); messageKeys.forEach((key) => { if (!defaultKeys.includes(key)) { - extra++; - console.log(` Extra ${key}`); + langResults.extras.push(key); } }); - console.log(`Found ${found} keys, missing ${missing} keys, ${extra} extraneous keys\n`); + // Sort keys + langResults.extras.sort((kA, kB) => kA.localeCompare(kB)); + langResults.found.sort((kA, kB) => kA.localeCompare(kB)); + langResults.missing.sort((kA, kB) => kA.localeCompare(kB)); + + // Print to stderr the missing and extra keys + langResults.missing.forEach((key) => console.warn(` Missing ${key}`)); + langResults.extras.forEach((key) => console.warn(` Extra ${key}`)); + + results[lang] = langResults; + + console.warn(`Found ${langResults.found.length} keys, missing ${langResults.missing.length} keys, ${langResults.extras.length} extraneous keys\n`); }); + +const formattedResults = Object.keys(results) + .reduce((res, lang) => { + const { missing } = results[lang]; + + res[lang] = missing.map((key) => ({ + key, + 
default: defaultValues[key] + })); + + return res; + }, {}); + +process.stdout.write(JSON.stringify(formattedResults, null, 2) + '\n'); diff --git a/json/src/spec/params.rs b/json/src/spec/params.rs index b90eef5ea..4561bcada 100644 --- a/json/src/spec/params.rs +++ b/json/src/spec/params.rs @@ -108,6 +108,9 @@ pub struct Params { /// Node permission contract address. #[serde(rename="nodePermissionContract")] pub node_permission_contract: Option
<Address>, + /// Transaction permission contract address. + #[serde(rename="transactionPermissionContract")] + pub transaction_permission_contract: Option<Address>
, } #[cfg(test)] diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 426c8d447..eee785102 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -21,7 +21,10 @@ use std::time::{Instant, Duration}; use std::thread::sleep; use std::sync::Arc; use rustc_hex::FromHex; -use util::{ToPretty, U256, H256, Address, Hashable}; +use hash::{keccak, KECCAK_NULL_RLP}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{ToPretty, Address}; use rlp::PayloadInfo; use ethcore::service::ClientService; use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockId}; @@ -154,6 +157,7 @@ pub fn execute(cmd: BlockchainCmd) -> Result<(), String> { fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> { use light::client::{Service as LightClientService, Config as LightClientConfig}; use light::cache::Cache as LightDataCache; + use parking_lot::Mutex; let timer = Instant::now(); @@ -187,7 +191,7 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> { // create dirs used by parity cmd.dirs.create_dirs(false, false, false)?; - let cache = Arc::new(::util::Mutex::new( + let cache = Arc::new(Mutex::new( LightDataCache::new(Default::default(), ::time::Duration::seconds(0)) )); @@ -639,13 +643,13 @@ fn execute_export_state(cmd: ExportState) -> Result<(), String> { out.write_fmt(format_args!("\n\"0x{}\": {{\"balance\": \"{:x}\", \"nonce\": \"{:x}\"", account.hex(), balance, client.nonce(&account, at).unwrap_or_else(U256::zero))).expect("Write error"); let code = client.code(&account, at).unwrap_or(None).unwrap_or_else(Vec::new); if !code.is_empty() { - out.write_fmt(format_args!(", \"code_hash\": \"0x{}\"", code.sha3().hex())).expect("Write error"); + out.write_fmt(format_args!(", \"code_hash\": \"0x{}\"", keccak(&code).hex())).expect("Write error"); if cmd.code { out.write_fmt(format_args!(", \"code\": \"{}\"", code.to_hex())).expect("Write error"); } } - let storage_root = client.storage_root(&account, at).unwrap_or(::util::SHA3_NULL_RLP); - if storage_root != ::util::SHA3_NULL_RLP { + let storage_root = client.storage_root(&account, at).unwrap_or(KECCAK_NULL_RLP); + if storage_root != KECCAK_NULL_RLP { out.write_fmt(format_args!(", \"storage_root\": \"0x{}\"", storage_root.hex())).expect("Write error"); if cmd.storage { out.write_fmt(format_args!(", \"storage\": {{")).expect("Write error"); diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index b5b949cc7..e181bf88d 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -17,387 +17,949 @@ #[macro_use] mod usage; mod presets; -use dir; usage! 
{ { - // Commands - cmd_daemon: bool, - cmd_wallet: bool, - cmd_account: bool, - cmd_new: bool, - cmd_list: bool, - cmd_export: bool, - cmd_blocks: bool, - cmd_state: bool, - cmd_import: bool, - cmd_signer: bool, - cmd_new_token: bool, - cmd_sign: bool, - cmd_reject: bool, - cmd_snapshot: bool, - cmd_restore: bool, - cmd_ui: bool, - cmd_dapp: bool, - cmd_tools: bool, - cmd_hash: bool, - cmd_kill: bool, - cmd_db: bool, + // CLI subcommands + // Subcommands must start with cmd_ and have '_' in place of '-' + // Sub-subcommands must start with the name of the subcommand + // Arguments must start with arg_ - // Arguments - arg_pid_file: String, - arg_file: Option, - arg_path: Vec, - arg_id: Option, + CMD cmd_ui { + "Manage ui", + } - // Flags - // -- Legacy Options - flag_geth: bool, - flag_testnet: bool, - flag_import_geth_keys: bool, - flag_datadir: Option, - flag_networkid: Option, - flag_peers: Option, - flag_nodekey: Option, - flag_nodiscover: bool, - flag_jsonrpc: bool, - flag_jsonrpc_off: bool, - flag_webapp: bool, - flag_dapps_off: bool, - flag_rpc: bool, - flag_rpcaddr: Option, - flag_rpcport: Option, - flag_rpcapi: Option, - flag_rpccorsdomain: Option, - flag_ipcdisable: bool, - flag_ipc_off: bool, - flag_ipcapi: Option, - flag_ipcpath: Option, - flag_gasprice: Option, - flag_etherbase: Option, - flag_extradata: Option, - flag_cache: Option, + CMD cmd_dapp + { + "Manage dapps", - // -- Miscellaneous Options - flag_version: bool, - flag_no_config: bool, + ARG arg_dapp_path: (Option) = None, + "", + "Path to the dapps", + } + + CMD cmd_daemon + { + "Use Parity as a daemon", + + ARG arg_daemon_pid_file: (Option) = None, + "", + "Path to the pid file", + } + + CMD cmd_account + { + "Manage accounts", + + CMD cmd_account_new { + "Create a new acount", + + ARG arg_account_new_password: (Option) = None, + "--password=[FILE]", + "Path to the password file", + } + + CMD cmd_account_list { + "List existing accounts", + } + + CMD cmd_account_import + { + "Import account", + + ARG arg_account_import_path : (Option>) = None, + "...", + "Path to the accounts", + } + } + + CMD cmd_wallet + { + "Manage wallet", + + CMD cmd_wallet_import + { + "Import wallet", + + ARG arg_wallet_import_password: (Option) = None, + "--password=[FILE]", + "Path to the password file", + + ARG arg_wallet_import_path: (Option) = None, + "", + "Path to the wallet", + } + } + + CMD cmd_import + { + "Import blockchain", + + ARG arg_import_format: (Option) = None, + "--format=[FORMAT]", + "Import in a given format. FORMAT must be either 'hex' or 'binary'. (default: auto)", + + ARG arg_import_file: (Option) = None, + "[FILE]", + "Path to the file to import from", + } + + CMD cmd_export + { + "Export blockchain", + + CMD cmd_export_blocks + { + "Export blocks", + + ARG arg_export_blocks_format: (Option) = None, + "--format=[FORMAT]", + "Export in a given format. FORMAT must be either 'hex' or 'binary'. 
(default: binary)", + + ARG arg_export_blocks_from: (String) = "1", + "--from=[BLOCK]", + "Export from block BLOCK, which may be an index or hash.", + + ARG arg_export_blocks_to: (String) = "latest", + "--to=[BLOCK]", + "Export to (including) block BLOCK, which may be an index, hash or latest.", + + ARG arg_export_blocks_file: (Option) = None, + "[FILE]", + "Path to the exported file", + } + + CMD cmd_export_state + { + "Export state", + + FLAG flag_export_state_no_storage: (bool) = false, + "--no-storage", + "Don't export account storage.", + + FLAG flag_export_state_no_code: (bool) = false, + "--no-code", + "Don't export account code.", + + ARG arg_export_state_min_balance: (Option) = None, + "--min-balance=[WEI]", + "Don't export accounts with balance less than specified.", + + ARG arg_export_state_max_balance: (Option) = None, + "--max-balance=[WEI]", + "Don't export accounts with balance greater than specified.", + + ARG arg_export_state_at: (String) = "latest", + "--at=[BLOCK]", + "Take a snapshot at the given block, which may be an index, hash, or latest. Note that taking snapshots at non-recent blocks will only work with --pruning archive", + + ARG arg_export_state_format: (Option) = None, + "--format=[FORMAT]", + "Export in a given format. FORMAT must be either 'hex' or 'binary'. (default: binary)", + + ARG arg_export_state_file: (Option) = None, + "[FILE]", + "Path to the exported file", + } + } + + CMD cmd_signer + { + "Manage signer", + + CMD cmd_signer_new_token { + "Generate new token", + } + + CMD cmd_signer_list { + "List", + } + + CMD cmd_signer_sign + { + "Sign", + + ARG arg_signer_sign_password: (Option) = None, + "--password=[FILE]", + "Path to the password file", + + ARG arg_signer_sign_id: (Option) = None, + "[ID]", + "ID", + } + + CMD cmd_signer_reject + { + "Reject", + + ARG arg_signer_reject_id: (Option) = None, + "", + "ID", + } + } + + CMD cmd_snapshot + { + "Make a snapshot of the database", + + ARG arg_snapshot_at: (String) = "latest", + "--at=[BLOCK]", + "Take a snapshot at the given block, which may be an index, hash, or latest. 
Note that taking snapshots at non-recent blocks will only work with --pruning archive", + + ARG arg_snapshot_file: (Option) = None, + "", + "Path to the file to export to", + } + + CMD cmd_restore + { + "Restore database from snapshot", + + ARG arg_restore_file: (Option) = None, + "[FILE]", + "Path to the file to restore from", + } + + CMD cmd_tools + { + "Tools", + + CMD cmd_tools_hash + { + "Hash a file", + + ARG arg_tools_hash_file: (Option) = None, + "", + "File", + } + } + + CMD cmd_db + { + "Manage the database representing the state of the blockchain on this system", + + CMD cmd_db_kill { + "Clean the database", + } + } } { - // -- Operating Options - flag_mode: String = "last", or |c: &Config| otry!(c.parity).mode.clone(), - flag_mode_timeout: u64 = 300u64, or |c: &Config| otry!(c.parity).mode_timeout.clone(), - flag_mode_alarm: u64 = 3600u64, or |c: &Config| otry!(c.parity).mode_alarm.clone(), - flag_auto_update: String = "critical", or |c: &Config| otry!(c.parity).auto_update.clone(), - flag_release_track: String = "current", or |c: &Config| otry!(c.parity).release_track.clone(), - flag_public_node: bool = false, or |c: &Config| otry!(c.parity).public_node.clone(), - flag_no_download: bool = false, or |c: &Config| otry!(c.parity).no_download.clone(), - flag_no_consensus: bool = false, or |c: &Config| otry!(c.parity).no_consensus.clone(), - flag_chain: String = "foundation", or |c: &Config| otry!(c.parity).chain.clone(), - flag_keys_path: String = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), - flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(), - flag_light: bool = false, or |c: &Config| otry!(c.parity).light, - flag_no_persistent_txqueue: bool = false, - or |c: &Config| otry!(c.parity).no_persistent_txqueue, + // Flags and arguments + ["Operating Options"] + FLAG flag_public_node: (bool) = false, or |c: &Config| otry!(c.parity).public_node.clone(), + "--public-node", + "Start Parity as a public web server. Account storage and transaction signing will be delegated to the UI.", - // -- Convenience Options - flag_config: String = "$BASE/config.toml", or |_| None, - flag_ports_shift: u16 = 0u16, - or |c: &Config| otry!(c.misc).ports_shift, - flag_unsafe_expose: bool = false, - or |c: &Config| otry!(c.misc).unsafe_expose, + FLAG flag_no_download: (bool) = false, or |c: &Config| otry!(c.parity).no_download.clone(), + "--no-download", + "Normally new releases will be downloaded ready for updating. This disables it. Not recommended.", - // -- Account Options - flag_unlock: Option = None, - or |c: &Config| otry!(c.account).unlock.as_ref().map(|vec| Some(vec.join(","))), - flag_password: Vec = Vec::new(), - or |c: &Config| otry!(c.account).password.clone(), - flag_keys_iterations: u32 = 10240u32, - or |c: &Config| otry!(c.account).keys_iterations.clone(), - flag_no_hardware_wallets: bool = false, - or |c: &Config| otry!(c.account).disable_hardware.clone(), - flag_fast_unlock: bool = false, - or |c: &Config| otry!(c.account).fast_unlock.clone(), + FLAG flag_no_consensus: (bool) = false, or |c: &Config| otry!(c.parity).no_consensus.clone(), + "--no-consensus", + "Force the binary to run even if there are known issues regarding consensus. Not recommended.", + FLAG flag_light: (bool) = false, or |c: &Config| otry!(c.parity).light, + "--light", + "Experimental: run in light client mode. Light clients synchronize a bare minimum of data and fetch necessary data on-demand from the network. Much lower in storage, potentially higher in bandwidth. 
Has no effect with subcommands.", - flag_force_ui: bool = false, - or |c: &Config| otry!(c.ui).force.clone(), - flag_no_ui: bool = false, - or |c: &Config| otry!(c.ui).disable.clone(), - flag_ui_port: u16 = 8180u16, - or |c: &Config| otry!(c.ui).port.clone(), - flag_ui_interface: String = "local", - or |c: &Config| otry!(c.ui).interface.clone(), - flag_ui_hosts: String = "none", - or |c: &Config| otry!(c.ui).hosts.as_ref().map(|vec| vec.join(",")), - flag_ui_path: String = "$BASE/signer", - or |c: &Config| otry!(c.ui).path.clone(), - // NOTE [todr] For security reasons don't put this to config files - flag_ui_no_validation: bool = false, or |_| None, + FLAG flag_force_direct: (bool) = false, or |_| None, + "--force-direct", + "Run the originally installed version of Parity, ignoring any updates that have since been installed.", - // -- Networking Options - flag_no_warp: bool = false, - or |c: &Config| otry!(c.network).warp.clone().map(|w| !w), - flag_port: u16 = 30303u16, - or |c: &Config| otry!(c.network).port.clone(), - flag_min_peers: u16 = 25u16, - or |c: &Config| otry!(c.network).min_peers.clone(), - flag_max_peers: u16 = 50u16, - or |c: &Config| otry!(c.network).max_peers.clone(), - flag_max_pending_peers: u16 = 64u16, - or |c: &Config| otry!(c.network).max_pending_peers.clone(), - flag_snapshot_peers: u16 = 0u16, - or |c: &Config| otry!(c.network).snapshot_peers.clone(), - flag_nat: String = "any", - or |c: &Config| otry!(c.network).nat.clone(), - flag_allow_ips: String = "all", - or |c: &Config| otry!(c.network).allow_ips.clone(), - flag_network_id: Option = None, - or |c: &Config| otry!(c.network).id.clone().map(Some), - flag_bootnodes: Option = None, - or |c: &Config| otry!(c.network).bootnodes.as_ref().map(|vec| Some(vec.join(","))), - flag_no_discovery: bool = false, - or |c: &Config| otry!(c.network).discovery.map(|d| !d).clone(), - flag_node_key: Option = None, - or |c: &Config| otry!(c.network).node_key.clone().map(Some), - flag_reserved_peers: Option = None, - or |c: &Config| otry!(c.network).reserved_peers.clone().map(Some), - flag_reserved_only: bool = false, - or |c: &Config| otry!(c.network).reserved_only.clone(), - flag_no_ancient_blocks: bool = false, or |_| None, - flag_no_serve_light: bool = false, - or |c: &Config| otry!(c.network).no_serve_light.clone(), + ARG arg_mode: (String) = "last", or |c: &Config| otry!(c.parity).mode.clone(), + "--mode=[MODE]", + "Set the operating mode. MODE can be one of: + last - Uses the last-used mode, active if none. + active - Parity continuously syncs the chain. + passive - Parity syncs initially, then sleeps and wakes regularly to resync. + dark - Parity syncs only when the RPC is active. 
+ offline - Parity doesn't sync.", - // -- API and Console Options - // RPC - flag_no_jsonrpc: bool = false, - or |c: &Config| otry!(c.rpc).disable.clone(), - flag_jsonrpc_port: u16 = 8545u16, - or |c: &Config| otry!(c.rpc).port.clone(), - flag_jsonrpc_interface: String = "local", - or |c: &Config| otry!(c.rpc).interface.clone(), - flag_jsonrpc_cors: Option = None, - or |c: &Config| otry!(c.rpc).cors.clone().map(Some), - flag_jsonrpc_apis: String = "web3,eth,pubsub,net,parity,parity_pubsub,traces,rpc,secretstore,shh,shh_pubsub", - or |c: &Config| otry!(c.rpc).apis.as_ref().map(|vec| vec.join(",")), - flag_jsonrpc_hosts: String = "none", - or |c: &Config| otry!(c.rpc).hosts.as_ref().map(|vec| vec.join(",")), - flag_jsonrpc_server_threads: Option = None, - or |c: &Config| otry!(c.rpc).server_threads.map(Some), - flag_jsonrpc_threads: usize = 0usize, - or |c: &Config| otry!(c.rpc).processing_threads, + ARG arg_mode_timeout: (u64) = 300u64, or |c: &Config| otry!(c.parity).mode_timeout.clone(), + "--mode-timeout=[SECS]", + "Specify the number of seconds before inactivity timeout occurs when mode is dark or passive", - // WS - flag_no_ws: bool = false, - or |c: &Config| otry!(c.websockets).disable.clone(), - flag_ws_port: u16 = 8546u16, - or |c: &Config| otry!(c.websockets).port.clone(), - flag_ws_interface: String = "local", - or |c: &Config| otry!(c.websockets).interface.clone(), - flag_ws_apis: String = "web3,eth,pubsub,net,parity,parity_pubsub,traces,rpc,secretstore,shh,shh_pubsub", - or |c: &Config| otry!(c.websockets).apis.as_ref().map(|vec| vec.join(",")), - flag_ws_origins: String = "chrome-extension://*,moz-extension://*", - or |c: &Config| otry!(c.websockets).origins.as_ref().map(|vec| vec.join(",")), - flag_ws_hosts: String = "none", - or |c: &Config| otry!(c.websockets).hosts.as_ref().map(|vec| vec.join(",")), + ARG arg_mode_alarm: (u64) = 3600u64, or |c: &Config| otry!(c.parity).mode_alarm.clone(), + "--mode-alarm=[SECS]", + "Specify the number of seconds before auto sleep reawake timeout occurs when mode is passive", - // IPC - flag_no_ipc: bool = false, - or |c: &Config| otry!(c.ipc).disable.clone(), - flag_ipc_path: String = if cfg!(windows) { r"\\.\pipe\jsonrpc.ipc" } else { "$BASE/jsonrpc.ipc" }, - or |c: &Config| otry!(c.ipc).path.clone(), - flag_ipc_apis: String = "web3,eth,pubsub,net,parity,parity_pubsub,parity_accounts,traces,rpc,secretstore,shh,shh_pubsub", - or |c: &Config| otry!(c.ipc).apis.as_ref().map(|vec| vec.join(",")), + ARG arg_auto_update: (String) = "critical", or |c: &Config| otry!(c.parity).auto_update.clone(), + "--auto-update=[SET]", + "Set a releases set to automatically update and install. + all - All updates in the our release track. + critical - Only consensus/security updates. + none - No updates will be auto-installed.", - // DAPPS - flag_no_dapps: bool = false, - or |c: &Config| otry!(c.dapps).disable.clone(), - flag_dapps_path: String = "$BASE/dapps", - or |c: &Config| otry!(c.dapps).path.clone(), + ARG arg_release_track: (String) = "current", or |c: &Config| otry!(c.parity).release_track.clone(), + "--release-track=[TRACK]", + "Set which release track we should use for updates. + stable - Stable releases. + beta - Beta releases. + nightly - Nightly releases (unstable). + testing - Testing releases (do not use). 
+ current - Whatever track this executable was released on", - // Secret Store - flag_no_secretstore: bool = false, - or |c: &Config| otry!(c.secretstore).disable.clone(), - flag_no_secretstore_http: bool = false, - or |c: &Config| otry!(c.secretstore).disable_http.clone(), - flag_no_secretstore_acl_check: bool = false, - or |c: &Config| otry!(c.secretstore).disable_acl_check.clone(), - flag_secretstore_secret: Option = None, - or |c: &Config| otry!(c.secretstore).self_secret.clone().map(Some), - flag_secretstore_nodes: String = "", - or |c: &Config| otry!(c.secretstore).nodes.as_ref().map(|vec| vec.join(",")), - flag_secretstore_interface: String = "local", - or |c: &Config| otry!(c.secretstore).interface.clone(), - flag_secretstore_port: u16 = 8083u16, - or |c: &Config| otry!(c.secretstore).port.clone(), - flag_secretstore_http_interface: String = "local", - or |c: &Config| otry!(c.secretstore).http_interface.clone(), - flag_secretstore_http_port: u16 = 8082u16, - or |c: &Config| otry!(c.secretstore).http_port.clone(), - flag_secretstore_path: String = "$BASE/secretstore", - or |c: &Config| otry!(c.secretstore).path.clone(), + ARG arg_chain: (String) = "foundation", or |c: &Config| otry!(c.parity).chain.clone(), + "--chain=[CHAIN]", + "Specify the blockchain type. CHAIN may be either a JSON chain specification file or olympic, frontier, homestead, mainnet, morden, ropsten, classic, expanse, testnet, kovan or dev.", - // IPFS - flag_ipfs_api: bool = false, - or |c: &Config| otry!(c.ipfs).enable.clone(), - flag_ipfs_api_port: u16 = 5001u16, - or |c: &Config| otry!(c.ipfs).port.clone(), - flag_ipfs_api_interface: String = "local", - or |c: &Config| otry!(c.ipfs).interface.clone(), - flag_ipfs_api_cors: Option = None, - or |c: &Config| otry!(c.ipfs).cors.clone().map(Some), - flag_ipfs_api_hosts: String = "none", - or |c: &Config| otry!(c.ipfs).hosts.as_ref().map(|vec| vec.join(",")), + ARG arg_keys_path: (String) = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), + "--keys-path=[PATH]", + "Specify the path for JSON key files to be found", - // -- Sealing/Mining Options - flag_author: Option = None, - or |c: &Config| otry!(c.mining).author.clone().map(Some), - flag_engine_signer: Option = None, - or |c: &Config| otry!(c.mining).engine_signer.clone().map(Some), - flag_force_sealing: bool = false, - or |c: &Config| otry!(c.mining).force_sealing.clone(), - flag_reseal_on_txs: String = "own", - or |c: &Config| otry!(c.mining).reseal_on_txs.clone(), - flag_reseal_on_uncle: bool = false, - or |c: &Config| otry!(c.mining).reseal_on_uncle.clone(), - flag_reseal_min_period: u64 = 2000u64, - or |c: &Config| otry!(c.mining).reseal_min_period.clone(), - flag_reseal_max_period: u64 = 120000u64, - or |c: &Config| otry!(c.mining).reseal_max_period.clone(), - flag_work_queue_size: usize = 20usize, - or |c: &Config| otry!(c.mining).work_queue_size.clone(), - flag_tx_gas_limit: Option = None, - or |c: &Config| otry!(c.mining).tx_gas_limit.clone().map(Some), - flag_tx_time_limit: Option = None, - or |c: &Config| otry!(c.mining).tx_time_limit.clone().map(Some), - flag_relay_set: String = "cheap", - or |c: &Config| otry!(c.mining).relay_set.clone(), - flag_min_gas_price: Option = None, - or |c: &Config| otry!(c.mining).min_gas_price.clone().map(Some), - flag_usd_per_tx: String = "0.0025", - or |c: &Config| otry!(c.mining).usd_per_tx.clone(), - flag_usd_per_eth: String = "auto", - or |c: &Config| otry!(c.mining).usd_per_eth.clone(), - flag_price_update_period: String = "hourly", - or |c: &Config| 
otry!(c.mining).price_update_period.clone(), - flag_gas_floor_target: String = "4700000", - or |c: &Config| otry!(c.mining).gas_floor_target.clone(), - flag_gas_cap: String = "6283184", - or |c: &Config| otry!(c.mining).gas_cap.clone(), - flag_extra_data: Option = None, - or |c: &Config| otry!(c.mining).extra_data.clone().map(Some), - flag_tx_queue_size: usize = 8192usize, - or |c: &Config| otry!(c.mining).tx_queue_size.clone(), - flag_tx_queue_mem_limit: u32 = 2u32, - or |c: &Config| otry!(c.mining).tx_queue_mem_limit.clone(), - flag_tx_queue_gas: String = "off", - or |c: &Config| otry!(c.mining).tx_queue_gas.clone(), - flag_tx_queue_strategy: String = "gas_price", - or |c: &Config| otry!(c.mining).tx_queue_strategy.clone(), - flag_tx_queue_ban_count: u16 = 1u16, - or |c: &Config| otry!(c.mining).tx_queue_ban_count.clone(), - flag_tx_queue_ban_time: u16 = 180u16, - or |c: &Config| otry!(c.mining).tx_queue_ban_time.clone(), - flag_remove_solved: bool = false, - or |c: &Config| otry!(c.mining).remove_solved.clone(), - flag_notify_work: Option = None, - or |c: &Config| otry!(c.mining).notify_work.as_ref().map(|vec| Some(vec.join(","))), - flag_refuse_service_transactions: bool = false, - or |c: &Config| otry!(c.mining).refuse_service_transactions.clone(), + ARG arg_identity: (String) = "", or |c: &Config| otry!(c.parity).identity.clone(), + "--identity=[NAME]", + "Specify your node's name.", - flag_stratum: bool = false, - or |c: &Config| Some(c.stratum.is_some()), - flag_stratum_interface: String = "local", - or |c: &Config| otry!(c.stratum).interface.clone(), - flag_stratum_port: u16 = 8008u16, - or |c: &Config| otry!(c.stratum).port.clone(), - flag_stratum_secret: Option = None, - or |c: &Config| otry!(c.stratum).secret.clone().map(Some), + ARG arg_base_path: (Option) = None, or |c: &Config| otry!(c.parity).base_path.clone(), + "-d, --base-path=[PATH]", + "Specify the base data storage path.", - // -- Footprint Options - flag_tracing: String = "auto", - or |c: &Config| otry!(c.footprint).tracing.clone(), - flag_pruning: String = "auto", - or |c: &Config| otry!(c.footprint).pruning.clone(), - flag_pruning_history: u64 = 64u64, - or |c: &Config| otry!(c.footprint).pruning_history.clone(), - flag_pruning_memory: usize = 32usize, - or |c: &Config| otry!(c.footprint).pruning_memory.clone(), - flag_cache_size_db: u32 = 32u32, - or |c: &Config| otry!(c.footprint).cache_size_db.clone(), - flag_cache_size_blocks: u32 = 8u32, - or |c: &Config| otry!(c.footprint).cache_size_blocks.clone(), - flag_cache_size_queue: u32 = 40u32, - or |c: &Config| otry!(c.footprint).cache_size_queue.clone(), - flag_cache_size_state: u32 = 25u32, - or |c: &Config| otry!(c.footprint).cache_size_state.clone(), - flag_cache_size: Option = None, - or |c: &Config| otry!(c.footprint).cache_size.clone().map(Some), - flag_fast_and_loose: bool = false, - or |c: &Config| otry!(c.footprint).fast_and_loose.clone(), - flag_db_compaction: String = "auto", - or |c: &Config| otry!(c.footprint).db_compaction.clone(), - flag_fat_db: String = "auto", - or |c: &Config| otry!(c.footprint).fat_db.clone(), - flag_scale_verifiers: bool = false, - or |c: &Config| otry!(c.footprint).scale_verifiers.clone(), - flag_num_verifiers: Option = None, - or |c: &Config| otry!(c.footprint).num_verifiers.clone().map(Some), + ARG arg_db_path: (Option) = None, or |c: &Config| otry!(c.parity).db_path.clone(), + "--db-path=[PATH]", + "Specify the database directory path", - // -- Import/Export Options - flag_from: String = "1", or |_| None, - flag_to: String = 
"latest", or |_| None, - flag_format: Option = None, or |_| None, - flag_no_seal_check: bool = false, or |_| None, - flag_no_storage: bool = false, or |_| None, - flag_no_code: bool = false, or |_| None, - flag_min_balance: Option = None, or |_| None, - flag_max_balance: Option = None, or |_| None, + ["Convenience options"] + FLAG flag_unsafe_expose: (bool) = false, or |c: &Config| otry!(c.misc).unsafe_expose, + "--unsafe-expose", + "All servers will listen on external interfaces and will be remotely accessible. It's equivalent with setting the following: --{{ws,jsonrpc,ui,ipfs,secret_store,stratum}}-interface=all --*-hosts=all + This option is UNSAFE and should be used with great care!", - // -- Snapshot Optons - flag_at: String = "latest", or |_| None, - flag_no_periodic_snapshot: bool = false, - or |c: &Config| otry!(c.snapshots).disable_periodic.clone(), + ARG arg_config: (String) = "$BASE/config.toml", or |_| None, + "-c, --config=[CONFIG]", + "Specify a configuration. CONFIG may be either a configuration file or a preset: dev, insecure, dev-insecure, mining, or non-standard-ports.", - // -- Virtual Machine Options - flag_jitvm: bool = false, - or |c: &Config| otry!(c.vm).jit.clone(), + ARG arg_ports_shift: (u16) = 0u16, or |c: &Config| otry!(c.misc).ports_shift, + "--ports-shift=[SHIFT]", + "Add SHIFT to all port numbers Parity is listening on. Includes network port and all servers (RPC, WebSockets, UI, IPFS, SecretStore).", - // -- Miscellaneous Options - flag_ntp_servers: String = "0.parity.pool.ntp.org:123,1.parity.pool.ntp.org:123,2.parity.pool.ntp.org:123,3.parity.pool.ntp.org:123", - or |c: &Config| otry!(c.misc).ntp_servers.clone().map(|vec| vec.join(",")), - flag_logging: Option = None, - or |c: &Config| otry!(c.misc).logging.clone().map(Some), - flag_log_file: Option = None, - or |c: &Config| otry!(c.misc).log_file.clone().map(Some), - flag_no_color: bool = false, - or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(), + ["Account options"] + FLAG flag_no_hardware_wallets: (bool) = false, or |c: &Config| otry!(c.account).disable_hardware.clone(), + "--no-hardware-wallets", + "Disables hardware wallet support.", - // -- Whisper options - flag_whisper: bool = false, - or |c: &Config| otry!(c.whisper).enabled, - flag_whisper_pool_size: usize = 10usize, - or |c: &Config| otry!(c.whisper).pool_size.clone(), + FLAG flag_fast_unlock: (bool) = false, or |c: &Config| otry!(c.account).fast_unlock.clone(), + "--fast-unlock", + "Use drasticly faster unlocking mode. This setting causes raw secrets to be stored unprotected in memory, so use with care.", + + ARG arg_keys_iterations: (u32) = 10240u32, or |c: &Config| otry!(c.account).keys_iterations.clone(), + "--keys-iterations=[NUM]", + "Specify the number of iterations to use when deriving key from the password (bigger is more secure)", + + ARG arg_unlock: (Option) = None, or |c: &Config| otry!(c.account).unlock.as_ref().map(|vec| vec.join(",")), + "--unlock=[ACCOUNTS]", + "Unlock ACCOUNTS for the duration of the execution. ACCOUNTS is a comma-delimited list of addresses. Implies --no-ui.", + + ARG arg_password: (Vec) = Vec::new(), or |c: &Config| otry!(c.account).password.clone(), + "--password=[FILE]...", + "Provide a file containing a password for unlocking an account. 
Leading and trailing whitespace is trimmed.", + + ["UI options"] + FLAG flag_force_ui: (bool) = false, or |c: &Config| otry!(c.ui).force.clone(), + "--force-ui", + "Enable Trusted UI WebSocket endpoint, even when --unlock is in use.", + + FLAG flag_no_ui: (bool) = false, or |c: &Config| otry!(c.ui).disable.clone(), + "--no-ui", + "Disable Trusted UI WebSocket endpoint.", + + // NOTE [todr] For security reasons don't put this to config files + FLAG flag_ui_no_validation: (bool) = false, or |_| None, + "--ui-no-validation", + "Disable Origin and Host headers validation for Trusted UI. WARNING: INSECURE. Used only for development.", + + ARG arg_ui_interface: (String) = "local", or |c: &Config| otry!(c.ui).interface.clone(), + "--ui-interface=[IP]", + "Specify the hostname portion of the Trusted UI server, IP should be an interface's IP address, or local.", + + ARG arg_ui_hosts: (String) = "none", or |c: &Config| otry!(c.ui).hosts.as_ref().map(|vec| vec.join(",")), + "--ui-hosts=[HOSTS]", + "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\",.", + + ARG arg_ui_path: (String) = "$BASE/signer", or |c: &Config| otry!(c.ui).path.clone(), + "--ui-path=[PATH]", + "Specify directory where Trusted UIs tokens should be stored.", + + ARG arg_ui_port: (u16) = 8180u16, or |c: &Config| otry!(c.ui).port.clone(), + "--ui-port=[PORT]", + "Specify the port of Trusted UI server.", + + ["Networking options"] + FLAG flag_no_warp: (bool) = false, or |c: &Config| otry!(c.network).warp.clone().map(|w| !w), + "--no-warp", + "Disable syncing from the snapshot over the network.", + + FLAG flag_no_discovery: (bool) = false, or |c: &Config| otry!(c.network).discovery.map(|d| !d).clone(), + "--no-discovery", + "Disable new peer discovery.", + + FLAG flag_reserved_only: (bool) = false, or |c: &Config| otry!(c.network).reserved_only.clone(), + "--reserved-only", + "Connect only to reserved nodes.", + + FLAG flag_no_ancient_blocks: (bool) = false, or |_| None, + "--no-ancient-blocks", + "Disable downloading old blocks after snapshot restoration or warp sync.", + + FLAG flag_no_serve_light: (bool) = false, or |c: &Config| otry!(c.network).no_serve_light.clone(), + "--no-serve-light", + "Disable serving of light peers.", + + ARG arg_port: (u16) = 30303u16, or |c: &Config| otry!(c.network).port.clone(), + "--port=[PORT]", + "Override the port on which the node should listen.", + + ARG arg_min_peers: (u16) = 25u16, or |c: &Config| otry!(c.network).min_peers.clone(), + "--min-peers=[NUM]", + "Try to maintain at least NUM peers.", + + ARG arg_max_peers: (u16) = 50u16, or |c: &Config| otry!(c.network).max_peers.clone(), + "--max-peers=[NUM]", + "Allow up to NUM peers.", + + ARG arg_snapshot_peers: (u16) = 0u16, or |c: &Config| otry!(c.network).snapshot_peers.clone(), + "--snapshot-peers=[NUM]", + "Allow additional NUM peers for a snapshot sync.", + + ARG arg_nat: (String) = "any", or |c: &Config| otry!(c.network).nat.clone(), + "--nat=[METHOD]", + "Specify method to use for determining public address. Must be one of: any, none, upnp, extip:.", + + ARG arg_allow_ips: (String) = "all", or |c: &Config| otry!(c.network).allow_ips.clone(), + "--allow-ips=[FILTER]", + "Filter outbound connections. 
Must be one of: private - connect to private network IP addresses only; public - connect to public network IP addresses only; all - connect to any IP address.", + + ARG arg_max_pending_peers: (u16) = 64u16, or |c: &Config| otry!(c.network).max_pending_peers.clone(), + "--max-pending-peers=[NUM]", + "Allow up to NUM pending connections.", + + ARG arg_network_id: (Option) = None, or |c: &Config| otry!(c.network).id.clone(), + "--network-id=[INDEX]", + "Override the network identifier from the chain we are on.", + + ARG arg_bootnodes: (Option) = None, or |c: &Config| otry!(c.network).bootnodes.as_ref().map(|vec| vec.join(",")), + "--bootnodes=[NODES]", + "Override the bootnodes from our chain. NODES should be comma-delimited enodes.", + + ARG arg_node_key: (Option) = None, or |c: &Config| otry!(c.network).node_key.clone(), + "--node-key=[KEY]", + "Specify node secret key, either as 64-character hex string or input to SHA3 operation.", + + ARG arg_reserved_peers: (Option) = None, or |c: &Config| otry!(c.network).reserved_peers.clone(), + "--reserved-peers=[FILE]", + "Provide a file containing enodes, one per line. These nodes will always have a reserved slot on top of the normal maximum peers.", + + ["API and console options – RPC"] + FLAG flag_no_jsonrpc: (bool) = false, or |c: &Config| otry!(c.rpc).disable.clone(), + "--no-jsonrpc", + "Disable the JSON-RPC API server.", + + ARG arg_jsonrpc_port: (u16) = 8545u16, or |c: &Config| otry!(c.rpc).port.clone(), + "--jsonrpc-port=[PORT]", + "Specify the port portion of the JSONRPC API server.", + + ARG arg_jsonrpc_interface: (String) = "local", or |c: &Config| otry!(c.rpc).interface.clone(), + "--jsonrpc-interface=[IP]", + "Specify the hostname portion of the JSONRPC API server, IP should be an interface's IP address, or all (all interfaces) or local.", + + ARG arg_jsonrpc_apis: (String) = "web3,eth,pubsub,net,parity,parity_pubsub,traces,rpc,secretstore,shh,shh_pubsub", or |c: &Config| otry!(c.rpc).apis.as_ref().map(|vec| vec.join(",")), + "--jsonrpc-apis=[APIS]", + "Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited list of API name. Possible name are all, safe, web3, eth, net, personal, parity, parity_set, traces, rpc, parity_accounts. You can also disable a specific API by putting '-' in the front: all,-personal.", + + ARG arg_jsonrpc_hosts: (String) = "none", or |c: &Config| otry!(c.rpc).hosts.as_ref().map(|vec| vec.join(",")), + "--jsonrpc-hosts=[HOSTS]", + "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\",.", + + ARG arg_jsonrpc_threads: (usize) = 0usize, or |c: &Config| otry!(c.rpc).processing_threads, + "--jsonrpc-threads=[THREADS]", + "Turn on additional processing threads in all RPC servers. Setting this to non-zero value allows parallel cpu-heavy queries execution.", + + ARG arg_jsonrpc_cors: (Option) = None, or |c: &Config| otry!(c.rpc).cors.clone(), + "--jsonrpc-cors=[URL]", + "Specify CORS header for JSON-RPC API responses.", + + ARG arg_jsonrpc_server_threads: (Option) = None, or |c: &Config| otry!(c.rpc).server_threads, + "--jsonrpc-server-threads=[NUM]", + "Enables experimental faster implementation of JSON-RPC server. 
Requires Dapps server to be disabled using --no-dapps.", + + ["API and console options – WebSockets"] + FLAG flag_no_ws: (bool) = false, or |c: &Config| otry!(c.websockets).disable.clone(), + "--no-ws", + "Disable the WebSockets server.", + + ARG arg_ws_port: (u16) = 8546u16, or |c: &Config| otry!(c.websockets).port.clone(), + "--ws-port=[PORT]", + "Specify the port portion of the WebSockets server.", + + ARG arg_ws_interface: (String) = "local", or |c: &Config| otry!(c.websockets).interface.clone(), + "--ws-interface=[IP]", + "Specify the hostname portion of the WebSockets server, IP should be an interface's IP address, or all (all interfaces) or local.", + + ARG arg_ws_apis: (String) = "web3,eth,pubsub,net,parity,parity_pubsub,traces,rpc,secretstore,shh,shh_pubsub", or |c: &Config| otry!(c.websockets).apis.as_ref().map(|vec| vec.join(",")), + "--ws-apis=[APIS]", + "Specify the APIs available through the WebSockets interface. APIS is a comma-delimited list of API name. Possible name are web3, eth, pubsub, net, personal, parity, parity_set, traces, rpc, parity_accounts..", + + ARG arg_ws_origins: (String) = "chrome-extension://*,moz-extension://*", or |c: &Config| otry!(c.websockets).origins.as_ref().map(|vec| vec.join(",")), + "--ws-origins=[URL]", + "Specify Origin header values allowed to connect. Special options: \"all\", \"none\".", + + ARG arg_ws_hosts: (String) = "none", or |c: &Config| otry!(c.websockets).hosts.as_ref().map(|vec| vec.join(",")), + "--ws-hosts=[HOSTS]", + "List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\",.", + + ["API and console options – IPC"] + FLAG flag_no_ipc: (bool) = false, or |c: &Config| otry!(c.ipc).disable.clone(), + "--no-ipc", + "Disable JSON-RPC over IPC service.", + + ARG arg_ipc_path: (String) = if cfg!(windows) { r"\\.\pipe\jsonrpc.ipc" } else { "$BASE/jsonrpc.ipc" }, or |c: &Config| otry!(c.ipc).path.clone(), + "--ipc-path=[PATH]", + "Specify custom path for JSON-RPC over IPC service.", + + ARG arg_ipc_apis: (String) = "web3,eth,pubsub,net,parity,parity_pubsub,parity_accounts,traces,rpc,secretstore,shh,shh_pubsub", or |c: &Config| otry!(c.ipc).apis.as_ref().map(|vec| vec.join(",")), + "--ipc-apis=[APIS]", + "Specify custom API set available via JSON-RPC over IPC.", + + ["API and console options – Dapps"] + FLAG flag_no_dapps: (bool) = false, or |c: &Config| otry!(c.dapps).disable.clone(), + "--no-dapps", + "Disable the Dapps server (e.g. status page).", + + ARG arg_dapps_path: (String) = "$BASE/dapps", or |c: &Config| otry!(c.dapps).path.clone(), + "--dapps-path=[PATH]", + "Specify directory where dapps should be installed.", + + ["API and console options – IPFS"] + FLAG flag_ipfs_api: (bool) = false, or |c: &Config| otry!(c.ipfs).enable.clone(), + "--ipfs-api", + "Enable IPFS-compatible HTTP API.", + + ARG arg_ipfs_api_port: (u16) = 5001u16, or |c: &Config| otry!(c.ipfs).port.clone(), + "--ipfs-api-port=[PORT]", + "Configure on which port the IPFS HTTP API should listen.", + + ARG arg_ipfs_api_interface: (String) = "local", or |c: &Config| otry!(c.ipfs).interface.clone(), + "--ipfs-api-interface=[IP]", + "Specify the hostname portion of the IPFS API server, IP should be an interface's IP address or local.", + + ARG arg_ipfs_api_hosts: (String) = "none", or |c: &Config| otry!(c.ipfs).hosts.as_ref().map(|vec| vec.join(",")), + "--ipfs-api-hosts=[HOSTS]", + "List of allowed Host header values. 
This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: \"all\", \"none\".", + + ARG arg_ipfs_api_cors: (Option) = None, or |c: &Config| otry!(c.ipfs).cors.clone(), + "--ipfs-api-cors=[URL]", + "Specify CORS header for IPFS API responses.", + + ["Secret store options"] + FLAG flag_no_secretstore: (bool) = false, or |c: &Config| otry!(c.secretstore).disable.clone(), + "--no-secretstore", + "Disable Secret Store functionality.", + + FLAG flag_no_secretstore_http: (bool) = false, or |c: &Config| otry!(c.secretstore).disable_http.clone(), + "--no-secretstore-http", + "Disable Secret Store HTTP API.", + + FLAG flag_no_secretstore_acl_check: (bool) = false, or |c: &Config| otry!(c.secretstore).disable_acl_check.clone(), + "--no-acl-check", + "Disable ACL check (useful for test environments).", + + ARG arg_secretstore_nodes: (String) = "", or |c: &Config| otry!(c.secretstore).nodes.as_ref().map(|vec| vec.join(",")), + "--secretstore-nodes=[NODES]", + "Comma-separated list of other secret store cluster nodes in form NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT.", + + ARG arg_secretstore_interface: (String) = "local", or |c: &Config| otry!(c.secretstore).interface.clone(), + "--secretstore-interface=[IP]", + "Specify the hostname portion for listening to Secret Store Key Server internal requests, IP should be an interface's IP address, or local.", + + ARG arg_secretstore_port: (u16) = 8083u16, or |c: &Config| otry!(c.secretstore).port.clone(), + "--secretstore-port=[PORT]", + "Specify the port portion for listening to Secret Store Key Server internal requests.", + + ARG arg_secretstore_http_interface: (String) = "local", or |c: &Config| otry!(c.secretstore).http_interface.clone(), + "--secretstore-http-interface=[IP]", + "Specify the hostname portion for listening to Secret Store Key Server HTTP requests, IP should be an interface's IP address, or local.", + + ARG arg_secretstore_http_port: (u16) = 8082u16, or |c: &Config| otry!(c.secretstore).http_port.clone(), + "--secretstore-http-port=[PORT]", + "Specify the port portion for listening to Secret Store Key Server HTTP requests.", + + ARG arg_secretstore_path: (String) = "$BASE/secretstore", or |c: &Config| otry!(c.secretstore).path.clone(), + "--secretstore-path=[PATH]", + "Specify directory where Secret Store should save its data..", + + ARG arg_secretstore_secret: (Option) = None, or |c: &Config| otry!(c.secretstore).self_secret.clone(), + "--secretstore-secret=[SECRET]", + "Hex-encoded secret key of this node.", + + ["Sealing/Mining options"] + FLAG flag_force_sealing: (bool) = false, or |c: &Config| otry!(c.mining).force_sealing.clone(), + "--force-sealing", + "Force the node to author new blocks as if it were always sealing/mining.", + + FLAG flag_reseal_on_uncle: (bool) = false, or |c: &Config| otry!(c.mining).reseal_on_uncle.clone(), + "--reseal-on-uncle", + "Force the node to author new blocks when a new uncle block is imported.", + + FLAG flag_remove_solved: (bool) = false, or |c: &Config| otry!(c.mining).remove_solved.clone(), + "--remove-solved", + "Move solved blocks from the work package queue instead of cloning them. 
This gives a slightly faster import speed, but means that extra solutions submitted for the same work package will go unused.", + + FLAG flag_refuse_service_transactions: (bool) = false, or |c: &Config| otry!(c.mining).refuse_service_transactions.clone(), + "--refuse-service-transactions", + "Always refuse service transactions..", + + FLAG flag_no_persistent_txqueue: (bool) = false, or |c: &Config| otry!(c.parity).no_persistent_txqueue, + "--no-persistent-txqueue", + "Don't save pending local transactions to disk to be restored whenever the node restarts.", + + FLAG flag_stratum: (bool) = false, or |c: &Config| Some(c.stratum.is_some()), + "--stratum", + "Run Stratum server for miner push notification.", + + ARG arg_reseal_on_txs: (String) = "own", or |c: &Config| otry!(c.mining).reseal_on_txs.clone(), + "--reseal-on-txs=[SET]", + "Specify which transactions should force the node to reseal a block. SET is one of: none - never reseal on new transactions; own - reseal only on a new local transaction; ext - reseal only on a new external transaction; all - reseal on all new transactions.", + + ARG arg_reseal_min_period: (u64) = 2000u64, or |c: &Config| otry!(c.mining).reseal_min_period.clone(), + "--reseal-min-period=[MS]", + "Specify the minimum time between reseals from incoming transactions. MS is time measured in milliseconds.", + + ARG arg_reseal_max_period: (u64) = 120000u64, or |c: &Config| otry!(c.mining).reseal_max_period.clone(), + "--reseal-max-period=[MS]", + "Specify the maximum time since last block to enable force-sealing. MS is time measured in milliseconds.", + + ARG arg_work_queue_size: (usize) = 20usize, or |c: &Config| otry!(c.mining).work_queue_size.clone(), + "--work-queue-size=[ITEMS]", + "Specify the number of historical work packages which are kept cached lest a solution is found for them later. High values take more memory but result in fewer unusable solutions.", + + ARG arg_relay_set: (String) = "cheap", or |c: &Config| otry!(c.mining).relay_set.clone(), + "--relay-set=[SET]", + "Set of transactions to relay. SET may be: cheap - Relay any transaction in the queue (this may include invalid transactions); strict - Relay only executed transactions (this guarantees we don't relay invalid transactions, but means we relay nothing if not mining); lenient - Same as strict when mining, and cheap when not.", + + ARG arg_usd_per_tx: (String) = "0.0025", or |c: &Config| otry!(c.mining).usd_per_tx.clone(), + "--usd-per-tx=[USD]", + "Amount of USD to be paid for a basic transaction. The minimum gas price is set accordingly.", + + ARG arg_usd_per_eth: (String) = "auto", or |c: &Config| otry!(c.mining).usd_per_eth.clone(), + "--usd-per-eth=[SOURCE]", + "USD value of a single ETH. SOURCE may be either an amount in USD, a web service or 'auto' to use each web service in turn and fallback on the last known good value.", + + ARG arg_price_update_period: (String) = "hourly", or |c: &Config| otry!(c.mining).price_update_period.clone(), + "--price-update-period=[T]", + "T will be allowed to pass between each gas price update. 
T may be daily, hourly, a number of seconds, or a time string of the form \"2 days\", \"30 minutes\" etc..", + + ARG arg_gas_floor_target: (String) = "4700000", or |c: &Config| otry!(c.mining).gas_floor_target.clone(), + "--gas-floor-target=[GAS]", + "Amount of gas per block to target when sealing a new block.", + + ARG arg_gas_cap: (String) = "6283184", or |c: &Config| otry!(c.mining).gas_cap.clone(), + "--gas-cap=[GAS]", + "A cap on how large we will raise the gas limit per block due to transaction volume.", + + ARG arg_tx_queue_mem_limit: (u32) = 2u32, or |c: &Config| otry!(c.mining).tx_queue_mem_limit.clone(), + "--tx-queue-mem-limit=[MB]", + "Maximum amount of memory that can be used by the transaction queue. Setting this parameter to 0 disables limiting.", + + ARG arg_tx_queue_size: (usize) = 8192usize, or |c: &Config| otry!(c.mining).tx_queue_size.clone(), + "--tx-queue-size=[LIMIT]", + "Maximum amount of transactions in the queue (waiting to be included in next block).", + + ARG arg_tx_queue_gas: (String) = "off", or |c: &Config| otry!(c.mining).tx_queue_gas.clone(), + "--tx-queue-gas=[LIMIT]", + "Maximum amount of total gas for external transactions in the queue. LIMIT can be either an amount of gas or 'auto' or 'off'. 'auto' sets the limit to be 20x the current block gas limit..", + + ARG arg_tx_queue_strategy: (String) = "gas_price", or |c: &Config| otry!(c.mining).tx_queue_strategy.clone(), + "--tx-queue-strategy=[S]", + "Prioritization strategy used to order transactions in the queue. S may be: gas - Prioritize txs with low gas limit; gas_price - Prioritize txs with high gas price; gas_factor - Prioritize txs using gas price and gas limit ratio.", + + ARG arg_tx_queue_ban_count: (u16) = 1u16, or |c: &Config| otry!(c.mining).tx_queue_ban_count.clone(), + "--tx-queue-ban-count=[C]", + "Number of times maximal time for execution (--tx-time-limit) can be exceeded before banning sender/recipient/code.", + + ARG arg_tx_queue_ban_time: (u16) = 180u16, or |c: &Config| otry!(c.mining).tx_queue_ban_time.clone(), + "--tx-queue-ban-time=[SEC]", + "Banning time (in seconds) for offenders of specified execution time limit. Also number of offending actions have to reach the threshold within that time.", + + ARG arg_stratum_interface: (String) = "local", or |c: &Config| otry!(c.stratum).interface.clone(), + "--stratum-interface=[IP]", + "Interface address for Stratum server.", + + ARG arg_stratum_port: (u16) = 8008u16, or |c: &Config| otry!(c.stratum).port.clone(), + "--stratum-port=[PORT]", + "Port for Stratum server to listen on.", + + ARG arg_min_gas_price: (Option) = None, or |c: &Config| otry!(c.mining).min_gas_price.clone(), + "--min-gas-price=[STRING]", + "Minimum amount of Wei per GAS to be paid for a transaction to be accepted for mining. Overrides --basic-tx-usd.", + + ARG arg_author: (Option) = None, or |c: &Config| otry!(c.mining).author.clone(), + "--author=[ADDRESS]", + "Specify the block author (aka \"coinbase\") address for sending block rewards from sealed blocks. NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION.", // Sealing/Mining Option + + ARG arg_engine_signer: (Option) = None, or |c: &Config| otry!(c.mining).engine_signer.clone(), + "--engine-signer=[ADDRESS]", + "Specify the address which should be used to sign consensus messages and issue blocks. 
Relevant only to non-PoW chains.", + + ARG arg_tx_gas_limit: (Option) = None, or |c: &Config| otry!(c.mining).tx_gas_limit.clone(), + "--tx-gas-limit=[GAS]", + "Apply a limit of GAS as the maximum amount of gas a single transaction may have for it to be mined.", + + ARG arg_tx_time_limit: (Option) = None, or |c: &Config| otry!(c.mining).tx_time_limit.clone(), + "--tx-time-limit=[MS]", + "Maximal time for processing single transaction. If enabled senders/recipients/code of transactions offending the limit will be banned from being included in transaction queue for 180 seconds.", + + ARG arg_extra_data: (Option) = None, or |c: &Config| otry!(c.mining).extra_data.clone(), + "--extra-data=[STRING]", + "Specify a custom extra-data for authored blocks, no more than 32 characters.", + + ARG arg_notify_work: (Option) = None, or |c: &Config| otry!(c.mining).notify_work.as_ref().map(|vec| vec.join(",")), + "--notify-work=[URLS]", + "URLs to which work package notifications are pushed. URLS should be a comma-delimited list of HTTP URLs.", + + ARG arg_stratum_secret: (Option) = None, or |c: &Config| otry!(c.stratum).secret.clone(), + "--stratum-secret=[STRING]", + "Secret for authorizing Stratum server for peers.", + + ["Internal Options"] + FLAG flag_can_restart: (bool) = false, or |_| None, + "--can-restart", + "Executable will auto-restart if exiting with 69", + + ["Miscellaneous options"] + FLAG flag_no_color: (bool) = false, or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(), + "--no-color", + "Don't use terminal color codes in output.", + + FLAG flag_version: (bool) = false, or |_| None, + "-v, --version", + "Show information about version.", + + FLAG flag_no_config: (bool) = false, or |_| None, + "--no-config", + "Don't load a configuration file.", + + ARG arg_ntp_servers: (String) = "0.parity.pool.ntp.org:123,1.parity.pool.ntp.org:123,2.parity.pool.ntp.org:123,3.parity.pool.ntp.org:123", or |c: &Config| otry!(c.misc).ntp_servers.clone().map(|vec| vec.join(",")), + "--ntp-servers=[HOSTS]", + "Comma separated list of NTP servers to provide current time (host:port). Used to verify node health. Parity uses pool.ntp.org NTP servers; consider joining the pool: http://www.pool.ntp.org/join.html", + + ARG arg_logging: (Option) = None, or |c: &Config| otry!(c.misc).logging.clone(), + "-l, --logging=[LOGGING]", + "Specify the logging level. Must conform to the same format as RUST_LOG.", + + ARG arg_log_file: (Option) = None, or |c: &Config| otry!(c.misc).log_file.clone(), + "--log-file=[FILENAME]", + "Specify a filename into which logging should be appended.", + + ["Footprint options"] + FLAG flag_fast_and_loose: (bool) = false, or |c: &Config| otry!(c.footprint).fast_and_loose.clone(), + "--fast-and-loose", + "Disables DB WAL, which gives a significant speed up but means an unclean exit is unrecoverable.", + + FLAG flag_scale_verifiers: (bool) = false, or |c: &Config| otry!(c.footprint).scale_verifiers.clone(), + "--scale-verifiers", + "Automatically scale amount of verifier threads based on workload. Not guaranteed to be faster.", + + ARG arg_tracing: (String) = "auto", or |c: &Config| otry!(c.footprint).tracing.clone(), + "--tracing=[BOOL]", + "Indicates if full transaction tracing should be enabled. Works only if client had been fully synced with tracing enabled. BOOL may be one of auto, on, off. 
auto uses last used value of this option (off if it does not exist).", // footprint option + + ARG arg_pruning: (String) = "auto", or |c: &Config| otry!(c.footprint).pruning.clone(), + "--pruning=[METHOD]", + "Configure pruning of the state/storage trie. METHOD may be one of auto, archive, fast: archive - keep all state trie data. No pruning. fast - maintain journal overlay. Fast but 50MB used. auto - use the method most recently synced or default to fast if none synced.", + + ARG arg_pruning_history: (u64) = 64u64, or |c: &Config| otry!(c.footprint).pruning_history.clone(), + "--pruning-history=[NUM]", + "Set a minimum number of recent states to keep when pruning is active.", + + ARG arg_pruning_memory: (usize) = 32usize, or |c: &Config| otry!(c.footprint).pruning_memory.clone(), + "--pruning-memory=[MB]", + "The ideal amount of memory in megabytes to use to store recent states. As many states as possible will be kept within this limit, and at least --pruning-history states will always be kept.", + + ARG arg_cache_size_db: (u32) = 32u32, or |c: &Config| otry!(c.footprint).cache_size_db.clone(), + "--cache-size-db=[MB]", + "Override database cache size.", + + ARG arg_cache_size_blocks: (u32) = 8u32, or |c: &Config| otry!(c.footprint).cache_size_blocks.clone(), + "--cache-size-blocks=[MB]", + "Specify the preferred size of the blockchain cache in megabytes.", + + ARG arg_cache_size_queue: (u32) = 40u32, or |c: &Config| otry!(c.footprint).cache_size_queue.clone(), + "--cache-size-queue=[MB]", + "Specify the maximum size of memory to use for block queue.", + + ARG arg_cache_size_state: (u32) = 25u32, or |c: &Config| otry!(c.footprint).cache_size_state.clone(), + "--cache-size-state=[MB]", + "Specify the maximum size of memory to use for the state cache.", + + ARG arg_db_compaction: (String) = "auto", or |c: &Config| otry!(c.footprint).db_compaction.clone(), + "--db-compaction=[TYPE]", + "Database compaction type. TYPE may be one of: ssd - suitable for SSDs and fast HDDs; hdd - suitable for slow HDDs; auto - determine automatically.", + + ARG arg_fat_db: (String) = "auto", or |c: &Config| otry!(c.footprint).fat_db.clone(), + "--fat-db=[BOOL]", + "Build appropriate information to allow enumeration of all accounts and storage keys. Doubles the size of the state database. 
BOOL may be one of on, off or auto.", + + ARG arg_cache_size: (Option) = None, or |c: &Config| otry!(c.footprint).cache_size.clone(), + "--cache-size=[MB]", + "Set total amount of discretionary memory to use for the entire system, overrides other cache and queue options.", + + ARG arg_num_verifiers: (Option) = None, or |c: &Config| otry!(c.footprint).num_verifiers.clone(), + "--num-verifiers=[INT]", + "Amount of verifier threads to use or to begin with, if verifier auto-scaling is enabled.", + + ["Import/export options"] + FLAG flag_no_seal_check: (bool) = false, or |_| None, + "--no-seal-check", + "Skip block seal check.", + + ["Snapshot options"] + FLAG flag_no_periodic_snapshot: (bool) = false, or |c: &Config| otry!(c.snapshots).disable_periodic.clone(), + "--no-periodic-snapshot", + "Disable automated snapshots which usually occur once every 10000 blocks.", + + ["Virtual Machine options"] + FLAG flag_jitvm: (bool) = false, or |c: &Config| otry!(c.vm).jit.clone(), + "--jitvm", + "Enable the JIT VM.", + + ["Whisper options"] + FLAG flag_whisper: (bool) = false, or |c: &Config| otry!(c.whisper).enabled, + "--whisper", + "Enable the Whisper network.", + + ARG arg_whisper_pool_size: (usize) = 10usize, or |c: &Config| otry!(c.whisper).pool_size.clone(), + "--whisper-pool-size=[MB]", + "Target size of the whisper message pool in megabytes.", + + ["Legacy options"] + FLAG flag_dapps_apis_all: (bool) = false, or |_| None, + "--dapps-apis-all", + "Dapps server is merged with RPC server. Use --jsonrpc-apis.", + + FLAG flag_geth: (bool) = false, or |_| None, + "--geth", + "Run in Geth-compatibility mode. Sets the IPC path to be the same as Geth's. Overrides the --ipc-path and --ipcpath options. Alters RPCs to reflect Geth bugs. Includes the personal_ RPC by default.", + + FLAG flag_testnet: (bool) = false, or |_| None, + "--testnet", + "Testnet mode. Equivalent to --chain testnet. Overrides the --keys-path option.", + + FLAG flag_import_geth_keys: (bool) = false, or |_| None, + "--import-geth-keys", + "Attempt to import keys from Geth client.", + + FLAG flag_ipcdisable: (bool) = false, or |_| None, + "--ipcdisable", + "Equivalent to --no-ipc.", + + FLAG flag_ipc_off: (bool) = false, or |_| None, + "--ipc-off", + "Equivalent to --no-ipc.", + + FLAG flag_nodiscover: (bool) = false, or |_| None, + "--nodiscover", + "Equivalent to --no-discovery.", + + FLAG flag_jsonrpc: (bool) = false, or |_| None, + "-j, --jsonrpc", + "Does nothing; JSON-RPC is on by default now.", + + FLAG flag_jsonrpc_off: (bool) = false, or |_| None, + "--jsonrpc-off", + "Equivalent to --no-jsonrpc.", + + FLAG flag_webapp: (bool) = false, or |_| None, + "-w, --webapp", + "Does nothing; dapps server is on by default now.", + + FLAG flag_dapps_off: (bool) = false, or |_| None, + "--dapps-off", + "Equivalent to --no-dapps.", + + FLAG flag_rpc: (bool) = false, or |_| None, + "--rpc", + "Does nothing; JSON-RPC is on by default now.", + + ARG arg_dapps_port: (Option) = None, or |c: &Config| otry!(c.dapps).port.clone(), + "--dapps-port=[PORT]", + "Dapps server is merged with RPC server. Use --jsonrpc-port.", + + ARG arg_dapps_interface: (Option) = None, or |c: &Config| otry!(c.dapps).interface.clone(), + "--dapps-interface=[IP]", + "Dapps server is merged with RPC server. Use --jsonrpc-interface.", + + ARG arg_dapps_hosts: (Option) = None, or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| vec.join(",")), + "--dapps-hosts=[HOSTS]", + "Dapps server is merged with RPC server. 
Use --jsonrpc-hosts.", + + ARG arg_dapps_cors: (Option) = None, or |c: &Config| otry!(c.dapps).cors.clone(), + "--dapps-cors=[URL]", + "Dapps server is merged with RPC server. Use --jsonrpc-cors.", + + ARG arg_dapps_user: (Option) = None, or |c: &Config| otry!(c.dapps).user.clone(), + "--dapps-user=[USERNAME]", + "Dapps server authentication has been removed.", + + ARG arg_dapps_pass: (Option) = None, or |c: &Config| otry!(c.dapps).pass.clone(), + "--dapps-pass=[PASSWORD]", + "Dapps server authentication has been removed.", + + ARG arg_datadir: (Option) = None, or |_| None, + "--datadir=[PATH]", + "Equivalent to --base-path PATH.", + + ARG arg_networkid: (Option) = None, or |_| None, + "--networkid=[INDEX]", + "Equivalent to --network-id INDEX.", + + ARG arg_peers: (Option) = None, or |_| None, + "--peers=[NUM]", + "Equivalent to --min-peers NUM.", + + ARG arg_nodekey: (Option) = None, or |_| None, + "--nodekey=[KEY]", + "Equivalent to --node-key KEY.", + + ARG arg_rpcaddr: (Option) = None, or |_| None, + "--rpcaddr=[IP]", + "Equivalent to --jsonrpc-interface IP.", + + ARG arg_rpcport: (Option) = None, or |_| None, + "--rpcport=[PORT]", + "Equivalent to --jsonrpc-port PORT.", + + ARG arg_rpcapi: (Option) = None, or |_| None, + "--rpcapi=[APIS]", + "Equivalent to --jsonrpc-apis APIS.", + + ARG arg_rpccorsdomain: (Option) = None, or |_| None, + "--rpccorsdomain=[URL]", + "Equivalent to --jsonrpc-cors URL.", + + ARG arg_ipcapi: (Option) = None, or |_| None, + "--ipcapi=[APIS]", + "Equivalent to --ipc-apis APIS.", + + ARG arg_ipcpath: (Option) = None, or |_| None, + "--ipcpath=[PATH]", + "Equivalent to --ipc-path PATH.", + + ARG arg_gasprice: (Option) = None, or |_| None, + "--gasprice=[WEI]", + "Equivalent to --min-gas-price WEI.", + + ARG arg_etherbase: (Option) = None, or |_| None, + "--etherbase=[ADDRESS]", + "Equivalent to --author ADDRESS.", + + ARG arg_extradata: (Option) = None, or |_| None, + "--extradata=[STRING]", + "Equivalent to --extra-data STRING.", + + ARG arg_cache: (Option) = None, or |_| None, + "--cache=[MB]", + "Equivalent to --cache-size MB.", - // -- Legacy Options supported in configs - flag_dapps_port: Option = None, - or |c: &Config| otry!(c.dapps).port.clone().map(Some), - flag_dapps_interface: Option = None, - or |c: &Config| otry!(c.dapps).interface.clone().map(Some), - flag_dapps_hosts: Option = None, - or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| Some(vec.join(","))), - flag_dapps_cors: Option = None, - or |c: &Config| otry!(c.dapps).cors.clone().map(Some), - flag_dapps_user: Option = None, - or |c: &Config| otry!(c.dapps).user.clone().map(Some), - flag_dapps_pass: Option = None, - or |c: &Config| otry!(c.dapps).pass.clone().map(Some), - flag_dapps_apis_all: Option = None, or |_| None, - } - { - // Values with optional default value. 
- flag_base_path: Option, display dir::default_data_path(), or |c: &Config| otry!(c.parity).base_path.clone().map(Some), - flag_db_path: Option, display dir::CHAINS_PATH, or |c: &Config| otry!(c.parity).db_path.clone().map(Some), - flag_warp: Option, display true, or |c: &Config| Some(otry!(c.network).warp.clone()), } } @@ -635,6 +1197,65 @@ mod tests { }; use toml; + #[test] + fn should_parse_args_and_flags() { + let args = Args::parse(&["parity", "--no-warp"]).unwrap(); + assert_eq!(args.flag_no_warp, true); + + let args = Args::parse(&["parity", "--pruning", "archive"]).unwrap(); + assert_eq!(args.arg_pruning, "archive"); + + let args = Args::parse(&["parity", "export", "state", "--no-storage"]).unwrap(); + assert_eq!(args.flag_export_state_no_storage, true); + + let args = Args::parse(&["parity", "export", "state", "--min-balance","123"]).unwrap(); + assert_eq!(args.arg_export_state_min_balance, Some("123".to_string())); + } + + #[test] + fn should_use_subcommand_arg_default() { + let args = Args::parse(&["parity", "export", "state", "--at", "123"]).unwrap(); + assert_eq!(args.arg_export_state_at, "123"); + assert_eq!(args.arg_snapshot_at, "latest"); + + let args = Args::parse(&["parity", "snapshot", "--at", "123", "file.dump"]).unwrap(); + assert_eq!(args.arg_snapshot_at, "123"); + assert_eq!(args.arg_export_state_at, "latest"); + + let args = Args::parse(&["parity", "export", "state"]).unwrap(); + assert_eq!(args.arg_snapshot_at, "latest"); + assert_eq!(args.arg_export_state_at, "latest"); + + let args = Args::parse(&["parity", "snapshot", "file.dump"]).unwrap(); + assert_eq!(args.arg_snapshot_at, "latest"); + assert_eq!(args.arg_export_state_at, "latest"); + } + + #[test] + fn should_parse_multiple_values() { + let args = Args::parse(&["parity", "account", "import", "~/1", "~/2"]).unwrap(); + assert_eq!(args.arg_account_import_path, Some(vec!["~/1".to_owned(), "~/2".to_owned()])); + + let args = Args::parse(&["parity", "account", "import", "~/1,ext"]).unwrap(); + assert_eq!(args.arg_account_import_path, Some(vec!["~/1,ext".to_owned()])); + + let args = Args::parse(&["parity", "--secretstore-nodes", "abc@127.0.0.1:3333,cde@10.10.10.10:4444"]).unwrap(); + assert_eq!(args.arg_secretstore_nodes, "abc@127.0.0.1:3333,cde@10.10.10.10:4444"); + + // Arguments with a single value shouldn't accept multiple values + let args = Args::parse(&["parity", "--auto-update", "critical", "all"]); + assert!(args.is_err()); + + let args = Args::parse(&["parity", "--password", "~/.safe/1", "~/.safe/2"]).unwrap(); + assert_eq!(args.arg_password, vec!["~/.safe/1".to_owned(), "~/.safe/2".to_owned()]); + } + + #[test] + fn should_parse_global_args_with_subcommand() { + let args = Args::parse(&["parity", "--chain", "dev", "account", "list"]).unwrap(); + assert_eq!(args.arg_chain, "dev".to_owned()); + } + #[test] fn should_parse_args_and_include_config() { // given @@ -647,7 +1268,7 @@ mod tests { let args = Args::parse_with_config(&["parity"], config).unwrap(); // then - assert_eq!(args.flag_chain, "morden".to_owned()); + assert_eq!(args.arg_chain, "morden".to_owned()); } #[test] @@ -662,7 +1283,7 @@ mod tests { let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap(); // then - assert_eq!(args.flag_chain, "xyz".to_owned()); + assert_eq!(args.arg_chain, "xyz".to_owned()); } #[test] @@ -676,13 +1297,13 @@ mod tests { let args = Args::parse_with_config(&["parity"], config).unwrap(); // then - assert_eq!(args.flag_pruning_history, 128); + assert_eq!(args.arg_pruning_history, 128); } 
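The new tests above exercise flag parsing and per-subcommand defaults. For the FLAG/ARG definitions earlier in this file, the resolution order is: an explicit CLI value wins, otherwise the "or |c: &Config| ..." closure supplies the config-file value, otherwise the literal default applies. The sketch below is not part of the patch; it assumes the [mining] table of Config deserializes a bare reseal_min_period field exactly as the closure otry!(c.mining).reseal_min_period implies, and the numeric values are illustrative only.

#[test]
fn reseal_min_period_precedence_sketch() {
    // Config value beats the built-in 2000 ms default when the flag is absent.
    let config: Config = toml::from_str("[mining]\nreseal_min_period = 3000\n").unwrap();
    let args = Args::parse_with_config(&["parity"], config).unwrap();
    assert_eq!(args.arg_reseal_min_period, 3000u64);

    // An explicit CLI value beats the config value.
    let config: Config = toml::from_str("[mining]\nreseal_min_period = 3000\n").unwrap();
    let args = Args::parse_with_config(&["parity", "--reseal-min-period", "5000"], config).unwrap();
    assert_eq!(args.arg_reseal_min_period, 5000u64);

    // Options not set on the CLI or in the config fall back to their declared defaults.
    let args = Args::parse_with_config(&["parity"], Config::default()).unwrap();
    assert_eq!(args.arg_usd_per_tx, "0.0025".to_owned());
}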
#[test] fn should_parse_full_config() { // given - let config = toml::from_str(include_str!("./config.full.toml")).unwrap(); + let config = toml::from_str(include_str!("./tests/config.full.toml")).unwrap(); // when let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap(); @@ -690,85 +1311,104 @@ mod tests { // then assert_eq!(args, Args { // Commands - cmd_daemon: false, - cmd_wallet: false, - cmd_account: false, - cmd_new: false, - cmd_list: false, - cmd_export: false, - cmd_state: false, - cmd_blocks: false, - cmd_import: false, - cmd_signer: false, - cmd_sign: false, - cmd_reject: false, - cmd_new_token: false, - cmd_snapshot: false, - cmd_restore: false, cmd_ui: false, cmd_dapp: false, + cmd_daemon: false, + cmd_account: false, + cmd_account_new: false, + cmd_account_list: false, + cmd_account_import: false, + cmd_wallet: false, + cmd_wallet_import: false, + cmd_import: false, + cmd_export: false, + cmd_export_blocks: false, + cmd_export_state: false, + cmd_signer: false, + cmd_signer_list: false, + cmd_signer_sign: false, + cmd_signer_reject: false, + cmd_signer_new_token: false, + cmd_snapshot: false, + cmd_restore: false, cmd_tools: false, - cmd_hash: false, + cmd_tools_hash: false, cmd_db: false, - cmd_kill: false, + cmd_db_kill: false, // Arguments - arg_pid_file: "".into(), - arg_file: None, - arg_id: None, - arg_path: vec![], + arg_daemon_pid_file: None, + arg_import_file: None, + arg_import_format: None, + arg_export_blocks_file: None, + arg_export_blocks_format: None, + arg_export_state_file: None, + arg_export_state_format: None, + arg_snapshot_file: None, + arg_restore_file: None, + arg_tools_hash_file: None, + + arg_account_new_password: None, + arg_signer_sign_password: None, + arg_wallet_import_password: None, + arg_signer_sign_id: None, + arg_signer_reject_id: None, + arg_dapp_path: None, + arg_account_import_path: None, + arg_wallet_import_path: None, // -- Operating Options - flag_mode: "last".into(), - flag_mode_timeout: 300u64, - flag_mode_alarm: 3600u64, - flag_auto_update: "none".into(), - flag_release_track: "current".into(), + arg_mode: "last".into(), + arg_mode_timeout: 300u64, + arg_mode_alarm: 3600u64, + arg_auto_update: "none".into(), + arg_release_track: "current".into(), flag_public_node: false, flag_no_download: false, flag_no_consensus: false, - flag_chain: "xyz".into(), - flag_base_path: Some("$HOME/.parity".into()), - flag_db_path: Some("$HOME/.parity/chains".into()), - flag_keys_path: "$HOME/.parity/keys".into(), - flag_identity: "".into(), + arg_chain: "xyz".into(), + arg_base_path: Some("$HOME/.parity".into()), + arg_db_path: Some("$HOME/.parity/chains".into()), + arg_keys_path: "$HOME/.parity/keys".into(), + arg_identity: "".into(), flag_light: false, flag_no_persistent_txqueue: false, + flag_force_direct: false, // -- Convenience Options - flag_config: "$BASE/config.toml".into(), - flag_ports_shift: 0, + arg_config: "$BASE/config.toml".into(), + arg_ports_shift: 0, flag_unsafe_expose: false, // -- Account Options - flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), - flag_password: vec!["~/.safe/password.file".into()], - flag_keys_iterations: 10240u32, + arg_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), + arg_password: vec!["~/.safe/password.file".into()], + arg_keys_iterations: 10240u32, flag_no_hardware_wallets: false, flag_fast_unlock: false, flag_force_ui: false, flag_no_ui: false, - flag_ui_port: 8180u16, - flag_ui_interface: "127.0.0.1".into(), - flag_ui_hosts: "none".into(), 
- flag_ui_path: "$HOME/.parity/signer".into(), + arg_ui_port: 8180u16, + arg_ui_interface: "127.0.0.1".into(), + arg_ui_hosts: "none".into(), + arg_ui_path: "$HOME/.parity/signer".into(), flag_ui_no_validation: false, // -- Networking Options flag_no_warp: false, - flag_port: 30303u16, - flag_min_peers: 25u16, - flag_max_peers: 50u16, - flag_max_pending_peers: 64u16, - flag_snapshot_peers: 0u16, - flag_allow_ips: "all".into(), - flag_nat: "any".into(), - flag_network_id: Some(1), - flag_bootnodes: Some("".into()), + arg_port: 30303u16, + arg_min_peers: 25u16, + arg_max_peers: 50u16, + arg_max_pending_peers: 64u16, + arg_snapshot_peers: 0u16, + arg_allow_ips: "all".into(), + arg_nat: "any".into(), + arg_network_id: Some(1), + arg_bootnodes: Some("".into()), flag_no_discovery: false, - flag_node_key: None, - flag_reserved_peers: Some("./path_to_file".into()), + arg_node_key: None, + arg_reserved_peers: Some("./path_to_file".into()), flag_reserved_only: false, flag_no_ancient_blocks: false, flag_no_serve_light: false, @@ -776,111 +1416,111 @@ mod tests { // -- API and Console Options // RPC flag_no_jsonrpc: false, - flag_jsonrpc_port: 8545u16, - flag_jsonrpc_interface: "local".into(), - flag_jsonrpc_cors: Some("null".into()), - flag_jsonrpc_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(), - flag_jsonrpc_hosts: "none".into(), - flag_jsonrpc_server_threads: None, - flag_jsonrpc_threads: 0, + arg_jsonrpc_port: 8545u16, + arg_jsonrpc_interface: "local".into(), + arg_jsonrpc_cors: Some("null".into()), + arg_jsonrpc_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(), + arg_jsonrpc_hosts: "none".into(), + arg_jsonrpc_server_threads: None, + arg_jsonrpc_threads: 0, // WS flag_no_ws: false, - flag_ws_port: 8546u16, - flag_ws_interface: "local".into(), - flag_ws_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(), - flag_ws_origins: "none".into(), - flag_ws_hosts: "none".into(), + arg_ws_port: 8546u16, + arg_ws_interface: "local".into(), + arg_ws_apis: "web3,eth,net,parity,traces,rpc,secretstore".into(), + arg_ws_origins: "none".into(), + arg_ws_hosts: "none".into(), // IPC flag_no_ipc: false, - flag_ipc_path: "$HOME/.parity/jsonrpc.ipc".into(), - flag_ipc_apis: "web3,eth,net,parity,parity_accounts,personal,traces,rpc,secretstore".into(), + arg_ipc_path: "$HOME/.parity/jsonrpc.ipc".into(), + arg_ipc_apis: "web3,eth,net,parity,parity_accounts,personal,traces,rpc,secretstore".into(), // DAPPS - flag_dapps_path: "$HOME/.parity/dapps".into(), + arg_dapps_path: "$HOME/.parity/dapps".into(), flag_no_dapps: false, flag_no_secretstore: false, flag_no_secretstore_http: false, flag_no_secretstore_acl_check: false, - flag_secretstore_secret: None, - flag_secretstore_nodes: "".into(), - flag_secretstore_interface: "local".into(), - flag_secretstore_port: 8083u16, - flag_secretstore_http_interface: "local".into(), - flag_secretstore_http_port: 8082u16, - flag_secretstore_path: "$HOME/.parity/secretstore".into(), + arg_secretstore_secret: None, + arg_secretstore_nodes: "".into(), + arg_secretstore_interface: "local".into(), + arg_secretstore_port: 8083u16, + arg_secretstore_http_interface: "local".into(), + arg_secretstore_http_port: 8082u16, + arg_secretstore_path: "$HOME/.parity/secretstore".into(), // IPFS flag_ipfs_api: false, - flag_ipfs_api_port: 5001u16, - flag_ipfs_api_interface: "local".into(), - flag_ipfs_api_cors: Some("null".into()), - flag_ipfs_api_hosts: "none".into(), + arg_ipfs_api_port: 5001u16, + arg_ipfs_api_interface: "local".into(), + arg_ipfs_api_cors: 
Some("null".into()), + arg_ipfs_api_hosts: "none".into(), // -- Sealing/Mining Options - flag_author: Some("0xdeadbeefcafe0000000000000000000000000001".into()), - flag_engine_signer: Some("0xdeadbeefcafe0000000000000000000000000001".into()), + arg_author: Some("0xdeadbeefcafe0000000000000000000000000001".into()), + arg_engine_signer: Some("0xdeadbeefcafe0000000000000000000000000001".into()), flag_force_sealing: true, - flag_reseal_on_txs: "all".into(), - flag_reseal_min_period: 4000u64, - flag_reseal_max_period: 60000u64, + arg_reseal_on_txs: "all".into(), + arg_reseal_min_period: 4000u64, + arg_reseal_max_period: 60000u64, flag_reseal_on_uncle: false, - flag_work_queue_size: 20usize, - flag_tx_gas_limit: Some("6283184".into()), - flag_tx_time_limit: Some(100u64), - flag_relay_set: "cheap".into(), - flag_min_gas_price: Some(0u64), - flag_usd_per_tx: "0.0025".into(), - flag_usd_per_eth: "auto".into(), - flag_price_update_period: "hourly".into(), - flag_gas_floor_target: "4700000".into(), - flag_gas_cap: "6283184".into(), - flag_extra_data: Some("Parity".into()), - flag_tx_queue_size: 8192usize, - flag_tx_queue_mem_limit: 2u32, - flag_tx_queue_gas: "off".into(), - flag_tx_queue_strategy: "gas_factor".into(), - flag_tx_queue_ban_count: 1u16, - flag_tx_queue_ban_time: 180u16, + arg_work_queue_size: 20usize, + arg_tx_gas_limit: Some("6283184".into()), + arg_tx_time_limit: Some(100u64), + arg_relay_set: "cheap".into(), + arg_min_gas_price: Some(0u64), + arg_usd_per_tx: "0.0025".into(), + arg_usd_per_eth: "auto".into(), + arg_price_update_period: "hourly".into(), + arg_gas_floor_target: "4700000".into(), + arg_gas_cap: "6283184".into(), + arg_extra_data: Some("Parity".into()), + arg_tx_queue_size: 8192usize, + arg_tx_queue_mem_limit: 2u32, + arg_tx_queue_gas: "off".into(), + arg_tx_queue_strategy: "gas_factor".into(), + arg_tx_queue_ban_count: 1u16, + arg_tx_queue_ban_time: 180u16, flag_remove_solved: false, - flag_notify_work: Some("http://localhost:3001".into()), + arg_notify_work: Some("http://localhost:3001".into()), flag_refuse_service_transactions: false, flag_stratum: false, - flag_stratum_interface: "local".to_owned(), - flag_stratum_port: 8008u16, - flag_stratum_secret: None, + arg_stratum_interface: "local".to_owned(), + arg_stratum_port: 8008u16, + arg_stratum_secret: None, // -- Footprint Options - flag_tracing: "auto".into(), - flag_pruning: "auto".into(), - flag_pruning_history: 64u64, - flag_pruning_memory: 500usize, - flag_cache_size_db: 64u32, - flag_cache_size_blocks: 8u32, - flag_cache_size_queue: 50u32, - flag_cache_size_state: 25u32, - flag_cache_size: Some(128), + arg_tracing: "auto".into(), + arg_pruning: "auto".into(), + arg_pruning_history: 64u64, + arg_pruning_memory: 500usize, + arg_cache_size_db: 64u32, + arg_cache_size_blocks: 8u32, + arg_cache_size_queue: 50u32, + arg_cache_size_state: 25u32, + arg_cache_size: Some(128), flag_fast_and_loose: false, - flag_db_compaction: "ssd".into(), - flag_fat_db: "auto".into(), + arg_db_compaction: "ssd".into(), + arg_fat_db: "auto".into(), flag_scale_verifiers: true, - flag_num_verifiers: Some(6), + arg_num_verifiers: Some(6), // -- Import/Export Options - flag_from: "1".into(), - flag_to: "latest".into(), - flag_format: None, + arg_export_blocks_from: "1".into(), + arg_export_blocks_to: "latest".into(), flag_no_seal_check: false, - flag_no_code: false, - flag_no_storage: false, - flag_min_balance: None, - flag_max_balance: None, + flag_export_state_no_code: false, + flag_export_state_no_storage: false, + 
arg_export_state_min_balance: None, + arg_export_state_max_balance: None, // -- Snapshot Optons - flag_at: "latest".into(), + arg_export_state_at: "latest".into(), + arg_snapshot_at: "latest".into(), flag_no_periodic_snapshot: false, // -- Virtual Machine Options @@ -888,49 +1528,51 @@ mod tests { // -- Whisper options. flag_whisper: false, - flag_whisper_pool_size: 20, + arg_whisper_pool_size: 20, // -- Legacy Options flag_geth: false, flag_testnet: false, flag_import_geth_keys: false, - flag_datadir: None, - flag_networkid: None, - flag_peers: None, - flag_nodekey: None, + arg_datadir: None, + arg_networkid: None, + arg_peers: None, + arg_nodekey: None, flag_nodiscover: false, flag_jsonrpc: false, flag_jsonrpc_off: false, flag_webapp: false, flag_dapps_off: false, flag_rpc: false, - flag_rpcaddr: None, - flag_rpcport: None, - flag_rpcapi: None, - flag_rpccorsdomain: None, + arg_rpcaddr: None, + arg_rpcport: None, + arg_rpcapi: None, + arg_rpccorsdomain: None, flag_ipcdisable: false, flag_ipc_off: false, - flag_ipcapi: None, - flag_ipcpath: None, - flag_gasprice: None, - flag_etherbase: None, - flag_extradata: None, - flag_cache: None, - flag_warp: Some(true), + arg_ipcapi: None, + arg_ipcpath: None, + arg_gasprice: None, + arg_etherbase: None, + arg_extradata: None, + arg_cache: None, // Legacy-Dapps - flag_dapps_port: Some(8080), - flag_dapps_interface: Some("local".into()), - flag_dapps_hosts: Some("none".into()), - flag_dapps_cors: None, - flag_dapps_user: Some("test_user".into()), - flag_dapps_pass: Some("test_pass".into()), - flag_dapps_apis_all: None, + arg_dapps_port: Some(8080), + arg_dapps_interface: Some("local".into()), + arg_dapps_hosts: Some("none".into()), + arg_dapps_cors: None, + arg_dapps_user: Some("test_user".into()), + arg_dapps_pass: Some("test_pass".into()), + flag_dapps_apis_all: false, + + // -- Internal Options + flag_can_restart: false, // -- Miscellaneous Options - flag_ntp_servers: "0.parity.pool.ntp.org:123,1.parity.pool.ntp.org:123,2.parity.pool.ntp.org:123,3.parity.pool.ntp.org:123".into(), + arg_ntp_servers: "0.parity.pool.ntp.org:123,1.parity.pool.ntp.org:123,2.parity.pool.ntp.org:123,3.parity.pool.ntp.org:123".into(), flag_version: false, - flag_logging: Some("own_tx=trace".into()), - flag_log_file: Some("/var/log/parity.log".into()), + arg_logging: Some("own_tx=trace".into()), + arg_log_file: Some("/var/log/parity.log".into()), flag_no_color: false, flag_no_config: false, }); @@ -938,9 +1580,9 @@ mod tests { #[test] fn should_parse_config_and_return_errors() { - let config1 = Args::parse_config(include_str!("./config.invalid1.toml")); - let config2 = Args::parse_config(include_str!("./config.invalid2.toml")); - let config3 = Args::parse_config(include_str!("./config.invalid3.toml")); + let config1 = Args::parse_config(include_str!("./tests/config.invalid1.toml")); + let config2 = Args::parse_config(include_str!("./tests/config.invalid2.toml")); + let config3 = Args::parse_config(include_str!("./tests/config.invalid3.toml")); match (config1, config2, config3) { (Err(ArgsError::Decode(_)), Err(ArgsError::Decode(_)), Err(ArgsError::Decode(_))) => {}, @@ -952,7 +1594,7 @@ mod tests { #[test] fn should_deserialize_toml_file() { - let config: Config = toml::from_str(include_str!("./config.toml")).unwrap(); + let config: Config = toml::from_str(include_str!("./tests/config.toml")).unwrap(); assert_eq!(config, Config { parity: Some(Operating { diff --git a/parity/cli/config.full.toml b/parity/cli/tests/config.full.toml similarity index 100% rename from 
parity/cli/config.full.toml rename to parity/cli/tests/config.full.toml diff --git a/parity/cli/config.invalid1.toml b/parity/cli/tests/config.invalid1.toml similarity index 100% rename from parity/cli/config.invalid1.toml rename to parity/cli/tests/config.invalid1.toml diff --git a/parity/cli/config.invalid2.toml b/parity/cli/tests/config.invalid2.toml similarity index 100% rename from parity/cli/config.invalid2.toml rename to parity/cli/tests/config.invalid2.toml diff --git a/parity/cli/config.invalid3.toml b/parity/cli/tests/config.invalid3.toml similarity index 100% rename from parity/cli/config.invalid3.toml rename to parity/cli/tests/config.invalid3.toml diff --git a/parity/cli/config.toml b/parity/cli/tests/config.toml similarity index 100% rename from parity/cli/config.toml rename to parity/cli/tests/config.toml diff --git a/parity/cli/usage.rs b/parity/cli/usage.rs index 182efca92..b6a4b723e 100644 --- a/parity/cli/usage.rs +++ b/parity/cli/usage.rs @@ -22,7 +22,7 @@ macro_rules! println_stderr( ); macro_rules! otry { - ($e: expr) => ( + ($e:expr) => ( match $e { Some(ref v) => v, None => { @@ -31,21 +31,107 @@ macro_rules! otry { } ) } + +macro_rules! if_option { + (Option<$type:ty>, THEN {$($then:tt)*} ELSE {$($otherwise:tt)*}) => ( + $($then)* + ); + ($type:ty, THEN {$($then:tt)*} ELSE {$($otherwise:tt)*}) => ( + $($otherwise)* + ); +} + +macro_rules! if_vec { + (Vec<$type:ty>, THEN {$($then:tt)*} ELSE {$($otherwise:tt)*}) => ( + $($then)* + ); + ($type:ty, THEN {$($then:tt)*} ELSE {$($otherwise:tt)*}) => ( + $($otherwise)* + ); +} + +macro_rules! if_option_vec { + (Option>, THEN {$then:expr} ELSE {$otherwise:expr}) => ( + $then + ); + (Option<$type:ty>, THEN {$then:expr} ELSE {$otherwise:expr}) => ( + $otherwise + ); +} + +macro_rules! inner_option_type { + (Option<$type:ty>) => ( + $type + ) +} + +macro_rules! inner_vec_type { + (Vec<$type:ty>) => ( + $type + ) +} + +macro_rules! inner_option_vec_type { + (Option>) => ( + String + ) +} + +macro_rules! usage_with_ident { + ($name:expr, $usage:expr, $help:expr) => ( + if $usage.contains("<") { + format!("<{}> {} '{}'",$name, $usage, $help) + } else { + format!("[{}] {} '{}'",$name, $usage, $help) + } + ); +} + +macro_rules! underscore_to_hyphen { + ($e:expr) => ( + str::replace($e, "_", "-") + ) +} + macro_rules! usage { ( { $( - $field_a:ident : $typ_a:ty, + CMD $subc:ident + { + $subc_help:expr, + + $( + CMD $subc_subc:ident + { + $subc_subc_help:expr, + $( + FLAG $subc_subc_flag:ident : (bool) = false, $subc_subc_flag_usage:expr, $subc_subc_flag_help:expr, + )* + $( + ARG $subc_subc_arg:ident : ($($subc_subc_arg_type_tt:tt)+) = $subc_subc_arg_default:expr, $subc_subc_arg_usage:expr, $subc_subc_arg_help:expr, + )* + } + )* + + $( + FLAG $subc_flag:ident : (bool) = false, $subc_flag_usage:expr, $subc_flag_help:expr, + )* + $( + ARG $subc_arg:ident : ($($subc_arg_type_tt:tt)+) = $subc_arg_default:expr, $subc_arg_usage:expr, $subc_arg_help:expr, + )* + } )* } { $( - $field:ident : $typ:ty = $default:expr, or $from_config:expr, - )* - } - { - $( - $field_s:ident : $typ_s:ty, display $default_s:expr, or $from_config_s:expr, + [$group_name:expr] + $( + FLAG $flag:ident : (bool) = false, or $flag_from_config:expr, $flag_usage:expr, $flag_help:expr, + )* + $( + ARG $arg:ident : ($($arg_type_tt:tt)+) = $arg_default:expr, or $arg_from_config:expr, $arg_usage:expr, $arg_help:expr, + )* )* } ) => { @@ -53,12 +139,17 @@ macro_rules! 
usage { use std::{fs, io, process}; use std::io::{Read, Write}; use util::version; - use docopt::{Docopt, Error as DocoptError}; + use clap::{Arg, App, SubCommand, AppSettings, Error as ClapError}; use helpers::replace_home; + use std::ffi::OsStr; + use std::collections::HashMap; + + #[cfg(test)] + use regex::Regex; #[derive(Debug)] pub enum ArgsError { - Docopt(DocoptError), + Clap(ClapError), Decode(toml::de::Error), Config(String, io::Error), } @@ -66,7 +157,7 @@ macro_rules! usage { impl ArgsError { pub fn exit(self) -> ! { match self { - ArgsError::Docopt(e) => e.exit(), + ArgsError::Clap(e) => e.exit(), ArgsError::Decode(e) => { println_stderr!("You might have supplied invalid parameters in config file."); println_stderr!("{}", e); @@ -81,9 +172,9 @@ macro_rules! usage { } } - impl From for ArgsError { - fn from(e: DocoptError) -> Self { - ArgsError::Docopt(e) + impl From for ArgsError { + fn from(e: ClapError) -> Self { + ArgsError::Clap(e) } } @@ -96,15 +187,33 @@ macro_rules! usage { #[derive(Debug, PartialEq)] pub struct Args { $( - pub $field_a: $typ_a, + pub $subc: bool, + + $( + pub $subc_subc: bool, + $( + pub $subc_subc_flag: bool, + )* + $( + pub $subc_subc_arg: $($subc_subc_arg_type_tt)+, + )* + )* + + $( + pub $subc_flag: bool, + )* + $( + pub $subc_arg: $($subc_arg_type_tt)+, + )* )* $( - pub $field: $typ, - )* - - $( - pub $field_s: $typ_s, + $( + pub $flag: bool, + )* + $( + pub $arg: $($arg_type_tt)+, + )* )* } @@ -112,15 +221,32 @@ macro_rules! usage { fn default() -> Self { Args { $( - $field_a: Default::default(), + $subc: Default::default(), + $( + $subc_subc: Default::default(), + $( + $subc_subc_flag: Default::default(), + )* + $( + $subc_subc_arg: Default::default(), + )* + )* + + $( + $subc_flag: Default::default(), + )* + $( + $subc_arg: Default::default(), + )* )* $( - $field: $default.into(), - )* - - $( - $field_s: Default::default(), + $( + $flag: Default::default(), + )* + $( + $arg: Default::default(), + )* )* } } @@ -129,13 +255,46 @@ macro_rules! usage { #[derive(Default, Debug, PartialEq, Clone, Deserialize)] struct RawArgs { $( - $field_a: $typ_a, + $subc: bool, + + $( + $subc_subc: bool, + $( + $subc_subc_flag: bool, + )* + $( + $subc_subc_arg: if_option!( + $($subc_subc_arg_type_tt)+, + THEN { $($subc_subc_arg_type_tt)+ } + ELSE { Option<$($subc_subc_arg_type_tt)+> } + ), + )* + )* + + $( + $subc_flag: bool, + )* + $( + $subc_arg: if_option!( + $($subc_arg_type_tt)+, + THEN { $($subc_arg_type_tt)+ } + ELSE { Option<$($subc_arg_type_tt)+> } + ), + )* + )* $( - $field: Option<$typ>, - )* - $( - $field_s: Option<$typ_s>, + $( + $flag: bool, + )* + + $( + $arg: if_option!( + $($arg_type_tt)+, + THEN { $($arg_type_tt)+ } + ELSE { Option<$($arg_type_tt)+> } + ), + )* )* } @@ -149,9 +308,9 @@ macro_rules! usage { return Ok(raw_args.into_args(Config::default())); } - let config_file = raw_args.flag_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).flag_config); + let config_file = raw_args.arg_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).arg_config); let config_file = replace_home(&::dir::default_data_path(), &config_file); - match (fs::File::open(&config_file), raw_args.flag_config.clone()) { + match (fs::File::open(&config_file), raw_args.arg_config.clone()) { // Load config file (Ok(mut file), _) => { println_stderr!("Loading config file from {}", &config_file); @@ -178,7 +337,7 @@ macro_rules! 
usage { #[cfg(test)] fn parse_with_config>(command: &[S], config: Config) -> Result { - RawArgs::parse(command).map(|raw| raw.into_args(config)).map_err(ArgsError::Docopt) + RawArgs::parse(command).map(|raw| raw.into_args(config)).map_err(ArgsError::Clap) } fn parse_config(config: &str) -> Result { @@ -188,41 +347,346 @@ macro_rules! usage { pub fn print_version() -> String { format!(include_str!("./version.txt"), version()) } + + #[allow(unused_mut)] // subc_subc_exist may be assigned true by the macro + #[allow(unused_assignments)] // Rust issue #22630 + pub fn print_help() -> String { + let mut help : String = include_str!("./usage_header.txt").to_owned(); + + help.push_str("\n\n"); + + // Subcommands + help.push_str("parity [options]\n"); + $( + { + let mut subc_subc_exist = false; + + $( + subc_subc_exist = true; + let subc_subc_usages : Vec<&str> = vec![ + $( + concat!("[",$subc_subc_flag_usage,"]"), + )* + $( + $subc_subc_arg_usage, + )* + ]; + + if subc_subc_usages.is_empty() { + help.push_str(&format!("parity [options] {} {}\n", underscore_to_hyphen!(&stringify!($subc)[4..]), underscore_to_hyphen!(&stringify!($subc_subc)[stringify!($subc).len()+1..]))); + } else { + help.push_str(&format!("parity [options] {} {} {}\n", underscore_to_hyphen!(&stringify!($subc)[4..]), underscore_to_hyphen!(&stringify!($subc_subc)[stringify!($subc).len()+1..]), subc_subc_usages.join(" "))); + } + )* + + // Print the subcommand on its own only if it has no subsubcommands + if !subc_subc_exist { + let subc_usages : Vec<&str> = vec![ + $( + concat!("[",$subc_flag_usage,"]"), + )* + $( + $subc_arg_usage, + )* + ]; + + if subc_usages.is_empty() { + help.push_str(&format!("parity [options] {}\n", underscore_to_hyphen!(&stringify!($subc)[4..]))); + } else { + help.push_str(&format!("parity [options] {} {}\n", underscore_to_hyphen!(&stringify!($subc)[4..]), subc_usages.join(" "))); + } + } + } + )* + + // Arguments and flags + $( + help.push_str("\n"); + help.push_str($group_name); help.push_str(":\n"); + + $( + help.push_str(&format!("\t{}\n\t\t{}\n", $flag_usage, $flag_help)); + )* + + $( + if_option!( + $($arg_type_tt)+, + THEN { + if_option_vec!( + $($arg_type_tt)+, + THEN { + help.push_str(&format!("\t{}\n\t\t{} (default: {:?})\n", $arg_usage, $arg_help, {let x : inner_option_type!($($arg_type_tt)+)> = $arg_default; x})) + } + ELSE { + help.push_str(&format!("\t{}\n\t\t{}{}\n", $arg_usage, $arg_help, $arg_default.map(|x: inner_option_type!($($arg_type_tt)+)| format!(" (default: {})",x)).unwrap_or("".to_owned()))) + } + ) + } + ELSE { + if_vec!( + $($arg_type_tt)+, + THEN { + help.push_str(&format!("\t{}\n\t\t{} (default: {:?})\n", $arg_usage, $arg_help, {let x : $($arg_type_tt)+ = $arg_default; x})) + } + ELSE { + help.push_str(&format!("\t{}\n\t\t{} (default: {})\n", $arg_usage, $arg_help, $arg_default)) + } + ) + } + ); + )* + + )* + + help + } } impl RawArgs { fn into_args(self, config: Config) -> Args { let mut args = Args::default(); $( - args.$field_a = self.$field_a; + args.$subc = self.$subc; + + $( + args.$subc_subc = self.$subc_subc; + $( + args.$subc_subc_flag = self.$subc_subc_flag; + )* + $( + args.$subc_subc_arg = if_option!( + $($subc_subc_arg_type_tt)+, + THEN { self.$subc_subc_arg.or($subc_subc_arg_default) } + ELSE { self.$subc_subc_arg.unwrap_or($subc_subc_arg_default.into()) } + ); + )* + )* + + $( + args.$subc_flag = self.$subc_flag; + )* + $( + args.$subc_arg = if_option!( + $($subc_arg_type_tt)+, + THEN { self.$subc_arg.or($subc_arg_default) } + ELSE { 
self.$subc_arg.unwrap_or($subc_arg_default.into()) } + ); + )* )* + $( - args.$field = self.$field.or_else(|| $from_config(&config)).unwrap_or_else(|| $default.into()); - )* - $( - args.$field_s = self.$field_s.or_else(|| $from_config_s(&config)).unwrap_or(None); + $( + args.$flag = self.$flag || $flag_from_config(&config).unwrap_or(false); + )* + $( + args.$arg = if_option!( + $($arg_type_tt)+, + THEN { self.$arg.or_else(|| $arg_from_config(&config)).or_else(|| $arg_default.into()) } + ELSE { self.$arg.or_else(|| $arg_from_config(&config)).unwrap_or_else(|| $arg_default.into()) } + ); + )* )* args } - pub fn parse>(command: &[S]) -> Result { - Docopt::new(Self::usage()).and_then(|d| d.argv(command).deserialize()) + #[allow(unused_variables)] // the submatches of arg-less subcommands aren't used + pub fn parse>(command: &[S]) -> Result { + + let usages = vec![ + $( + $( + usage_with_ident!(stringify!($arg), $arg_usage, $arg_help), + )* + $( + usage_with_ident!(stringify!($flag), $flag_usage, $flag_help), + )* + )* + ]; + + // Hash of subc|subc_subc => Vec + let mut subc_usages = HashMap::new(); + $( + { + let this_subc_usages = vec![ + $( + usage_with_ident!(stringify!($subc_flag), $subc_flag_usage, $subc_flag_help), + )* + $( + usage_with_ident!(stringify!($subc_arg), $subc_arg_usage, $subc_arg_help), + )* + ]; + + subc_usages.insert(stringify!($subc),this_subc_usages); + + $( + { + let this_subc_subc_usages = vec![ + $( + usage_with_ident!(stringify!($subc_subc_flag), $subc_subc_flag_usage, $subc_subc_flag_help), + )* + $( + usage_with_ident!(stringify!($subc_subc_arg), $subc_subc_arg_usage, $subc_subc_arg_help), + )* + ]; + + subc_usages.insert(stringify!($subc_subc), this_subc_subc_usages); + } + )* + } + )* + + let matches = App::new("Parity") + .global_setting(AppSettings::VersionlessSubcommands) + .global_setting(AppSettings::AllowLeadingHyphen) // allow for example --allow-ips -10.0.0.0/8 + .global_setting(AppSettings::DisableHelpSubcommand) + .help(Args::print_help().as_ref()) + .args(&usages.iter().map(|u| Arg::from_usage(u).use_delimiter(false)).collect::>()) + $( + .subcommand( + SubCommand::with_name(&underscore_to_hyphen!(&stringify!($subc)[4..])) + .about($subc_help) + .args(&subc_usages.get(stringify!($subc)).unwrap().iter().map(|u| Arg::from_usage(u).use_delimiter(false)).collect::>()) + $( + .setting(AppSettings::SubcommandRequired) // prevent from running `parity account` + .subcommand( + SubCommand::with_name(&underscore_to_hyphen!(&stringify!($subc_subc)[stringify!($subc).len()+1..])) + .about($subc_subc_help) + .args(&subc_usages.get(stringify!($subc_subc)).unwrap().iter().map(|u| Arg::from_usage(u).use_delimiter(false)).collect::>()) + ) + )* + ) + )* + .get_matches_from_safe(command.iter().map(|x| OsStr::new(x.as_ref())))?; + + let mut raw_args : RawArgs = Default::default(); + $( + $( + raw_args.$flag = matches.is_present(stringify!($flag)); + )* + $( + raw_args.$arg = if_option!( + $($arg_type_tt)+, + THEN { + if_option_vec!( + $($arg_type_tt)+, + THEN { values_t!(matches, stringify!($arg), inner_option_vec_type!($($arg_type_tt)+)).ok() } + ELSE { value_t!(matches, stringify!($arg), inner_option_type!($($arg_type_tt)+)).ok() } + ) + } + ELSE { + if_vec!( + $($arg_type_tt)+, + THEN { values_t!(matches, stringify!($arg), inner_vec_type!($($arg_type_tt)+)).ok() } + ELSE { value_t!(matches, stringify!($arg), $($arg_type_tt)+).ok() } + ) + } + ); + )* + )* + + // Subcommands + $( + if let Some(submatches) = 
matches.subcommand_matches(&underscore_to_hyphen!(&stringify!($subc)[4..])) { + raw_args.$subc = true; + + // Subcommand flags + $( + raw_args.$subc_flag = submatches.is_present(&stringify!($subc_flag)); + )* + // Subcommand arguments + $( + raw_args.$subc_arg = if_option!( + $($subc_arg_type_tt)+, + THEN { + if_option_vec!( + $($subc_arg_type_tt)+, + THEN { values_t!(submatches, stringify!($subc_arg), inner_option_vec_type!($($subc_arg_type_tt)+)).ok() } + ELSE { value_t!(submatches, stringify!($subc_arg), inner_option_type!($($subc_arg_type_tt)+)).ok() } + ) + } + ELSE { + if_vec!( + $($subc_arg_type_tt)+, + THEN { values_t!(submatches, stringify!($subc_arg), inner_vec_type!($($subc_arg_type_tt)+)).ok() } + ELSE { value_t!(submatches, stringify!($subc_arg), $($subc_arg_type_tt)+).ok() } + ) + } + ); + )* + + // Sub-subcommands + $( + if let Some(subsubmatches) = submatches.subcommand_matches(&underscore_to_hyphen!(&stringify!($subc_subc)[stringify!($subc).len()+1..])) { + raw_args.$subc_subc = true; + + // Sub-subcommand flags + $( + raw_args.$subc_subc_flag = subsubmatches.is_present(&stringify!($subc_subc_flag)); + )* + // Sub-subcommand arguments + $( + raw_args.$subc_subc_arg = if_option!( + $($subc_subc_arg_type_tt)+, + THEN { + if_option_vec!( + $($subc_subc_arg_type_tt)+, + THEN { values_t!(subsubmatches, stringify!($subc_subc_arg), inner_option_vec_type!($($subc_subc_arg_type_tt)+)).ok() } + ELSE { value_t!(subsubmatches, stringify!($subc_subc_arg), inner_option_type!($($subc_subc_arg_type_tt)+)).ok() } + ) + } + ELSE { + if_vec!( + $($subc_subc_arg_type_tt)+, + THEN { values_t!(subsubmatches, stringify!($subc_subc_arg), inner_vec_type!($($subc_subc_arg_type_tt)+)).ok() } + ELSE { value_t!(subsubmatches, stringify!($subc_subc_arg), $($subc_subc_arg_type_tt)+).ok() } + ) + } + ); + )* + } + else { + raw_args.$subc_subc = false; + } + )* + } + else { + raw_args.$subc = false; + } + )* + + Ok(raw_args) } - fn usage() -> String { - format!( - include_str!("./usage.txt"), + } + + #[test] + fn usages_valid() { + let re = Regex::new(r"^(?:(-[a-zA-Z-]+, )?--[a-z-]+(=\[[a-zA-Z]+\](\.\.\.)?|=<[a-zA-Z]+>(\.\.\.)?)?)|(?:\[[a-zA-Z-]+\])(\.\.\.)?|(?:<[a-zA-Z-]+>)(\.\.\.)?$").unwrap(); + + let usages = vec![ + $( $( - $field={ let v: $typ = $default.into(); v }, - // Uncomment this to debug - // "named argument never used" error - // $field = $default, + $( + $subc_subc_arg_usage, + )* )* $( - $field_s = $default_s, + $subc_arg_usage, )* - ) + )* + $( + $( + $flag_usage, + )* + $( + $arg_usage, + )* + )* + ]; + + for usage in &usages { + assert!(re.is_match(usage)); } } - }; + } } diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt deleted file mode 100644 index dc1205ecf..000000000 --- a/parity/cli/usage.txt +++ /dev/null @@ -1,499 +0,0 @@ -Parity. Ethereum Client. - By Wood/Paronyan/Kotewicz/DrwiÄ™ga/Volf et al. - Copyright 2015, 2016, 2017 Parity Technologies (UK) Ltd - -Usage: - parity [options] - parity ui [options] - parity dapp [options] - parity daemon [options] - parity account (new | list ) [options] - parity account import ... [options] - parity wallet import --password FILE [options] - parity import [ ] [options] - parity export (blocks | state) [ ] [options] - parity signer new-token [options] - parity signer list [options] - parity signer sign [ ] [ --password FILE ] [options] - parity signer reject [options] - parity snapshot [options] - parity restore [ ] [options] - parity tools hash - parity db kill [options] - -Operating Options: - --mode MODE Set the operating mode. 
MODE can be one of: - last - Uses the last-used mode, active if none. - active - Parity continuously syncs the chain. - passive - Parity syncs initially, then sleeps and - wakes regularly to resync. - dark - Parity syncs only when the RPC is active. - offline - Parity doesn't sync. (default: {flag_mode}). - --mode-timeout SECS Specify the number of seconds before inactivity - timeout occurs when mode is dark or passive - (default: {flag_mode_timeout}). - --mode-alarm SECS Specify the number of seconds before auto sleep - reawake timeout occurs when mode is passive - (default: {flag_mode_alarm}). - --auto-update SET Set a releases set to automatically update and - install. - all - All updates in the our release track. - critical - Only consensus/security updates. - none - No updates will be auto-installed. - (default: {flag_auto_update}). - --release-track TRACK Set which release track we should use for updates. - stable - Stable releases. - beta - Beta releases. - nightly - Nightly releases (unstable). - testing - Testing releases (do not use). - current - Whatever track this executable was - released on (default: {flag_release_track}). - --public-node Start Parity as a public web server. Account storage - and transaction signing will be delegated to the UI. - (default: {flag_public_node}). - --no-download Normally new releases will be downloaded ready for - updating. This disables it. Not recommended. - (default: {flag_no_download}). - --no-consensus Force the binary to run even if there are known - issues regarding consensus. Not recommended. - (default: {flag_no_consensus}). - --force-direct Run the originally installed version of Parity, - ignoring any updates that have since been installed. - --chain CHAIN Specify the blockchain type. CHAIN may be either a - JSON chain specification file or olympic, frontier, - homestead, mainnet, morden, ropsten, classic, expanse, - testnet, kovan or dev (default: {flag_chain}). - -d --base-path PATH Specify the base data storage path. - (default: {flag_base_path}). - --db-path PATH Specify the database directory path - (default: {flag_db_path}). - --keys-path PATH Specify the path for JSON key files to be found - (default: {flag_keys_path}). - --identity NAME Specify your node's name. (default: {flag_identity}) - --light Experimental: run in light client mode. Light clients - synchronize a bare minimum of data and fetch necessary - data on-demand from the network. Much lower in storage, - potentially higher in bandwidth. Has no effect with - subcommands (default: {flag_light}). - -Convenience Options: --c --config CONFIG Specify a configuration. CONFIG may be either a - configuration file or a preset: dev, insecure, dev-insecure, - mining, or non-standard-ports. - (default: {flag_config}). - --ports-shift SHIFT Add SHIFT to all port numbers Parity is listening on. - Includes network port and all servers (RPC, WebSockets, UI, IPFS, SecretStore). - (default: {flag_ports_shift}) - --unsafe-expose All servers will listen on external interfaces and will - be remotely accessible. It's equivalent with setting - the following: --{{ws,jsonrpc,ui,ipfs,secret_store,stratum}}-interface=all --*-hosts=all - This option is UNSAFE and should be used with great care! - (default: {flag_unsafe_expose}) - -Account Options: - --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. - ACCOUNTS is a comma-delimited list of addresses. - Implies --no-ui. (default: {flag_unlock:?}) - --password FILE Provide a file containing a password for unlocking - an account. 
Leading and trailing whitespace is trimmed. - (default: {flag_password:?}) - --keys-iterations NUM Specify the number of iterations to use when - deriving key from the password (bigger is more - secure) (default: {flag_keys_iterations}). - --no-hardware-wallets Disables hardware wallet support. (default: {flag_no_hardware_wallets}) - --fast-unlock Use drasticly faster unlocking mode. This setting causes - raw secrets to be stored unprotected in memory, - so use with care. (default: {flag_fast_unlock}) - -UI Options: - --force-ui Enable Trusted UI WebSocket endpoint, - even when --unlock is in use. (default: {flag_force_ui}) - --no-ui Disable Trusted UI WebSocket endpoint. - (default: {flag_no_ui}) - --ui-port PORT Specify the port of Trusted UI server - (default: {flag_ui_port}). - --ui-interface IP Specify the hostname portion of the Trusted UI - server, IP should be an interface's IP address, - or local (default: {flag_ui_interface}). - --ui-hosts HOSTS List of allowed Host header values. This option will - validate the Host header sent by the browser, it - is additional security against some attack - vectors. Special options: "all", "none", - (default: {flag_ui_hosts}). - --ui-path PATH Specify directory where Trusted UIs tokens should - be stored. (default: {flag_ui_path}) - --ui-no-validation Disable Origin and Host headers validation for - Trusted UI. WARNING: INSECURE. Used only for - development. (default: {flag_ui_no_validation}) - -Networking Options: - --no-warp Disable syncing from the snapshot over the network. (default: {flag_no_warp}) - --port PORT Override the port on which the node should listen - (default: {flag_port}). - --min-peers NUM Try to maintain at least NUM peers (default: {flag_min_peers}). - --max-peers NUM Allow up to NUM peers (default: {flag_max_peers}). - --snapshot-peers NUM Allow additional NUM peers for a snapshot sync - (default: {flag_snapshot_peers}). - --nat METHOD Specify method to use for determining public - address. Must be one of: any, none, upnp, - extip: (default: {flag_nat}). - --network-id INDEX Override the network identifier from the chain we - are on. (default: {flag_network_id:?}) - --bootnodes NODES Override the bootnodes from our chain. NODES should - be comma-delimited enodes. (default: {flag_bootnodes:?}) - --no-discovery Disable new peer discovery. (default: {flag_no_discovery}) - --node-key KEY Specify node secret key, either as 64-character hex - string or input to SHA3 operation. (default: {flag_node_key:?}) - --reserved-peers FILE Provide a file containing enodes, one per line. - These nodes will always have a reserved slot on top - of the normal maximum peers. (default: {flag_reserved_peers:?}) - --reserved-only Connect only to reserved nodes. (default: {flag_reserved_only}) - --allow-ips FILTER Filter outbound connections. FILTER can be one of: - private - connect to private network IP addresses only; - public - connect to public network IP addresses only; - all - connect to any IP address; - none - block all (for use with a custom filter as below); - a custom filter list in the format: "private ip_range1 -ip_range2 ...". - Where ip_range1 would be allowed and ip_range2 blocked; - Custom blocks ("-ip_range") override custom allows ("ip_range"); - (default: {flag_allow_ips}). - --max-pending-peers NUM Allow up to NUM pending connections. (default: {flag_max_pending_peers}) - --no-ancient-blocks Disable downloading old blocks after snapshot restoration - or warp sync. 
(default: {flag_no_ancient_blocks}) - --no-serve-light Disable serving of light peers. (default: {flag_no_serve_light}) - -API and Console Options: - --no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc}) - --jsonrpc-port PORT Specify the port portion of the JSONRPC API server - (default: {flag_jsonrpc_port}). - --jsonrpc-interface IP Specify the hostname portion of the JSONRPC API - server, IP should be an interface's IP address, or - all (all interfaces) or local (default: {flag_jsonrpc_interface}). - --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses. - (default: {flag_jsonrpc_cors:?}) - --jsonrpc-apis APIS Specify the APIs available through the JSONRPC - interface. APIS is a comma-delimited list of API - name. Possible name are all, safe, web3, eth, net, personal, - parity, parity_set, traces, rpc, parity_accounts. - You can also disable a specific API by putting '-' in the front: all,-personal - (default: {flag_jsonrpc_apis}). - --jsonrpc-hosts HOSTS List of allowed Host header values. This option will - validate the Host header sent by the browser, it - is additional security against some attack - vectors. Special options: "all", "none", - (default: {flag_jsonrpc_hosts}). - --jsonrpc-server-threads NUM Enables experimental faster implementation of JSON-RPC server. - Requires Dapps server to be disabled - using --no-dapps. (default: {flag_jsonrpc_server_threads:?}) - --jsonrpc-threads THREADS Turn on additional processing threads in all RPC servers. - Setting this to non-zero value allows parallel cpu-heavy queries - execution. (default: {flag_jsonrpc_threads}) - - --no-ws Disable the WebSockets server. (default: {flag_no_ws}) - --ws-port PORT Specify the port portion of the WebSockets server - (default: {flag_ws_port}). - --ws-interface IP Specify the hostname portion of the WebSockets - server, IP should be an interface's IP address, or - all (all interfaces) or local (default: {flag_ws_interface}). - --ws-apis APIS Specify the APIs available through the WebSockets - interface. APIS is a comma-delimited list of API - name. Possible name are web3, eth, pubsub, net, personal, - parity, parity_set, traces, rpc, parity_accounts. - (default: {flag_ws_apis}). - --ws-origins URL Specify Origin header values allowed to connect. - Special options: "all", "none". - (default: {flag_ws_origins}) - --ws-hosts HOSTS List of allowed Host header values. This option will - validate the Host header sent by the browser, it - is additional security against some attack - vectors. Special options: "all", "none", - (default: {flag_ws_hosts}). - - --no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc}) - --ipc-path PATH Specify custom path for JSON-RPC over IPC service - (default: {flag_ipc_path}). - --ipc-apis APIS Specify custom API set available via JSON-RPC over - IPC (default: {flag_ipc_apis}). - - --no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps}) - --dapps-path PATH Specify directory where dapps should be installed. - (default: {flag_dapps_path}) - --ipfs-api Enable IPFS-compatible HTTP API. (default: {flag_ipfs_api}) - --ipfs-api-port PORT Configure on which port the IPFS HTTP API should listen. - (default: {flag_ipfs_api_port}) - --ipfs-api-interface IP Specify the hostname portion of the IPFS API server, - IP should be an interface's IP address or local. - (default: {flag_ipfs_api_interface}) - --ipfs-api-cors URL Specify CORS header for IPFS API responses. 
- (default: {flag_ipfs_api_cors:?}) - --ipfs-api-hosts HOSTS List of allowed Host header values. This option will - validate the Host header sent by the browser, it - is additional security against some attack - vectors. Special options: "all", "none" - (default: {flag_ipfs_api_hosts}). - -Secret Store Options: - --no-secretstore Disable Secret Store functionality. (default: {flag_no_secretstore}) - --no-secretstore-http Disable Secret Store HTTP API. (default: {flag_no_secretstore_http}) - --no-acl-check Disable ACL check (useful for test environments). (default: {flag_no_secretstore_acl_check}) - --secretstore-secret SECRET Hex-encoded secret key of this node. - (required, default: {flag_secretstore_secret:?}). - --secretstore-nodes NODES Comma-separated list of other secret store cluster nodes in form - NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT. - (required, default: {flag_secretstore_nodes}). - --secretstore-interface IP Specify the hostname portion for listening to Secret Store Key Server - internal requests, IP should be an interface's IP address, or local - (default: {flag_secretstore_interface}). - --secretstore-port PORT Specify the port portion for listening to Secret Store Key Server - internal requests (default: {flag_secretstore_port}). - --secretstore-http-interface IP Specify the hostname portion for listening to Secret Store Key Server - HTTP requests, IP should be an interface's IP address, or local - (default: {flag_secretstore_http_interface}). - --secretstore-http-port PORT Specify the port portion for listening to Secret Store Key Server - HTTP requests (default: {flag_secretstore_http_port}). - --secretstore-path PATH Specify directory where Secret Store should save its data. - (default: {flag_secretstore_path}). - -Sealing/Mining Options: - --author ADDRESS Specify the block author (aka "coinbase") address - for sending block rewards from sealed blocks. - NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION. - (default: {flag_author:?}) - --engine-signer ADDRESS Specify the address which should be used to - sign consensus messages and issue blocks. - Relevant only to non-PoW chains. - (default: {flag_engine_signer:?}) - --force-sealing Force the node to author new blocks as if it were - always sealing/mining. - (default: {flag_force_sealing}) - --reseal-on-txs SET Specify which transactions should force the node - to reseal a block. SET is one of: - none - never reseal on new transactions; - own - reseal only on a new local transaction; - ext - reseal only on a new external transaction; - all - reseal on all new transactions - (default: {flag_reseal_on_txs}). - --reseal-on-uncle Force the node to author new blocks when a new uncle - block is imported. - (default: {flag_reseal_on_uncle}) - --reseal-min-period MS Specify the minimum time between reseals from - incoming transactions. MS is time measured in - milliseconds (default: {flag_reseal_min_period}). - --reseal-max-period MS Specify the maximum time since last block to enable - force-sealing. MS is time measured in - milliseconds (default: {flag_reseal_max_period}). - --work-queue-size ITEMS Specify the number of historical work packages - which are kept cached lest a solution is found for - them later. High values take more memory but result - in fewer unusable solutions (default: {flag_work_queue_size}). - --tx-gas-limit GAS Apply a limit of GAS as the maximum amount of gas - a single transaction may have for it to be mined. 
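The `--secretstore-nodes` entries described above use the `NODE_PUBLIC_KEY_IN_HEX@NODE_IP_ADDR:NODE_PORT` format, which the `secretstore_nodes` hunk later in this diff splits on `@`. A simplified sketch, keeping the public key as a plain hex string rather than Parity's `Public` type:

```rust
use std::collections::BTreeMap;
use std::net::SocketAddr;

/// Parse a comma-separated list of PUBLIC_KEY_HEX@IP:PORT entries.
/// Illustrative only; the real parsing lives in `parity/configuration.rs`.
fn parse_secretstore_nodes(list: &str) -> Result<BTreeMap<String, SocketAddr>, String> {
    let mut nodes = BTreeMap::new();
    for node in list.split(',').filter(|n| !n.is_empty()) {
        let parts: Vec<&str> = node.split('@').collect();
        if parts.len() != 2 {
            return Err(format!("Invalid secret store node: {}", node));
        }
        let addr: SocketAddr = parts[1]
            .parse()
            .map_err(|_| format!("Invalid node address: {}", parts[1]))?;
        nodes.insert(parts[0].to_owned(), addr);
    }
    Ok(nodes)
}

fn main() {
    let nodes = parse_secretstore_nodes("deadbeef@127.0.0.1:8083").unwrap();
    assert_eq!(nodes["deadbeef"], "127.0.0.1:8083".parse().unwrap());
}
```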
- (default: {flag_tx_gas_limit:?}) - --tx-time-limit MS Maximal time for processing single transaction. - If enabled senders/recipients/code of transactions - offending the limit will be banned from being included - in transaction queue for 180 seconds. - (default: {flag_tx_time_limit:?}) - --relay-set SET Set of transactions to relay. SET may be: - cheap - Relay any transaction in the queue (this - may include invalid transactions); - strict - Relay only executed transactions (this - guarantees we don't relay invalid transactions, but - means we relay nothing if not mining); - lenient - Same as strict when mining, and cheap - when not (default: {flag_relay_set}). - --min-gas-price WEI Minimum amount of Wei per GAS to be paid for a - transaction to be accepted for mining. Overrides - --basic-tx-usd. - (default: {flag_min_gas_price:?}) - --usd-per-tx USD Amount of USD to be paid for a basic transaction - (default: {flag_usd_per_tx}). The minimum gas price is set - accordingly. - --usd-per-eth SOURCE USD value of a single ETH. SOURCE may be either an - amount in USD, a web service or 'auto' to use each - web service in turn and fallback on the last known - good value (default: {flag_usd_per_eth}). - --price-update-period T T will be allowed to pass between each gas price - update. T may be daily, hourly, a number of seconds, - or a time string of the form "2 days", "30 minutes" - etc. (default: {flag_price_update_period}). - --gas-floor-target GAS Amount of gas per block to target when sealing a new - block (default: {flag_gas_floor_target}). - --gas-cap GAS A cap on how large we will raise the gas limit per - block due to transaction volume (default: {flag_gas_cap}). - --extra-data STRING Specify a custom extra-data for authored blocks, no - more than 32 characters. (default: {flag_extra_data:?}) - --tx-queue-mem-limit MB Maximum amount of memory that can be used by the - transaction queue. Setting this parameter to 0 - disables limiting (default: {flag_tx_queue_mem_limit}). - --tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting - to be included in next block) (default: {flag_tx_queue_size}). - --tx-queue-gas LIMIT Maximum amount of total gas for external transactions in - the queue. LIMIT can be either an amount of gas or - 'auto' or 'off'. 'auto' sets the limit to be 20x - the current block gas limit. (default: {flag_tx_queue_gas}). - --tx-queue-strategy S Prioritization strategy used to order transactions - in the queue. S may be: - gas - Prioritize txs with low gas limit; - gas_price - Prioritize txs with high gas price; - gas_factor - Prioritize txs using gas price - and gas limit ratio (default: {flag_tx_queue_strategy}). - --tx-queue-ban-count C Number of times maximal time for execution (--tx-time-limit) - can be exceeded before banning sender/recipient/code. - (default: {flag_tx_queue_ban_count}) - --tx-queue-ban-time SEC Banning time (in seconds) for offenders of specified - execution time limit. Also number of offending actions - have to reach the threshold within that time. - (default: {flag_tx_queue_ban_time} seconds) - --no-persistent-txqueue Don't save pending local transactions to disk to be - restored whenever the node restarts. - (default: {flag_no_persistent_txqueue}). - --remove-solved Move solved blocks from the work package queue - instead of cloning them. This gives a slightly - faster import speed, but means that extra solutions - submitted for the same work package will go unused. 
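The `--usd-per-tx` / `--usd-per-eth` options above are combined into a minimum gas price by a `wei_per_gas` helper in `parity/configuration.rs` (its hunk appears further below). A back-of-the-envelope version, assuming the conventional 21,000 gas for a plain transfer; that constant and the rounding are assumptions made here for illustration:

```rust
/// Turn a target USD price for a basic transaction into a gas price in wei,
/// given a USD-per-ETH rate. A rough illustration of the conversion, not the
/// exact helper used by Parity.
fn wei_per_gas(usd_per_tx: f64, usd_per_eth: f64) -> u64 {
    const WEI_PER_ETH: f64 = 1e18;
    const GAS_PER_BASIC_TX: f64 = 21_000.0; // assumed basic-transfer cost
    let eth_per_tx = usd_per_tx / usd_per_eth;
    (eth_per_tx * WEI_PER_ETH / GAS_PER_BASIC_TX).round() as u64
}

fn main() {
    // A $0.0025 transaction at $300/ETH comes out to roughly 0.4 gwei per gas.
    let price = wei_per_gas(0.0025, 300.0);
    println!("min gas price: {} wei", price);
    assert!(price > 0);
}
```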
- (default: {flag_remove_solved}) - --notify-work URLS URLs to which work package notifications are pushed. - URLS should be a comma-delimited list of HTTP URLs. - (default: {flag_notify_work:?}) - --refuse-service-transactions Always refuse service transactions. - (default: {flag_refuse_service_transactions}). - --stratum Run Stratum server for miner push notification. (default: {flag_stratum}) - --stratum-interface IP Interface address for Stratum server. (default: {flag_stratum_interface}) - --stratum-port PORT Port for Stratum server to listen on. (default: {flag_stratum_port}) - --stratum-secret STRING Secret for authorizing Stratum server for peers. - (default: {flag_stratum_secret:?}) - -Footprint Options: - --tracing BOOL Indicates if full transaction tracing should be - enabled. Works only if client had been fully synced - with tracing enabled. BOOL may be one of auto, on, - off. auto uses last used value of this option (off - if it does not exist) (default: {flag_tracing}). - --pruning METHOD Configure pruning of the state/storage trie. METHOD - may be one of auto, archive, fast: - archive - keep all state trie data. No pruning. - fast - maintain journal overlay. Fast but 50MB used. - auto - use the method most recently synced or - default to fast if none synced (default: {flag_pruning}). - --pruning-history NUM Set a minimum number of recent states to keep when pruning - is active. (default: {flag_pruning_history}). - --pruning-memory MB The ideal amount of memory in megabytes to use to store - recent states. As many states as possible will be kept - within this limit, and at least --pruning-history states - will always be kept. (default: {flag_pruning_memory}) - --cache-size-db MB Override database cache size (default: {flag_cache_size_db}). - --cache-size-blocks MB Specify the prefered size of the blockchain cache in - megabytes (default: {flag_cache_size_blocks}). - --cache-size-queue MB Specify the maximum size of memory to use for block - queue (default: {flag_cache_size_queue}). - --cache-size-state MB Specify the maximum size of memory to use for - the state cache (default: {flag_cache_size_state}). - --cache-size MB Set total amount of discretionary memory to use for - the entire system, overrides other cache and queue - options. (default: {flag_cache_size:?}) - --fast-and-loose Disables DB WAL, which gives a significant speed up - but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose}) - --db-compaction TYPE Database compaction type. TYPE may be one of: - ssd - suitable for SSDs and fast HDDs; - hdd - suitable for slow HDDs; - auto - determine automatically (default: {flag_db_compaction}). - --fat-db BOOL Build appropriate information to allow enumeration - of all accounts and storage keys. Doubles the size - of the state database. BOOL may be one of on, off - or auto. (default: {flag_fat_db}) - --scale-verifiers Automatically scale amount of verifier threads based on - workload. Not guaranteed to be faster. - (default: {flag_scale_verifiers}) - --num-verifiers INT Amount of verifier threads to use or to begin with, if verifier - auto-scaling is enabled. (default: {flag_num_verifiers:?}) - -Import/Export Options: - --from BLOCK Export from block BLOCK, which may be an index or - hash (default: {flag_from}). - --to BLOCK Export to (including) block BLOCK, which may be an - index, hash or 'latest' (default: {flag_to}). - --format FORMAT For import/export in given format. FORMAT must be - one of 'hex' and 'binary'. 
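BLOCK values such as those taken by `--from`, `--to` and `--at` may be a decimal index, a block hash, or `latest`; the conversion is done by `to_block_id` in `parity/helpers.rs`. A simplified stand-in that mirrors the accepted forms (the enum and hex handling are illustrative, not Parity's `BlockId`):

```rust
/// Simplified stand-in for the BLOCK argument forms documented above.
#[derive(Debug, PartialEq)]
enum BlockRef {
    Latest,
    Number(u64),
    Hash(String),
}

fn to_block_ref(s: &str) -> Result<BlockRef, String> {
    if s == "latest" {
        return Ok(BlockRef::Latest);
    }
    if let Ok(num) = s.parse::<u64>() {
        return Ok(BlockRef::Number(num));
    }
    let hex = s.trim_start_matches("0x");
    if hex.len() == 64 && hex.chars().all(|c| c.is_ascii_hexdigit()) {
        return Ok(BlockRef::Hash(hex.to_owned()));
    }
    Err(format!("Invalid block reference: {}", s))
}

fn main() {
    assert_eq!(to_block_ref("latest").unwrap(), BlockRef::Latest);
    assert_eq!(to_block_ref("1024").unwrap(), BlockRef::Number(1024));
    assert!(to_block_ref("0xzz").is_err());
}
```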
- (default: {flag_format:?} = Import: auto, Export: binary) - --no-seal-check Skip block seal check. (default: {flag_no_seal_check}) - --at BLOCK Export state at the given block, which may be an - index, hash, or 'latest'. (default: {flag_at}) - --no-storage Don't export account storage. (default: {flag_no_storage}) - --no-code Don't export account code. (default: {flag_no_code}) - --min-balance WEI Don't export accounts with balance less than specified. - (default: {flag_min_balance:?}) - --max-balance WEI Don't export accounts with balance greater than specified. - (default: {flag_max_balance:?}) - -Snapshot Options: - --at BLOCK Take a snapshot at the given block, which may be an - index, hash, or 'latest'. Note that taking snapshots at - non-recent blocks will only work with --pruning archive - (default: {flag_at}) - --no-periodic-snapshot Disable automated snapshots which usually occur once - every 10000 blocks. (default: {flag_no_periodic_snapshot}) - -Virtual Machine Options: - --jitvm Enable the JIT VM. (default: {flag_jitvm}) - -Whisper Options: - --whisper Enable the Whisper network. (default: {flag_whisper}) - --whisper-pool-size MB Target size of the whisper message pool in megabytes. - (default: {flag_whisper_pool_size}) - -Legacy Options: - --geth Run in Geth-compatibility mode. Sets the IPC path - to be the same as Geth's. Overrides the --ipc-path - and --ipcpath options. Alters RPCs to reflect Geth - bugs. Includes the personal_ RPC by default. - --testnet Testnet mode. Equivalent to --chain testnet. - Overrides the --keys-path option. - --import-geth-keys Attempt to import keys from Geth client. - --datadir PATH Equivalent to --base-path PATH. - --networkid INDEX Equivalent to --network-id INDEX. - --peers NUM Equivalent to --min-peers NUM. - --nodekey KEY Equivalent to --node-key KEY. - --nodiscover Equivalent to --no-discovery. - -j --jsonrpc Does nothing; JSON-RPC is on by default now. - --jsonrpc-off Equivalent to --no-jsonrpc. - -w --webapp Does nothing; dapps server is on by default now. - --dapps-off Equivalent to --no-dapps. - --dapps-user USERNAME Dapps server authentication has been removed. (default: {flag_dapps_user:?}) - --dapps-pass PASSWORD Dapps server authentication has been removed. (default: {flag_dapps_pass:?}) - --dapps-apis-all Dapps server is merged with RPC server. Use --jsonrpc-apis. (default: {flag_dapps_apis_all:?}) - --dapps-cors URL Dapps server is merged with RPC server. Use --jsonrpc-cors. (default: {flag_dapps_cors:?}) - --dapps-hosts HOSTS Dapps server is merged with RPC server. Use --jsonrpc-hosts. (default: {flag_dapps_hosts:?}) - --dapps-interface IP Dapps server is merged with RPC server. Use --jsonrpc-interface. (default: {flag_dapps_interface:?}) - --dapps-port PORT Dapps server is merged with RPC server. Use --jsonrpc-port. (default: {flag_dapps_port:?}) - --rpc Does nothing; JSON-RPC is on by default now. - --warp Does nothing; Warp sync is on by default. (default: {flag_warp}) - --rpcaddr IP Equivalent to --jsonrpc-interface IP. - --rpcport PORT Equivalent to --jsonrpc-port PORT. - --rpcapi APIS Equivalent to --jsonrpc-apis APIS. - --rpccorsdomain URL Equivalent to --jsonrpc-cors URL. - --ipcdisable Equivalent to --no-ipc. - --ipc-off Equivalent to --no-ipc. - --ipcapi APIS Equivalent to --ipc-apis APIS. - --ipcpath PATH Equivalent to --ipc-path PATH. - --gasprice WEI Equivalent to --min-gas-price WEI. - --etherbase ADDRESS Equivalent to --author ADDRESS. - --extradata STRING Equivalent to --extra-data STRING. 
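These legacy equivalences are enforced by `find_deprecated` in `parity/deprecated.rs`, whose hunk appears later in this diff. A toy version of that mapping, listing only a few of the options above:

```rust
/// Toy version of the deprecation table kept in `parity/deprecated.rs`:
/// each legacy flag is either replaced, removed, or a no-op. Partial list,
/// for illustration only.
#[derive(Debug)]
enum Deprecated {
    DoesNothing(&'static str),
    Replaced(&'static str, &'static str),
    Removed(&'static str),
}

fn legacy_table() -> Vec<Deprecated> {
    vec![
        Deprecated::DoesNothing("--jsonrpc"),
        Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc"),
        Deprecated::Replaced("--etherbase", "--author"),
        Deprecated::Replaced("--extradata", "--extra-data"),
        Deprecated::Removed("--dapps-user"),
    ]
}

fn main() {
    for entry in legacy_table() {
        match entry {
            Deprecated::DoesNothing(f) => println!("{} does nothing", f),
            Deprecated::Replaced(old, new) => println!("{} is replaced by {}", old, new),
            Deprecated::Removed(f) => println!("{} has been removed", f),
        }
    }
}
```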
- --cache MB Equivalent to --cache-size MB. - -Internal Options: - --can-restart Executable will auto-restart if exiting with 69. - -Miscellaneous Options: - --ntp-servers HOSTS Comma separated list of NTP servers to provide current time (host:port). - Used to verify node health. Parity uses pool.ntp.org NTP servers, - consider joining the pool: http://www.pool.ntp.org/join.html - (default: {flag_ntp_servers}) - -l --logging LOGGING Specify the logging level. Must conform to the same - format as RUST_LOG. (default: {flag_logging:?}) - --log-file FILENAME Specify a filename into which logging should be - appended. (default: {flag_log_file:?}) - --no-config Don't load a configuration file. - --no-color Don't use terminal color codes in output. (default: {flag_no_color}) - -v --version Show information about version. - -h --help Show this screen. diff --git a/parity/cli/usage_header.txt b/parity/cli/usage_header.txt new file mode 100644 index 000000000..bcc0f93cb --- /dev/null +++ b/parity/cli/usage_header.txt @@ -0,0 +1,3 @@ +Parity. Ethereum Client. + By Wood/Paronyan/Kotewicz/Drwięga/Volf et al. + Copyright 2015, 2016, 2017 Parity Technologies (UK) Ltd \ No newline at end of file diff --git a/parity/cli/version.txt b/parity/cli/version.txt index a4febdcbd..855dbb5cf 100644 --- a/parity/cli/version.txt +++ b/parity/cli/version.txt @@ -7,4 +7,3 @@ There is NO WARRANTY, to the extent permitted by law. By Wood/Paronyan/Kotewicz/Drwięga/Volf Habermeier/Czaban/Greeff/Gotchac/Redmann - diff --git a/parity/configuration.rs b/parity/configuration.rs index 681804346..88e5e8d02 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -22,9 +22,12 @@ use std::collections::BTreeMap; use std::cmp::max; use std::str::FromStr; use cli::{Args, ArgsError}; -use util::{Hashable, H256, U256, Bytes, version_data, Address}; +use hash::keccak; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{Bytes, version_data, Address}; use util::journaldb::Algorithm; -use util::Colour; +use ansi_term::Colour; use ethsync::{NetworkConfiguration, is_valid_node_url}; use ethcore::ethstore::ethkey::{Secret, Public}; use ethcore::client::{VMType}; @@ -103,13 +106,13 @@ impl Configuration { pub fn into_command(self) -> Result { let dirs = self.directories(); - let pruning = self.args.flag_pruning.parse()?; - let pruning_history = self.args.flag_pruning_history; + let pruning = self.args.arg_pruning.parse()?; + let pruning_history = self.args.arg_pruning_history; let vm_type = self.vm_type()?; let spec = self.chain().parse()?; - let mode = match self.args.flag_mode.as_ref() { + let mode = match self.args.arg_mode.as_ref() { "last" => None, - mode => Some(to_mode(&mode, self.args.flag_mode_timeout, self.args.flag_mode_alarm)?), + mode => Some(to_mode(&mode, self.args.arg_mode_timeout, self.args.arg_mode_alarm)?), }; let update_policy = self.update_policy()?; let logger_config = self.logger_config(); @@ -120,18 +123,21 @@ impl Configuration { let ui_conf = self.ui_config(); let network_id = self.network_id(); let cache_config = self.cache_config(); - let tracing = self.args.flag_tracing.parse()?; - let fat_db = self.args.flag_fat_db.parse()?; - let compaction = self.args.flag_db_compaction.parse()?; + let tracing = self.args.arg_tracing.parse()?; + let fat_db = self.args.arg_fat_db.parse()?; + let compaction = self.args.arg_db_compaction.parse()?; let wal = !self.args.flag_fast_and_loose; - match self.args.flag_warp { - // Logging is not initialized yet, so we print directly to stderr - Some(true) if
fat_db == Switch::On => writeln!(&mut stderr(), "Warning: Warp Sync is disabled because Fat DB is turned on").expect("Error writing to stderr"), - Some(true) if tracing == Switch::On => writeln!(&mut stderr(), "Warning: Warp Sync is disabled because tracing is turned on").expect("Error writing to stderr"), - Some(true) if pruning == Pruning::Specific(Algorithm::Archive) => writeln!(&mut stderr(), "Warning: Warp Sync is disabled because pruning mode is set to archive").expect("Error writing to stderr"), - _ => {}, - }; let public_node = self.args.flag_public_node; + if !self.args.flag_no_warp { + // Logging is not initialized yet, so we print directly to stderr + if fat_db == Switch::On { + writeln!(&mut stderr(), "Warning: Warp Sync is disabled because Fat DB is turned on").expect("Error writing to stderr"); + } else if tracing == Switch::On { + writeln!(&mut stderr(), "Warning: Warp Sync is disabled because tracing is turned on").expect("Error writing to stderr"); + } else if pruning == Pruning::Specific(Algorithm::Archive) { + writeln!(&mut stderr(), "Warning: Warp Sync is disabled because pruning mode is set to archive").expect("Error writing to stderr"); + } + } let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive); let geth_compatibility = self.args.flag_geth; let mut dapps_conf = self.dapps_config(); @@ -139,9 +145,9 @@ impl Configuration { let secretstore_conf = self.secretstore_config()?; let format = self.format()?; - if self.args.flag_jsonrpc_server_threads.is_some() && dapps_conf.enabled { + if self.args.arg_jsonrpc_server_threads.is_some() && dapps_conf.enabled { dapps_conf.enabled = false; - writeln!(&mut stderr(), "Warning: Disabling Dapps server because fast RPC server was enabled.").expect("Error writing to stderr.") + writeln!(&mut stderr(), "Warning: Disabling Dapps server because fast RPC server was enabled.").expect("Error writing to stderr."); } let cmd = if self.args.flag_version { @@ -149,25 +155,25 @@ impl Configuration { } else if self.args.cmd_signer { let authfile = ::signer::codes_path(&ws_conf.signer_path); - if self.args.cmd_new_token { + if self.args.cmd_signer_new_token { Cmd::SignerToken(ws_conf, ui_conf, logger_config.clone()) - } else if self.args.cmd_sign { - let pwfile = self.args.flag_password.get(0).map(|pwfile| { + } else if self.args.cmd_signer_sign { + let pwfile = self.args.arg_signer_sign_password.map(|pwfile| { PathBuf::from(pwfile) }); Cmd::SignerSign { - id: self.args.arg_id, + id: self.args.arg_signer_sign_id, pwfile: pwfile, port: ws_conf.port, authfile: authfile, } - } else if self.args.cmd_reject { + } else if self.args.cmd_signer_reject { Cmd::SignerReject { - id: self.args.arg_id, + id: self.args.arg_signer_reject_id, port: ws_conf.port, authfile: authfile, } - } else if self.args.cmd_list { + } else if self.args.cmd_signer_list { Cmd::SignerList { port: ws_conf.port, authfile: authfile, @@ -175,32 +181,32 @@ impl Configuration { } else { unreachable!(); } - } else if self.args.cmd_tools && self.args.cmd_hash { - Cmd::Hash(self.args.arg_file) - } else if self.args.cmd_db && self.args.cmd_kill { + } else if self.args.cmd_tools && self.args.cmd_tools_hash { + Cmd::Hash(self.args.arg_tools_hash_file) + } else if self.args.cmd_db && self.args.cmd_db_kill { Cmd::Blockchain(BlockchainCmd::Kill(KillBlockchain { spec: spec, dirs: dirs, pruning: pruning, })) } else if self.args.cmd_account { - let account_cmd = if self.args.cmd_new { + let account_cmd = if 
self.args.cmd_account_new { let new_acc = NewAccount { - iterations: self.args.flag_keys_iterations, + iterations: self.args.arg_keys_iterations, path: dirs.keys, spec: spec, - password_file: self.args.flag_password.first().cloned(), + password_file: self.args.arg_account_new_password.clone(), }; AccountCmd::New(new_acc) - } else if self.args.cmd_list { + } else if self.args.cmd_account_list { let list_acc = ListAccounts { path: dirs.keys, spec: spec, }; AccountCmd::List(list_acc) - } else if self.args.cmd_import { + } else if self.args.cmd_account_import { let import_acc = ImportAccounts { - from: self.args.arg_path.clone(), + from: self.args.arg_account_import_path.expect("CLI argument is required; qed").clone(), to: dirs.keys, spec: spec, }; @@ -220,11 +226,11 @@ impl Configuration { Cmd::Account(account_cmd) } else if self.args.cmd_wallet { let presale_cmd = ImportWallet { - iterations: self.args.flag_keys_iterations, + iterations: self.args.arg_keys_iterations, path: dirs.keys, spec: spec, - wallet_path: self.args.arg_path.first().unwrap().clone(), - password_file: self.args.flag_password.first().cloned(), + wallet_path: self.args.arg_wallet_import_path.unwrap().clone(), + password_file: self.args.arg_wallet_import_password, }; Cmd::ImportPresaleWallet(presale_cmd) } else if self.args.cmd_import { @@ -232,11 +238,11 @@ impl Configuration { spec: spec, cache_config: cache_config, dirs: dirs, - file_path: self.args.arg_file.clone(), + file_path: self.args.arg_import_file.clone(), format: format, pruning: pruning, pruning_history: pruning_history, - pruning_memory: self.args.flag_pruning_memory, + pruning_memory: self.args.arg_pruning_memory, compaction: compaction, wal: wal, tracing: tracing, @@ -249,44 +255,44 @@ impl Configuration { }; Cmd::Blockchain(BlockchainCmd::Import(import_cmd)) } else if self.args.cmd_export { - if self.args.cmd_blocks { + if self.args.cmd_export_blocks { let export_cmd = ExportBlockchain { spec: spec, cache_config: cache_config, dirs: dirs, - file_path: self.args.arg_file.clone(), + file_path: self.args.arg_export_blocks_file.clone(), format: format, pruning: pruning, pruning_history: pruning_history, - pruning_memory: self.args.flag_pruning_memory, + pruning_memory: self.args.arg_pruning_memory, compaction: compaction, wal: wal, tracing: tracing, fat_db: fat_db, - from_block: to_block_id(&self.args.flag_from)?, - to_block: to_block_id(&self.args.flag_to)?, + from_block: to_block_id(&self.args.arg_export_blocks_from)?, + to_block: to_block_id(&self.args.arg_export_blocks_to)?, check_seal: !self.args.flag_no_seal_check, }; Cmd::Blockchain(BlockchainCmd::Export(export_cmd)) - } else if self.args.cmd_state { + } else if self.args.cmd_export_state { let export_cmd = ExportState { spec: spec, cache_config: cache_config, dirs: dirs, - file_path: self.args.arg_file.clone(), + file_path: self.args.arg_export_state_file.clone(), format: format, pruning: pruning, pruning_history: pruning_history, - pruning_memory: self.args.flag_pruning_memory, + pruning_memory: self.args.arg_pruning_memory, compaction: compaction, wal: wal, tracing: tracing, fat_db: fat_db, - at: to_block_id(&self.args.flag_at)?, - storage: !self.args.flag_no_storage, - code: !self.args.flag_no_code, - min_balance: self.args.flag_min_balance.and_then(|s| to_u256(&s).ok()), - max_balance: self.args.flag_max_balance.and_then(|s| to_u256(&s).ok()), + at: to_block_id(&self.args.arg_export_state_at)?, + storage: !self.args.flag_export_state_no_storage, + code: !self.args.flag_export_state_no_code, + 
min_balance: self.args.arg_export_state_min_balance.and_then(|s| to_u256(&s).ok()), + max_balance: self.args.arg_export_state_max_balance.and_then(|s| to_u256(&s).ok()), }; Cmd::Blockchain(BlockchainCmd::ExportState(export_cmd)) } else { @@ -299,14 +305,14 @@ impl Configuration { spec: spec, pruning: pruning, pruning_history: pruning_history, - pruning_memory: self.args.flag_pruning_memory, + pruning_memory: self.args.arg_pruning_memory, tracing: tracing, fat_db: fat_db, compaction: compaction, - file_path: self.args.arg_file.clone(), + file_path: self.args.arg_snapshot_file.clone(), wal: wal, kind: snapshot::Kind::Take, - block_at: to_block_id(&self.args.flag_at)?, + block_at: to_block_id(&self.args.arg_snapshot_at)?, }; Cmd::Snapshot(snapshot_cmd) } else if self.args.cmd_restore { @@ -316,11 +322,11 @@ impl Configuration { spec: spec, pruning: pruning, pruning_history: pruning_history, - pruning_memory: self.args.flag_pruning_memory, + pruning_memory: self.args.arg_pruning_memory, tracing: tracing, fat_db: fat_db, compaction: compaction, - file_path: self.args.arg_file.clone(), + file_path: self.args.arg_restore_file.clone(), wal: wal, kind: snapshot::Kind::Restore, block_at: to_block_id("latest")?, // unimportant. @@ -328,7 +334,7 @@ impl Configuration { Cmd::Snapshot(restore_cmd) } else { let daemon = if self.args.cmd_daemon { - Some(self.args.arg_pid_file.clone()) + Some(self.args.arg_daemon_pid_file.clone().expect("CLI argument is required; qed")) } else { None }; @@ -342,10 +348,10 @@ impl Configuration { spec: spec, pruning: pruning, pruning_history: pruning_history, - pruning_memory: self.args.flag_pruning_memory, + pruning_memory: self.args.arg_pruning_memory, daemon: daemon, logger_config: logger_config.clone(), - miner_options: self.miner_options(self.args.flag_reseal_min_period)?, + miner_options: self.miner_options(self.args.arg_reseal_min_period)?, ntp_servers: self.ntp_servers(), ws_conf: ws_conf, http_conf: http_conf, @@ -373,8 +379,8 @@ impl Configuration { secretstore_conf: secretstore_conf, dapp: self.dapp_to_open()?, ui: self.args.cmd_ui, - name: self.args.flag_identity, - custom_bootnodes: self.args.flag_bootnodes.is_some(), + name: self.args.arg_identity, + custom_bootnodes: self.args.arg_bootnodes.is_some(), no_periodic_snapshot: self.args.flag_no_periodic_snapshot, check_seal: !self.args.flag_no_seal_check, download_old_blocks: !self.args.flag_no_ancient_blocks, @@ -405,8 +411,8 @@ impl Configuration { let extras = MinerExtras { author: self.author()?, extra_data: self.extra_data()?, - gas_floor_target: to_u256(&self.args.flag_gas_floor_target)?, - gas_ceil_target: to_u256(&self.args.flag_gas_cap)?, + gas_floor_target: to_u256(&self.args.arg_gas_floor_target)?, + gas_ceil_target: to_u256(&self.args.arg_gas_cap)?, engine_signer: self.engine_signer()?, }; @@ -414,37 +420,39 @@ impl Configuration { } fn author(&self) -> Result { - to_address(self.args.flag_etherbase.clone().or(self.args.flag_author.clone())) + to_address(self.args.arg_etherbase.clone().or(self.args.arg_author.clone())) } fn engine_signer(&self) -> Result { - to_address(self.args.flag_engine_signer.clone()) + to_address(self.args.arg_engine_signer.clone()) } fn format(&self) -> Result, String> { - match self.args.flag_format { + match self.args.arg_import_format.clone() + .or(self.args.arg_export_blocks_format.clone()) + .or(self.args.arg_export_state_format.clone()) { Some(ref f) => Ok(Some(f.parse()?)), None => Ok(None), } } fn cache_config(&self) -> CacheConfig { - match 
self.args.flag_cache_size.or(self.args.flag_cache) { + match self.args.arg_cache_size.or(self.args.arg_cache) { Some(size) => CacheConfig::new_with_total_cache_size(size), None => CacheConfig::new( - self.args.flag_cache_size_db, - self.args.flag_cache_size_blocks, - self.args.flag_cache_size_queue, - self.args.flag_cache_size_state, + self.args.arg_cache_size_db, + self.args.arg_cache_size_blocks, + self.args.arg_cache_size_queue, + self.args.arg_cache_size_state, ), } } fn logger_config(&self) -> LogConfig { LogConfig { - mode: self.args.flag_logging.clone(), + mode: self.args.arg_logging.clone(), color: !self.args.flag_no_color && !cfg!(windows), - file: self.args.flag_log_file.clone(), + file: self.args.arg_log_file.clone(), } } @@ -455,44 +463,44 @@ impl Configuration { else if self.args.flag_testnet { "testnet".to_owned() } else { - self.args.flag_chain.clone() + self.args.arg_chain.clone() } } fn max_peers(&self) -> u32 { - let peers = self.args.flag_max_peers as u32; + let peers = self.args.arg_max_peers as u32; max(self.min_peers(), peers) } fn ip_filter(&self) -> Result { - match IpFilter::parse(self.args.flag_allow_ips.as_str()) { + match IpFilter::parse(self.args.arg_allow_ips.as_str()) { Ok(allow_ip) => Ok(allow_ip), Err(_) => Err("Invalid IP filter value".to_owned()), } } fn min_peers(&self) -> u32 { - self.args.flag_peers.unwrap_or(self.args.flag_min_peers) as u32 + self.args.arg_peers.unwrap_or(self.args.arg_min_peers) as u32 } fn max_pending_peers(&self) -> u32 { - self.args.flag_max_pending_peers as u32 + self.args.arg_max_pending_peers as u32 } fn snapshot_peers(&self) -> u32 { - self.args.flag_snapshot_peers as u32 + self.args.arg_snapshot_peers as u32 } fn work_notify(&self) -> Vec { - self.args.flag_notify_work.as_ref().map_or_else(Vec::new, |s| s.split(',').map(|s| s.to_owned()).collect()) + self.args.arg_notify_work.as_ref().map_or_else(Vec::new, |s| s.split(',').map(|s| s.to_owned()).collect()) } fn accounts_config(&self) -> Result { let cfg = AccountsConfig { - iterations: self.args.flag_keys_iterations, + iterations: self.args.arg_keys_iterations, testnet: self.args.flag_testnet, - password_files: self.args.flag_password.clone(), - unlocked_accounts: to_addresses(&self.args.flag_unlock)?, + password_files: self.args.arg_password.clone(), + unlocked_accounts: to_addresses(&self.args.arg_unlock)?, enable_hardware_wallets: !self.args.flag_no_hardware_wallets, enable_fast_unlock: self.args.flag_fast_unlock, }; @@ -505,8 +513,8 @@ impl Configuration { Ok(Some(StratumOptions { io_path: self.directories().db, listen_addr: self.stratum_interface(), - port: self.args.flag_ports_shift + self.args.flag_stratum_port, - secret: self.args.flag_stratum_secret.as_ref().map(|s| s.parse::().unwrap_or_else(|_| s.sha3())), + port: self.args.arg_ports_shift + self.args.arg_stratum_port, + secret: self.args.arg_stratum_secret.as_ref().map(|s| s.parse::().unwrap_or_else(|_| keccak(s))), })) } else { Ok(None) } } @@ -516,7 +524,7 @@ impl Configuration { return Err("Force sealing can't be used with reseal_min_period = 0".into()); } - let reseal = self.args.flag_reseal_on_txs.parse::()?; + let reseal = self.args.arg_reseal_on_txs.parse::()?; let options = MinerOptions { new_work_notify: self.work_notify(), @@ -524,26 +532,26 @@ impl Configuration { reseal_on_external_tx: reseal.external, reseal_on_own_tx: reseal.own, reseal_on_uncle: self.args.flag_reseal_on_uncle, - tx_gas_limit: match self.args.flag_tx_gas_limit { + tx_gas_limit: match self.args.arg_tx_gas_limit { Some(ref d) => 
to_u256(d)?, None => U256::max_value(), }, - tx_queue_size: self.args.flag_tx_queue_size, - tx_queue_memory_limit: if self.args.flag_tx_queue_mem_limit > 0 { - Some(self.args.flag_tx_queue_mem_limit as usize * 1024 * 1024) + tx_queue_size: self.args.arg_tx_queue_size, + tx_queue_memory_limit: if self.args.arg_tx_queue_mem_limit > 0 { + Some(self.args.arg_tx_queue_mem_limit as usize * 1024 * 1024) } else { None }, - tx_queue_gas_limit: to_gas_limit(&self.args.flag_tx_queue_gas)?, - tx_queue_strategy: to_queue_strategy(&self.args.flag_tx_queue_strategy)?, - pending_set: to_pending_set(&self.args.flag_relay_set)?, + tx_queue_gas_limit: to_gas_limit(&self.args.arg_tx_queue_gas)?, + tx_queue_strategy: to_queue_strategy(&self.args.arg_tx_queue_strategy)?, + pending_set: to_pending_set(&self.args.arg_relay_set)?, reseal_min_period: Duration::from_millis(reseal_min_period), - reseal_max_period: Duration::from_millis(self.args.flag_reseal_max_period), - work_queue_size: self.args.flag_work_queue_size, + reseal_max_period: Duration::from_millis(self.args.arg_reseal_max_period), + work_queue_size: self.args.arg_work_queue_size, enable_resubmission: !self.args.flag_remove_solved, - tx_queue_banning: match self.args.flag_tx_time_limit { + tx_queue_banning: match self.args.arg_tx_time_limit { Some(limit) => Banning::Enabled { - min_offends: self.args.flag_tx_queue_ban_count, + min_offends: self.args.arg_tx_queue_ban_count, offend_threshold: Duration::from_millis(limit), - ban_duration: Duration::from_secs(self.args.flag_tx_queue_ban_time as u64), + ban_duration: Duration::from_secs(self.args.arg_tx_queue_ban_time as u64), }, None => Banning::Disabled, }, @@ -554,11 +562,11 @@ impl Configuration { } fn ui_port(&self) -> u16 { - self.args.flag_ports_shift + self.args.flag_ui_port + self.args.arg_ports_shift + self.args.arg_ui_port } fn ntp_servers(&self) -> Vec { - self.args.flag_ntp_servers.split(",").map(str::to_owned).collect() + self.args.arg_ntp_servers.split(",").map(str::to_owned).collect() } fn ui_config(&self) -> UiConfiguration { @@ -578,7 +586,7 @@ impl Configuration { enabled: self.dapps_enabled(), dapps_path: PathBuf::from(self.directories().dapps), extra_dapps: if self.args.cmd_dapp { - self.args.arg_path.iter().map(|path| PathBuf::from(path)).collect() + self.args.arg_dapp_path.iter().map(|path| PathBuf::from(path)).collect() } else { vec![] }, @@ -613,9 +621,9 @@ impl Configuration { self_secret: self.secretstore_self_secret()?, nodes: self.secretstore_nodes()?, interface: self.secretstore_interface(), - port: self.args.flag_ports_shift + self.args.flag_secretstore_port, + port: self.args.arg_ports_shift + self.args.arg_secretstore_port, http_interface: self.secretstore_http_interface(), - http_port: self.args.flag_ports_shift + self.args.flag_secretstore_http_port, + http_port: self.args.arg_ports_shift + self.args.arg_secretstore_http_port, data_path: self.directories().secretstore, }) } @@ -623,7 +631,7 @@ impl Configuration { fn ipfs_config(&self) -> IpfsConfiguration { IpfsConfiguration { enabled: self.args.flag_ipfs_api, - port: self.args.flag_ports_shift + self.args.flag_ipfs_api_port, + port: self.args.arg_ports_shift + self.args.arg_ipfs_api_port, interface: self.ipfs_interface(), cors: self.ipfs_cors(), hosts: self.ipfs_hosts(), @@ -634,7 +642,7 @@ impl Configuration { if !self.args.cmd_dapp { return Ok(None); } - let path = self.args.arg_path.get(0).map(String::as_str).unwrap_or("."); + let path = self.args.arg_dapp_path.as_ref().map(String::as_str).unwrap_or("."); let path = 
Path::new(path).canonicalize() .map_err(|e| format!("Invalid path: {}. Error: {:?}", path, e))?; let name = path.file_name() @@ -651,14 +659,14 @@ impl Configuration { U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap() } - if let Some(dec) = self.args.flag_gasprice.as_ref() { + if let Some(dec) = self.args.arg_gasprice.as_ref() { return Ok(GasPricerConfig::Fixed(to_u256(dec)?)); - } else if let Some(dec) = self.args.flag_min_gas_price { + } else if let Some(dec) = self.args.arg_min_gas_price { return Ok(GasPricerConfig::Fixed(U256::from(dec))); } - let usd_per_tx = to_price(&self.args.flag_usd_per_tx)?; - if "auto" == self.args.flag_usd_per_eth.as_str() { + let usd_per_tx = to_price(&self.args.arg_usd_per_tx)?; + if "auto" == self.args.arg_usd_per_eth.as_str() { // Just a very rough estimate to avoid accepting // ZGP transactions before the price is fetched // if user does not want it. @@ -666,11 +674,11 @@ impl Configuration { return Ok(GasPricerConfig::Calibrated { initial_minimum: wei_per_gas(usd_per_tx, last_known_usd_per_eth), usd_per_tx: usd_per_tx, - recalibration_period: to_duration(self.args.flag_price_update_period.as_str())?, + recalibration_period: to_duration(self.args.arg_price_update_period.as_str())?, }); } - let usd_per_eth = to_price(&self.args.flag_usd_per_eth)?; + let usd_per_eth = to_price(&self.args.arg_usd_per_eth)?; let wei_per_gas = wei_per_gas(usd_per_tx, usd_per_eth); info!( @@ -683,7 +691,7 @@ impl Configuration { } fn extra_data(&self) -> Result { - match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) { + match self.args.arg_extradata.as_ref().or(self.args.arg_extra_data.as_ref()) { Some(x) if x.len() <= 32 => Ok(x.as_bytes().to_owned()), None => Ok(version_data()), Some(_) => Err("Extra data must be at most 32 characters".into()), @@ -693,7 +701,7 @@ impl Configuration { fn init_reserved_nodes(&self) -> Result, String> { use std::fs::File; - match self.args.flag_reserved_peers { + match self.args.arg_reserved_peers { Some(ref path) => { let mut buffer = String::new(); let mut node_file = File::open(path).map_err(|e| format!("Error opening reserved nodes file: {}", e))?; @@ -709,10 +717,10 @@ impl Configuration { } fn net_addresses(&self) -> Result<(SocketAddr, Option), String> { - let port = self.args.flag_ports_shift + self.args.flag_port; + let port = self.args.arg_ports_shift + self.args.arg_port; let listen_address = SocketAddr::new("0.0.0.0".parse().unwrap(), port); - let public_address = if self.args.flag_nat.starts_with("extip:") { - let host = &self.args.flag_nat[6..]; + let public_address = if self.args.arg_nat.starts_with("extip:") { + let host = &self.args.arg_nat[6..]; let host = host.parse().map_err(|_| format!("Invalid host given with `--nat extip:{}`", host))?; Some(SocketAddr::new(host, port)) } else { @@ -723,13 +731,13 @@ impl Configuration { fn net_config(&self) -> Result { let mut ret = NetworkConfiguration::new(); - ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp"; - ret.boot_nodes = to_bootnodes(&self.args.flag_bootnodes)?; + ret.nat_enabled = self.args.arg_nat == "any" || self.args.arg_nat == "upnp"; + ret.boot_nodes = to_bootnodes(&self.args.arg_bootnodes)?; let (listen, public) = self.net_addresses()?; ret.listen_address = Some(format!("{}", listen)); ret.public_address = public.map(|p| format!("{}", p)); - ret.use_secret = match self.args.flag_node_key.as_ref() - .map(|s| s.parse::().or_else(|_| Secret::from_unsafe_slice(&s.sha3())).map_err(|e| format!("Invalid key: 
{:?}", e)) + ret.use_secret = match self.args.arg_node_key.as_ref() + .map(|s| s.parse::().or_else(|_| Secret::from_unsafe_slice(&keccak(s))).map_err(|e| format!("Invalid key: {:?}", e)) ) { None => None, Some(Ok(key)) => Some(key), @@ -750,13 +758,13 @@ impl Configuration { } fn network_id(&self) -> Option { - self.args.flag_network_id.or(self.args.flag_networkid) + self.args.arg_network_id.or(self.args.arg_networkid) } fn rpc_apis(&self) -> String { - let mut apis: Vec<&str> = self.args.flag_rpcapi + let mut apis: Vec<&str> = self.args.arg_rpcapi .as_ref() - .unwrap_or(&self.args.flag_jsonrpc_apis) + .unwrap_or(&self.args.arg_jsonrpc_apis) .split(",") .collect(); @@ -772,12 +780,12 @@ impl Configuration { } fn rpc_cors(&self) -> Option> { - let cors = self.args.flag_jsonrpc_cors.as_ref().or(self.args.flag_rpccorsdomain.as_ref()); + let cors = self.args.arg_jsonrpc_cors.as_ref().or(self.args.arg_rpccorsdomain.as_ref()); Self::cors(cors) } fn ipfs_cors(&self) -> Option> { - Self::cors(self.args.flag_ipfs_api_cors.as_ref()) + Self::cors(self.args.arg_ipfs_api_cors.as_ref()) } fn hosts(&self, hosts: &str, interface: &str) -> Option> { @@ -803,15 +811,15 @@ impl Configuration { } fn ui_hosts(&self) -> Option> { - self.hosts(&self.args.flag_ui_hosts, &self.ui_interface()) + self.hosts(&self.args.arg_ui_hosts, &self.ui_interface()) } fn rpc_hosts(&self) -> Option> { - self.hosts(&self.args.flag_jsonrpc_hosts, &self.rpc_interface()) + self.hosts(&self.args.arg_jsonrpc_hosts, &self.rpc_interface()) } fn ws_hosts(&self) -> Option> { - self.hosts(&self.args.flag_ws_hosts, &self.ws_interface()) + self.hosts(&self.args.arg_ws_hosts, &self.ws_interface()) } fn ws_origins(&self) -> Option> { @@ -819,11 +827,11 @@ impl Configuration { return None; } - Self::parse_hosts(&self.args.flag_ws_origins) + Self::parse_hosts(&self.args.arg_ws_origins) } fn ipfs_hosts(&self) -> Option> { - self.hosts(&self.args.flag_ipfs_api_hosts, &self.ipfs_interface()) + self.hosts(&self.args.arg_ipfs_api_hosts, &self.ipfs_interface()) } fn ipc_config(&self) -> Result { @@ -831,7 +839,7 @@ impl Configuration { enabled: !(self.args.flag_ipcdisable || self.args.flag_ipc_off || self.args.flag_no_ipc), socket_addr: self.ipc_path(), apis: { - let mut apis = self.args.flag_ipcapi.clone().unwrap_or(self.args.flag_ipc_apis.clone()); + let mut apis = self.args.arg_ipcapi.clone().unwrap_or(self.args.arg_ipc_apis.clone()); if self.args.flag_geth { if !apis.is_empty() { apis.push_str(","); @@ -849,19 +857,19 @@ impl Configuration { let conf = HttpConfiguration { enabled: self.rpc_enabled(), interface: self.rpc_interface(), - port: self.args.flag_ports_shift + self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), + port: self.args.arg_ports_shift + self.args.arg_rpcport.unwrap_or(self.args.arg_jsonrpc_port), apis: match self.args.flag_public_node { false => self.rpc_apis().parse()?, true => self.rpc_apis().parse::()?.retain(ApiSet::PublicContext), }, hosts: self.rpc_hosts(), cors: self.rpc_cors(), - server_threads: match self.args.flag_jsonrpc_server_threads { + server_threads: match self.args.arg_jsonrpc_server_threads { Some(threads) if threads > 0 => Some(threads), None => None, _ => return Err("--jsonrpc-server-threads number needs to be positive.".into()), }, - processing_threads: self.args.flag_jsonrpc_threads, + processing_threads: self.args.arg_jsonrpc_threads, }; Ok(conf) @@ -873,8 +881,8 @@ impl Configuration { let conf = WsConfiguration { enabled: self.ws_enabled(), interface: self.ws_interface(), - port: 
self.args.flag_ports_shift + self.args.flag_ws_port, - apis: self.args.flag_ws_apis.parse()?, + port: self.args.arg_ports_shift + self.args.arg_ws_port, + apis: self.args.arg_ws_apis.parse()?, hosts: self.ws_hosts(), origins: self.ws_origins(), signer_path: self.directories().signer.into(), @@ -889,7 +897,7 @@ impl Configuration { let http_conf = self.http_config()?; let net_addresses = self.net_addresses()?; Ok(NetworkSettings { - name: self.args.flag_identity.clone(), + name: self.args.arg_identity.clone(), chain: self.chain(), network_port: net_addresses.0.port(), rpc_enabled: http_conf.enabled, @@ -902,13 +910,13 @@ impl Configuration { Ok(UpdatePolicy { enable_downloading: !self.args.flag_no_download, require_consensus: !self.args.flag_no_consensus, - filter: match self.args.flag_auto_update.as_ref() { + filter: match self.args.arg_auto_update.as_ref() { "none" => UpdateFilter::None, "critical" => UpdateFilter::Critical, "all" => UpdateFilter::All, _ => return Err("Invalid value for `--auto-update`. See `--help` for more information.".into()), }, - track: match self.args.flag_release_track.as_ref() { + track: match self.args.arg_release_track.as_ref() { "stable" => ReleaseTrack::Stable, "beta" => ReleaseTrack::Beta, "nightly" => ReleaseTrack::Nightly, @@ -924,23 +932,23 @@ impl Configuration { use path; let local_path = default_local_path(); - let base_path = self.args.flag_base_path.as_ref().or_else(|| self.args.flag_datadir.as_ref()).map_or_else(|| default_data_path(), |s| s.clone()); + let base_path = self.args.arg_base_path.as_ref().or_else(|| self.args.arg_datadir.as_ref()).map_or_else(|| default_data_path(), |s| s.clone()); let data_path = replace_home("", &base_path); - let is_using_base_path = self.args.flag_base_path.is_some(); + let is_using_base_path = self.args.arg_base_path.is_some(); // If base_path is set and db_path is not we default to base path subdir instead of LOCAL. 
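The `$BASE/chains` and `$BASE/cache` defaults in this hunk are resolved by `replace_home` / `replace_home_and_local` from `parity/helpers.rs`. Treating `$BASE` and `$LOCAL` as plain string placeholders, as the sketch below does, is an assumption for illustration; the real helpers also handle the home directory and platform-specific paths:

```rust
/// Minimal placeholder expansion for directory defaults such as "$BASE/chains".
/// Illustrative only; see `replace_home_and_local` in `parity/helpers.rs`.
fn expand_path(path: &str, base: &str, local: &str) -> String {
    path.replace("$BASE", base).replace("$LOCAL", local)
}

fn main() {
    let db_path = expand_path(
        "$BASE/chains",
        "/home/alice/.local/share/io.parity.ethereum", // assumed base dir
        "/home/alice/.cache/parity",                    // assumed local dir
    );
    assert!(db_path.ends_with("/chains"));
    println!("{}", db_path);
}
```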
- let base_db_path = if is_using_base_path && self.args.flag_db_path.is_none() { + let base_db_path = if is_using_base_path && self.args.arg_db_path.is_none() { "$BASE/chains" } else { - self.args.flag_db_path.as_ref().map_or(dir::CHAINS_PATH, |s| &s) + self.args.arg_db_path.as_ref().map_or(dir::CHAINS_PATH, |s| &s) }; let cache_path = if is_using_base_path { "$BASE/cache" } else { dir::CACHE_PATH }; let db_path = replace_home_and_local(&data_path, &local_path, &base_db_path); let cache_path = replace_home_and_local(&data_path, &local_path, cache_path); - let keys_path = replace_home(&data_path, &self.args.flag_keys_path); - let dapps_path = replace_home(&data_path, &self.args.flag_dapps_path); - let secretstore_path = replace_home(&data_path, &self.args.flag_secretstore_path); - let ui_path = replace_home(&data_path, &self.args.flag_ui_path); + let keys_path = replace_home(&data_path, &self.args.arg_keys_path); + let dapps_path = replace_home(&data_path, &self.args.arg_dapps_path); + let secretstore_path = replace_home(&data_path, &self.args.arg_secretstore_path); + let ui_path = replace_home(&data_path, &self.args.arg_ui_path); if self.args.flag_geth && !cfg!(windows) { let geth_root = if self.chain() == "testnet".to_owned() { path::ethereum::test() } else { path::ethereum::default() }; @@ -974,8 +982,8 @@ impl Configuration { } else { parity_ipc_path( &self.directories().base, - &self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone()), - self.args.flag_ports_shift, + &self.args.arg_ipcpath.clone().unwrap_or(self.args.arg_ipc_path.clone()), + self.args.arg_ports_shift, ) } } @@ -993,32 +1001,32 @@ impl Configuration { } fn ui_interface(&self) -> String { - self.interface(&self.args.flag_ui_interface) + self.interface(&self.args.arg_ui_interface) } fn rpc_interface(&self) -> String { - let rpc_interface = self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()); + let rpc_interface = self.args.arg_rpcaddr.clone().unwrap_or(self.args.arg_jsonrpc_interface.clone()); self.interface(&rpc_interface) } fn ws_interface(&self) -> String { - self.interface(&self.args.flag_ws_interface) + self.interface(&self.args.arg_ws_interface) } fn ipfs_interface(&self) -> String { - self.interface(&self.args.flag_ipfs_api_interface) + self.interface(&self.args.arg_ipfs_api_interface) } fn secretstore_interface(&self) -> String { - self.interface(&self.args.flag_secretstore_interface) + self.interface(&self.args.arg_secretstore_interface) } fn secretstore_http_interface(&self) -> String { - self.interface(&self.args.flag_secretstore_http_interface) + self.interface(&self.args.arg_secretstore_http_interface) } fn secretstore_self_secret(&self) -> Result, String> { - match self.args.flag_secretstore_secret { + match self.args.arg_secretstore_secret { Some(ref s) if s.len() == 64 => Ok(Some(NodeSecretKey::Plain(s.parse() .map_err(|e| format!("Invalid secret store secret: {}. 
Error: {:?}", s, e))?))), Some(ref s) if s.len() == 40 => Ok(Some(NodeSecretKey::KeyStore(s.parse() @@ -1030,7 +1038,7 @@ impl Configuration { fn secretstore_nodes(&self) -> Result, String> { let mut nodes = BTreeMap::new(); - for node in self.args.flag_secretstore_nodes.split(',').filter(|n| n != &"") { + for node in self.args.arg_secretstore_nodes.split(',').filter(|n| n != &"") { let public_and_addr: Vec<_> = node.split('@').collect(); if public_and_addr.len() != 2 { return Err(format!("Invalid secret store node: {}", node)); @@ -1053,7 +1061,7 @@ impl Configuration { } fn stratum_interface(&self) -> String { - self.interface(&self.args.flag_stratum_interface) + self.interface(&self.args.arg_stratum_interface) } fn rpc_enabled(&self) -> bool { @@ -1085,7 +1093,7 @@ impl Configuration { return true; } - let ui_disabled = self.args.flag_unlock.is_some() || + let ui_disabled = self.args.arg_unlock.is_some() || self.args.flag_geth || self.args.flag_no_ui; @@ -1095,7 +1103,7 @@ impl Configuration { fn verifier_settings(&self) -> VerifierSettings { let mut settings = VerifierSettings::default(); settings.scale_verifiers = self.args.flag_scale_verifiers; - if let Some(num_verifiers) = self.args.flag_num_verifiers { + if let Some(num_verifiers) = self.args.arg_num_verifiers { settings.num_verifiers = num_verifiers; } @@ -1105,7 +1113,7 @@ impl Configuration { fn whisper_config(&self) -> ::whisper::Config { ::whisper::Config { enabled: self.args.flag_whisper, - target_message_pool_size: self.args.flag_whisper_pool_size * 1024 * 1024, + target_message_pool_size: self.args.arg_whisper_pool_size * 1024 * 1024, } } } @@ -1401,7 +1409,7 @@ mod tests { let conf3 = parse(&["parity", "--tx-queue-strategy", "gas"]); // then - let min_period = conf0.args.flag_reseal_min_period; + let min_period = conf0.args.arg_reseal_min_period; assert_eq!(conf0.miner_options(min_period).unwrap(), mining_options); mining_options.tx_queue_strategy = PrioritizationStrategy::GasFactorAndGasPrice; assert_eq!(conf1.miner_options(min_period).unwrap(), mining_options); @@ -1560,10 +1568,10 @@ mod tests { // given // when - let conf0 = parse(&["parity", "--ui-path", "signer"]); - let conf1 = parse(&["parity", "--ui-path", "signer", "--ui-no-validation"]); - let conf2 = parse(&["parity", "--ui-path", "signer", "--ui-port", "3123"]); - let conf3 = parse(&["parity", "--ui-path", "signer", "--ui-interface", "test"]); + let conf0 = parse(&["parity", "--ui-path=signer"]); + let conf1 = parse(&["parity", "--ui-path=signer", "--ui-no-validation"]); + let conf2 = parse(&["parity", "--ui-path=signer", "--ui-port", "3123"]); + let conf3 = parse(&["parity", "--ui-path=signer", "--ui-interface", "test"]); // then assert_eq!(conf0.directories().signer, "signer".to_owned()); diff --git a/parity/deprecated.rs b/parity/deprecated.rs index 820181efa..d80ea3357 100644 --- a/parity/deprecated.rs +++ b/parity/deprecated.rs @@ -65,40 +65,40 @@ pub fn find_deprecated(args: &Args) -> Vec { result.push(Deprecated::Replaced("--ipc-off", "--no-ipc")); } - if args.flag_etherbase.is_some() { + if args.arg_etherbase.is_some() { result.push(Deprecated::Replaced("--etherbase", "--author")); } - if args.flag_extradata.is_some() { + if args.arg_extradata.is_some() { result.push(Deprecated::Replaced("--extradata", "--extra-data")); } // Removed in 1.7 - if args.flag_dapps_port.is_some() { + if args.arg_dapps_port.is_some() { result.push(Deprecated::Replaced("--dapps-port", "--jsonrpc-port")); } - if args.flag_dapps_interface.is_some() { + if 
args.arg_dapps_interface.is_some() { result.push(Deprecated::Replaced("--dapps-interface", "--jsonrpc-interface")); } - if args.flag_dapps_hosts.is_some() { + if args.arg_dapps_hosts.is_some() { result.push(Deprecated::Replaced("--dapps-hosts", "--jsonrpc-hosts")); } - if args.flag_dapps_cors.is_some() { + if args.arg_dapps_cors.is_some() { result.push(Deprecated::Replaced("--dapps-cors", "--jsonrpc-cors")); } - if args.flag_dapps_user.is_some() { + if args.arg_dapps_user.is_some() { result.push(Deprecated::Removed("--dapps-user")); } - if args.flag_dapps_pass.is_some() { + if args.arg_dapps_pass.is_some() { result.push(Deprecated::Removed("--dapps-pass")); } - if args.flag_dapps_apis_all.is_some() { + if args.flag_dapps_apis_all { result.push(Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis")); } @@ -124,15 +124,15 @@ mod tests { args.flag_dapps_off = true; args.flag_ipcdisable = true; args.flag_ipc_off = true; - args.flag_etherbase = Some(Default::default()); - args.flag_extradata = Some(Default::default()); - args.flag_dapps_port = Some(Default::default()); - args.flag_dapps_interface = Some(Default::default()); - args.flag_dapps_hosts = Some(Default::default()); - args.flag_dapps_cors = Some(Default::default()); - args.flag_dapps_user = Some(Default::default()); - args.flag_dapps_pass = Some(Default::default()); - args.flag_dapps_apis_all = Some(Default::default()); + args.arg_etherbase = Some(Default::default()); + args.arg_extradata = Some(Default::default()); + args.arg_dapps_port = Some(Default::default()); + args.arg_dapps_interface = Some(Default::default()); + args.arg_dapps_hosts = Some(Default::default()); + args.arg_dapps_cors = Some(Default::default()); + args.arg_dapps_user = Some(Default::default()); + args.arg_dapps_pass = Some(Default::default()); + args.flag_dapps_apis_all = true; args }), vec![ Deprecated::DoesNothing("--jsonrpc"), diff --git a/parity/dir.rs b/parity/dir.rs index d254886b9..4046e48a6 100644 --- a/parity/dir.rs +++ b/parity/dir.rs @@ -16,7 +16,7 @@ use std::fs; use std::path::{PathBuf, Path}; -use util::{H64, H256}; +use bigint::hash::{H64, H256}; use util::journaldb::Algorithm; use helpers::{replace_home, replace_home_and_local}; use app_dirs::{AppInfo, get_app_root, AppDataType}; diff --git a/parity/helpers.rs b/parity/helpers.rs index 2a1b3156d..9bcaf897e 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -18,7 +18,9 @@ use std::{io, env}; use std::io::{Write, BufReader, BufRead}; use std::time::Duration; use std::fs::File; -use util::{clean_0x, U256, Address, CompactionProfile}; +use bigint::prelude::U256; +use bigint::hash::clean_0x; +use util::{Address, CompactionProfile}; use util::journaldb::Algorithm; use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType}; use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy}; @@ -341,7 +343,7 @@ mod tests { use std::fs::File; use std::io::Write; use devtools::RandomTempPath; - use util::{U256}; + use bigint::prelude::U256; use ethcore::client::{Mode, BlockId}; use ethcore::miner::PendingSet; use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_addresses, to_price, geth_ipc_path, to_bootnodes, password_from_file}; diff --git a/parity/informant.rs b/parity/informant.rs index 1935ec9b6..2c356a039 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -16,7 +16,7 @@ extern crate ansi_term; use self::ansi_term::Colour::{White, Yellow, Green, Cyan, Blue}; -use self::ansi_term::Style; +use 
self::ansi_term::{Colour, Style}; use std::sync::{Arc}; use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; @@ -35,7 +35,9 @@ use light::client::LightChainClient; use number_prefix::{binary_prefix, Standalone, Prefixed}; use parity_rpc::{is_major_importing}; use parity_rpc::informant::RpcStats; -use util::{RwLock, Mutex, H256, Colour, Bytes}; +use bigint::hash::H256; +use util::Bytes; +use parking_lot::{RwLock, Mutex}; /// Format byte counts to standard denominations. pub fn format_bytes(b: usize) -> String { diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs index 982e69e48..eaf0ca9c5 100644 --- a/parity/light_helpers/queue_cull.rs +++ b/parity/light_helpers/queue_cull.rs @@ -31,7 +31,7 @@ use futures::{future, Future}; use parity_reactor::Remote; -use util::RwLock; +use parking_lot::RwLock; // Attepmt to cull once every 10 minutes. const TOKEN: TimerToken = 1; diff --git a/parity/main.rs b/parity/main.rs index d79fb0064..1f065d838 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -26,6 +26,8 @@ extern crate ansi_term; extern crate app_dirs; extern crate ctrlc; extern crate docopt; +#[macro_use] +extern crate clap; extern crate env_logger; extern crate fdlimit; extern crate futures; @@ -34,6 +36,7 @@ extern crate isatty; extern crate jsonrpc_core; extern crate num_cpus; extern crate number_prefix; +extern crate parking_lot; extern crate regex; extern crate rlp; extern crate rpassword; @@ -55,6 +58,7 @@ extern crate ethcore_ipc_nano as nanoipc; extern crate ethcore_light as light; extern crate ethcore_logger; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate ethcore_network as network; extern crate ethkey; extern crate ethsync; @@ -70,6 +74,7 @@ extern crate parity_whisper; extern crate path; extern crate rpc_cli; extern crate node_filter; +extern crate hash; #[macro_use] extern crate log as rlog; @@ -130,7 +135,7 @@ use std::collections::HashMap; use std::io::{self as stdio, BufReader, Read, Write}; use std::fs::{remove_file, metadata, File, create_dir_all}; use std::path::PathBuf; -use util::sha3::sha3; +use hash::keccak_buffer; use cli::Args; use configuration::{Cmd, Execute, Configuration}; use deprecated::find_deprecated; @@ -140,7 +145,7 @@ use dir::default_hypervisor_path; fn print_hash_of(maybe_file: Option) -> Result { if let Some(file) = maybe_file { let mut f = BufReader::new(File::open(&file).map_err(|_| "Unable to open file".to_owned())?); - let hash = sha3(&mut f).map_err(|_| "Unable to read from file".to_owned())?; + let hash = keccak_buffer(&mut f).map_err(|_| "Unable to read from file".to_owned())?; Ok(hash.hex()) } else { Err("Streaming from standard input not yet supported. 
Specify a file.".to_owned()) diff --git a/parity/params.rs b/parity/params.rs index 3054db48f..fda57f99a 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -16,7 +16,8 @@ use std::{str, fs, fmt, path}; use std::time::Duration; -use util::{Address, U256, version_data}; +use bigint::prelude::U256; +use util::{Address, version_data}; use util::journaldb::Algorithm; use ethcore::spec::Spec; use ethcore::ethereum; diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 7948d9b20..d3171f381 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -38,7 +38,7 @@ use parity_rpc::dispatch::{FullDispatcher, LightDispatcher}; use parity_rpc::informant::{ActivityNotifier, ClientNotifier}; use parity_rpc::{Metadata, NetworkSettings}; use updater::Updater; -use util::{Mutex, RwLock}; +use parking_lot::{Mutex, RwLock}; #[derive(Debug, PartialEq, Clone, Eq, Hash)] pub enum Api { diff --git a/parity/run.rs b/parity/run.rs index a81d61ba0..c9c1283ca 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -37,7 +37,9 @@ use node_health; use parity_reactor::EventLoop; use parity_rpc::{NetworkSettings, informant, is_major_importing}; use updater::{UpdatePolicy, Updater}; -use util::{Colour, version, Mutex, Condvar}; +use ansi_term::Colour; +use util::version; +use parking_lot::{Condvar, Mutex}; use node_filter::NodeFilter; use params::{ @@ -170,7 +172,7 @@ impl ::local_store::NodeInfo for FullNodeInfo { fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { use light::client as light_client; use ethsync::{LightSyncParams, LightSync, ManageNetwork}; - use util::RwLock; + use parking_lot::{Mutex, RwLock}; // load spec let spec = cmd.spec.spec(&cmd.dirs.cache)?; @@ -205,7 +207,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> // TODO: configurable cache size. let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES)); - let cache = Arc::new(::util::Mutex::new(cache)); + let cache = Arc::new(Mutex::new(cache)); // start client and create transaction queue. let mut config = light_client::Config { diff --git a/parity/snapshot.rs b/parity/snapshot.rs index dc786a2ea..34aa3252f 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -20,6 +20,7 @@ use std::time::Duration; use std::path::{Path, PathBuf}; use std::sync::Arc; +use hash::keccak; use ethcore::snapshot::{Progress, RestorationStatus, SnapshotService as SS}; use ethcore::snapshot::io::{SnapshotReader, PackedReader, PackedWriter}; use ethcore::snapshot::service::Service as SnapshotService; @@ -65,8 +66,6 @@ pub struct SnapshotCommand { // helper for reading chunks from arbitrary reader and feeding them into the // service. fn restore_using(snapshot: Arc, reader: &R, recover: bool) -> Result<(), String> { - use util::sha3::Hashable; - let manifest = reader.manifest(); info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash); @@ -95,7 +94,7 @@ fn restore_using(snapshot: Arc, reader: &R, let chunk = reader.chunk(state_hash) .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))?; - let hash = chunk.sha3(); + let hash = keccak(&chunk); if hash != state_hash { return Err(format!("Mismatched chunk hash. 
Expected {:?}, got {:?}", state_hash, hash)); } @@ -112,7 +111,7 @@ fn restore_using(snapshot: Arc, reader: &R, let chunk = reader.chunk(block_hash) .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))?; - let hash = chunk.sha3(); + let hash = keccak(&chunk); if hash != block_hash { return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", block_hash, hash)); } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index a5a371cdf..c213af8df 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -8,12 +8,14 @@ authors = ["Parity Technologies "] [lib] [dependencies] +ansi_term = "0.9" cid = "0.2" futures = "0.1" futures-cpupool = "0.1" log = "0.3" multihash ="0.6" order-stat = "0.1" +parking_lot = "0.4" rand = "0.3" rust-crypto = "0.2" rustc-hex = "1.0" @@ -37,6 +39,7 @@ jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = " ethcore-io = { path = "../util/io" } ethcore-ipc = { path = "../ipc/rpc" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } ethcore = { path = "../ethcore" } ethcrypto = { path = "../ethcrypto" } ethkey = { path = "../ethkey" } @@ -54,6 +57,7 @@ parity-updater = { path = "../updater" } rlp = { path = "../util/rlp" } stats = { path = "../util/stats" } vm = { path = "../ethcore/vm" } +hash = { path = "../util/hash" } clippy = { version = "0.0.103", optional = true} pretty_assertions = "0.1" diff --git a/rpc/src/authcodes.rs b/rpc/src/authcodes.rs index 4427eda78..c8a0731e3 100644 --- a/rpc/src/authcodes.rs +++ b/rpc/src/authcodes.rs @@ -21,7 +21,8 @@ use std::{fs, time, mem}; use itertools::Itertools; use rand::Rng; use rand::os::OsRng; -use util::{H256, Hashable}; +use hash::keccak; +use bigint::hash::H256; /// Providing current time in seconds pub trait TimeProvider { @@ -162,7 +163,7 @@ impl AuthCodes { return false; } - let as_token = |code| format!("{}:{}", code, time).sha3(); + let as_token = |code| keccak(format!("{}:{}", code, time)); // Check if it's the initial token. 
if self.is_empty() { @@ -231,12 +232,13 @@ mod tests { use std::io::{Read, Write}; use std::{time, fs}; use std::cell::Cell; + use hash::keccak; - use util::{H256, Hashable}; + use bigint::hash::H256; use super::*; fn generate_hash(val: &str, time: u64) -> H256 { - format!("{}:{}", val, time).sha3() + keccak(format!("{}:{}", val, time)) } #[test] diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index c429471f3..821b4cfd8 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -20,6 +20,7 @@ #![cfg_attr(feature="dev", feature(plugin))] #![cfg_attr(feature="dev", plugin(clippy))] +extern crate ansi_term; extern crate cid; extern crate crypto as rust_crypto; extern crate futures; @@ -27,6 +28,7 @@ extern crate futures_cpupool; extern crate itertools; extern crate multihash; extern crate order_stat; +extern crate parking_lot; extern crate rand; extern crate rustc_hex; extern crate semver; @@ -44,6 +46,7 @@ extern crate jsonrpc_pubsub; extern crate ethash; extern crate ethcore; +extern crate ethcore_bigint as bigint; extern crate ethcore_devtools as devtools; extern crate ethcore_io as io; extern crate ethcore_ipc; @@ -60,6 +63,7 @@ extern crate parity_reactor; extern crate parity_updater as updater; extern crate rlp; extern crate stats; +extern crate hash; #[macro_use] extern crate log; diff --git a/rpc/src/tests/ws.rs b/rpc/src/tests/ws.rs index 77fb1ea2c..6d4874845 100644 --- a/rpc/src/tests/ws.rs +++ b/rpc/src/tests/ws.rs @@ -56,7 +56,7 @@ pub fn request(server: Server, request: &str) -> http_client::Respon #[cfg(test)] mod testing { use std::time; - use util::Hashable; + use hash::keccak; use devtools::http_client; use super::{serve, request}; @@ -125,7 +125,7 @@ mod testing { {{}} ", port, - format!("{}:{}", code, timestamp).sha3(), + keccak(format!("{}:{}", code, timestamp)), timestamp, ) ); @@ -155,7 +155,7 @@ mod testing { {{}} ", port, - format!("{}:{}", code, timestamp).sha3(), + keccak(format!("{}:{}", code, timestamp)), timestamp, ) ); @@ -171,7 +171,7 @@ mod testing { {{}} ", port, - format!("{}:{}", code, timestamp).sha3(), + keccak(format!("{}:{}", code, timestamp)), timestamp, ) ); diff --git a/rpc/src/v1/extractors.rs b/rpc/src/v1/extractors.rs index ab1fad4da..7685b110e 100644 --- a/rpc/src/v1/extractors.rs +++ b/rpc/src/v1/extractors.rs @@ -25,7 +25,7 @@ use ipc; use jsonrpc_core as core; use jsonrpc_pubsub::Session; use ws; -use util::H256; +use bigint::hash::H256; use v1::{Metadata, Origin}; use v1::informant::RpcStats; diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index ab8dd655a..370c909d9 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -26,8 +26,11 @@ use light::client::LightChainClient; use light::on_demand::{request, OnDemand}; use light::TransactionQueue as LightTransactionQueue; use rlp; -use util::{Address, H520, H256, U256, Bytes, Mutex, RwLock}; -use util::sha3::Hashable; +use hash::keccak; +use bigint::prelude::U256; +use bigint::hash::{H256, H520}; +use util::{Address, Bytes}; +use parking_lot::{Mutex, RwLock}; use stats::Corpus; use ethkey::Signature; @@ -226,7 +229,7 @@ pub fn eth_data_hash(mut data: Bytes) -> H256 { format!("\x19Ethereum Signed Message:\n{}", data.len()) .into_bytes(); message_data.append(&mut data); - message_data.sha3() + keccak(message_data) } /// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network. 
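Note: the hunks above all follow the same hashing migration: the old `Hashable::sha3()` trait method and the `util::sha3::sha3` reader helper are replaced by the free functions `keccak` and `keccak_buffer` from the new `hash` crate (`util/hash`), with `H256` now imported from `ethcore-bigint`. A minimal sketch of the two call shapes as they appear in this diff; the function names `auth_token` and `file_hash` are illustrative only and not part of the patch:

extern crate ethcore_bigint as bigint;
extern crate hash;

use std::fs::File;
use std::io::BufReader;
use bigint::hash::H256;
use hash::{keccak, keccak_buffer};

// Hash a "code:time" pair in one shot, as authcodes.rs and the ws tests now do.
fn auth_token(code: &str, time: u64) -> H256 {
	keccak(format!("{}:{}", code, time))
}

// Hash a stream through a BufRead, as print_hash_of and the parity_set RPCs now do.
fn file_hash(path: &str) -> Result<H256, String> {
	let mut reader = BufReader::new(File::open(path).map_err(|_| "Unable to open file".to_owned())?);
	keccak_buffer(&mut reader).map_err(|_| "Unable to read from file".to_owned())
}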
diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 559f3a6b9..23cbbfefc 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -315,6 +315,7 @@ pub fn transaction_message(error: TransactionError) -> String { SenderBanned => "Sender is banned in local queue.".into(), RecipientBanned => "Recipient is banned in local queue.".into(), CodeBanned => "Code is banned in local queue.".into(), + NotAllowed => "Transaction is not permitted.".into(), } } diff --git a/rpc/src/v1/helpers/fake_sign.rs b/rpc/src/v1/helpers/fake_sign.rs index 2bbaef0ee..02259a9db 100644 --- a/rpc/src/v1/helpers/fake_sign.rs +++ b/rpc/src/v1/helpers/fake_sign.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use ethcore::client::MiningBlockChainClient; use ethcore::miner::MinerService; use ethcore::transaction::{Transaction, SignedTransaction, Action}; -use util::U256; +use bigint::prelude::U256; use jsonrpc_core::Error; use v1::helpers::CallRequest; diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 972315272..8211ffac0 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -37,7 +37,9 @@ use light::on_demand::{request, OnDemand, HeaderRef, Request as OnDemandRequest, use light::request::Field; use ethsync::LightSync; -use util::{Address, Mutex, U256}; +use bigint::prelude::U256; +use util::Address; +use parking_lot::Mutex; use v1::helpers::{CallRequest as CallRequestHelper, errors, dispatch}; use v1::types::{BlockNumber, CallRequest, Log}; diff --git a/rpc/src/v1/helpers/poll_filter.rs b/rpc/src/v1/helpers/poll_filter.rs index faae75c98..ba769ee4e 100644 --- a/rpc/src/v1/helpers/poll_filter.rs +++ b/rpc/src/v1/helpers/poll_filter.rs @@ -1,7 +1,7 @@ //! Helper type with all filter state data. use std::collections::HashSet; -use util::hash::H256; +use bigint::hash::H256; use v1::types::{Filter, Log}; pub type BlockNumber = u64; diff --git a/rpc/src/v1/helpers/requests.rs b/rpc/src/v1/helpers/requests.rs index aa3a4c3d4..8c5307f5f 100644 --- a/rpc/src/v1/helpers/requests.rs +++ b/rpc/src/v1/helpers/requests.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use util::{Address, U256, Bytes}; +use bigint::prelude::U256; +use util::{Address, Bytes}; use v1::types::{Origin, TransactionCondition}; /// Transaction request coming from RPC diff --git a/rpc/src/v1/helpers/signer.rs b/rpc/src/v1/helpers/signer.rs index 81d21eb82..6d9606f87 100644 --- a/rpc/src/v1/helpers/signer.rs +++ b/rpc/src/v1/helpers/signer.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use std::ops::Deref; use http::Origin; -use util::Mutex; +use parking_lot::Mutex; use transient_hashmap::TransientHashMap; use ethstore::random_string; diff --git a/rpc/src/v1/helpers/signing_queue.rs b/rpc/src/v1/helpers/signing_queue.rs index e9e6313f4..dfe198e4a 100644 --- a/rpc/src/v1/helpers/signing_queue.rs +++ b/rpc/src/v1/helpers/signing_queue.rs @@ -19,7 +19,9 @@ use std::cell::RefCell; use std::sync::Arc; use std::collections::BTreeMap; use jsonrpc_core; -use util::{Mutex, RwLock, U256, Address}; +use bigint::prelude::U256; +use util::Address; +use parking_lot::{Mutex, RwLock}; use ethcore::account_provider::DappId; use v1::helpers::{ConfirmationRequest, ConfirmationPayload}; use v1::types::{ConfirmationResponse, H160 as RpcH160, Origin, DappId as RpcDappId}; @@ -297,7 +299,9 @@ mod test { use std::time::Duration; use std::thread; use std::sync::{mpsc, Arc}; - use util::{Address, U256, Mutex}; + use bigint::prelude::U256; + use util::Address; + use parking_lot::Mutex; use v1::helpers::{SigningQueue, ConfirmationsQueue, QueueEvent, FilledTransactionRequest, ConfirmationPayload}; use v1::types::ConfirmationResponse; diff --git a/rpc/src/v1/helpers/subscription_manager.rs b/rpc/src/v1/helpers/subscription_manager.rs index c5e4216f4..f529d4810 100644 --- a/rpc/src/v1/helpers/subscription_manager.rs +++ b/rpc/src/v1/helpers/subscription_manager.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use std::sync::atomic::{self, AtomicBool}; -use util::Mutex; +use parking_lot::Mutex; use jsonrpc_core::futures::future::{self, Either}; use jsonrpc_core::futures::sync::mpsc; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 0cf70bf23..29d18188c 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -23,9 +23,10 @@ use std::sync::Arc; use futures::{self, future, BoxFuture, Future}; use rlp::{self, UntrustedRlp}; use time::get_time; -use util::{H160, H256, Address, U256, H64}; -use util::sha3::Hashable; -use util::Mutex; +use bigint::prelude::U256; +use bigint::hash::{H64, H160, H256}; +use util::Address; +use parking_lot::Mutex; use ethash::SeedHashCompute; use ethcore::account_provider::{AccountProvider, DappId}; @@ -149,7 +150,7 @@ impl EthClient where let view = block.header_view(); Ok(Some(RichBlock { inner: Block { - hash: Some(view.sha3().into()), + hash: Some(view.hash().into()), size: Some(block.rlp().as_raw().len().into()), parent_hash: view.parent_hash().into(), uncles_hash: view.uncles_hash().into(), diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index 8f448feb5..d8fccc39c 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -23,7 +23,8 @@ use jsonrpc_core::*; use ethcore::miner::MinerService; use ethcore::filter::Filter as EthcoreFilter; use ethcore::client::{BlockChainClient, BlockId}; -use util::{H256, Mutex}; +use bigint::hash::H256; +use parking_lot::Mutex; use futures::{future, Future, BoxFuture}; diff --git a/rpc/src/v1/impls/eth_pubsub.rs b/rpc/src/v1/impls/eth_pubsub.rs index f918a44ea..f279e2f81 100644 --- a/rpc/src/v1/impls/eth_pubsub.rs +++ b/rpc/src/v1/impls/eth_pubsub.rs @@ -39,7 +39,9 @@ use 
light::cache::Cache; use light::on_demand::OnDemand; use light::client::{LightChainClient, LightChainNotify}; use parity_reactor::Remote; -use util::{RwLock, Mutex, H256, Bytes}; +use bigint::hash::H256; +use util::Bytes; +use parking_lot::{RwLock, Mutex}; type Client = Sink; diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index d33b5a06f..45c55346b 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -38,8 +38,9 @@ use ethcore::filter::Filter as EthcoreFilter; use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransaction}; use ethsync::LightSync; use rlp::UntrustedRlp; -use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; -use util::{RwLock, Mutex, U256}; +use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; +use bigint::prelude::U256; +use parking_lot::{RwLock, Mutex}; use futures::{future, Future, BoxFuture, IntoFuture}; use futures::sync::oneshot; @@ -295,7 +296,7 @@ impl Eth for EthClient { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| { - if hdr.transactions_root() == SHA3_NULL_RLP { + if hdr.transactions_root() == KECCAK_NULL_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) @@ -311,7 +312,7 @@ impl Eth for EthClient { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); self.fetcher().header(num.into()).and_then(move |hdr| { - if hdr.transactions_root() == SHA3_NULL_RLP { + if hdr.transactions_root() == KECCAK_NULL_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) @@ -327,7 +328,7 @@ impl Eth for EthClient { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| { - if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP { + if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) @@ -343,7 +344,7 @@ impl Eth for EthClient { let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); self.fetcher().header(num.into()).and_then(move |hdr| { - if hdr.uncles_hash() == SHA3_EMPTY_LIST_RLP { + if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { future::ok(Some(U256::from(0).into())).boxed() } else { sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) @@ -472,7 +473,7 @@ impl Filterable for EthClient { self.client.block_hash(id).map(Into::into) } - fn pending_transactions_hashes(&self, _block_number: u64) -> Vec<::util::H256> { + fn pending_transactions_hashes(&self, _block_number: u64) -> Vec<::bigint::hash::H256> { Vec::new() } diff --git a/rpc/src/v1/impls/light/parity_set.rs b/rpc/src/v1/impls/light/parity_set.rs index b97327168..bac6556e3 100644 --- a/rpc/src/v1/impls/light/parity_set.rs +++ b/rpc/src/v1/impls/light/parity_set.rs @@ -23,7 +23,7 @@ use std::sync::Arc; use ethsync::ManageNetwork; use fetch::Fetch; use futures::{BoxFuture, Future}; -use util::sha3; +use hash::keccak_buffer; use jsonrpc_core::Error; use v1::helpers::dapps::DappsService; @@ -129,7 +129,7 @@ impl ParitySet for ParitySetClient { result .map_err(errors::fetch) .and_then(|response| { - sha3(&mut io::BufReader::new(response)).map_err(errors::fetch) + keccak_buffer(&mut io::BufReader::new(response)).map_err(errors::fetch) }) 
.map(Into::into) })) diff --git a/rpc/src/v1/impls/parity_set.rs b/rpc/src/v1/impls/parity_set.rs index 540964e63..38890d6d1 100644 --- a/rpc/src/v1/impls/parity_set.rs +++ b/rpc/src/v1/impls/parity_set.rs @@ -24,7 +24,7 @@ use ethcore::mode::Mode; use ethsync::ManageNetwork; use fetch::{self, Fetch}; use futures::{BoxFuture, Future}; -use util::sha3; +use hash::keccak_buffer; use updater::{Service as UpdateService}; use jsonrpc_core::Error; @@ -170,7 +170,7 @@ impl ParitySet for ParitySetClient where result .map_err(errors::fetch) .and_then(|response| { - sha3(&mut io::BufReader::new(response)).map_err(errors::fetch) + keccak_buffer(&mut io::BufReader::new(response)).map_err(errors::fetch) }) .map(Into::into) })) diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 302190fd4..b606d3e58 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -20,7 +20,8 @@ use std::sync::Arc; use ethcore::account_provider::AccountProvider; use ethcore::transaction::PendingTransaction; -use util::{Address, U128, ToPretty}; +use bigint::prelude::U128; +use util::{Address, ToPretty}; use futures::{future, Future, BoxFuture}; use jsonrpc_core::Error; diff --git a/rpc/src/v1/impls/pubsub.rs b/rpc/src/v1/impls/pubsub.rs index 616905f4a..44b9fcbeb 100644 --- a/rpc/src/v1/impls/pubsub.rs +++ b/rpc/src/v1/impls/pubsub.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use std::time::Duration; -use util::RwLock; +use parking_lot::RwLock; use futures::{self, BoxFuture, Future, Stream, Sink}; use jsonrpc_core::{self as core, Error, MetaIoHandler}; diff --git a/rpc/src/v1/impls/signer.rs b/rpc/src/v1/impls/signer.rs index 79f4f0e38..a01b66364 100644 --- a/rpc/src/v1/impls/signer.rs +++ b/rpc/src/v1/impls/signer.rs @@ -24,7 +24,7 @@ use ethkey; use futures::{future, BoxFuture, Future, IntoFuture}; use parity_reactor::Remote; use rlp::UntrustedRlp; -use util::Mutex; +use parking_lot::Mutex; use jsonrpc_core::{futures, Error}; use jsonrpc_pubsub::SubscriptionId; diff --git a/rpc/src/v1/impls/signing.rs b/rpc/src/v1/impls/signing.rs index 23ce3c78a..725580352 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -18,7 +18,8 @@ use std::sync::Arc; use transient_hashmap::TransientHashMap; -use util::{U256, Mutex}; +use bigint::prelude::U256; +use parking_lot::Mutex; use ethcore::account_provider::AccountProvider; diff --git a/rpc/src/v1/impls/web3.rs b/rpc/src/v1/impls/web3.rs index 859c44567..d0be2db81 100644 --- a/rpc/src/v1/impls/web3.rs +++ b/rpc/src/v1/impls/web3.rs @@ -15,11 +15,11 @@ // along with Parity. If not, see . //! Web3 rpc implementation. +use hash::keccak; use jsonrpc_core::*; use util::version; use v1::traits::Web3; use v1::types::{H256, Bytes}; -use util::sha3::Hashable; /// Web3 rpc implementation. 
pub struct Web3Client; @@ -35,6 +35,6 @@ impl Web3 for Web3Client { } fn sha3(&self, data: Bytes) -> Result { - Ok(data.0.sha3().into()) + Ok(keccak(&data.0).into()) } } diff --git a/rpc/src/v1/informant.rs b/rpc/src/v1/informant.rs index 160f0ea9f..0cbe7d449 100644 --- a/rpc/src/v1/informant.rs +++ b/rpc/src/v1/informant.rs @@ -24,7 +24,7 @@ use futures::Future; use futures_cpupool as pool; use jsonrpc_core as rpc; use order_stat; -use util::RwLock; +use parking_lot::RwLock; pub use self::pool::CpuPool; diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index cbbfa8dc0..c7a580c58 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -29,7 +29,9 @@ use ethcore::miner::{MinerOptions, Banning, GasPricer, MinerService, ExternalMin use ethcore::account_provider::AccountProvider; use ethjson::blockchain::BlockChain; use io::IoChannel; -use util::{U256, H256, Address, Hashable}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Address; use jsonrpc_core::IoHandler; use v1::impls::{EthClient, SigningUnsafeClient}; @@ -431,7 +433,7 @@ fn verify_transaction_counts(name: String, chain: BlockChain) { for b in chain.blocks_rlp().iter().filter(|b| Block::is_good(b)).map(|b| BlockView::new(b)) { let count = b.transactions_count(); - let hash = b.sha3(); + let hash = b.hash(); let number = b.header_view().number(); let (req, res) = by_hash(hash, count, &mut id); diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs index 7139d636b..65a0c25b7 100644 --- a/rpc/src/v1/tests/helpers/miner_service.rs +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -18,7 +18,10 @@ use std::collections::{BTreeMap, HashMap}; use std::collections::hash_map::Entry; -use util::{Address, H256, Bytes, U256, RwLock, Mutex}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::{Address, Bytes}; +use parking_lot::{RwLock, Mutex}; use ethcore::error::Error; use ethcore::client::MiningBlockChainClient; use ethcore::block::ClosedBlock; diff --git a/rpc/src/v1/tests/helpers/snapshot_service.rs b/rpc/src/v1/tests/helpers/snapshot_service.rs index cfb80619a..b81e05e7e 100644 --- a/rpc/src/v1/tests/helpers/snapshot_service.rs +++ b/rpc/src/v1/tests/helpers/snapshot_service.rs @@ -16,8 +16,9 @@ use ethcore::snapshot::{ManifestData, RestorationStatus, SnapshotService}; -use util::{Bytes, Mutex}; -use util::hash::H256; +use util::Bytes; +use bigint::hash::H256; +use parking_lot::Mutex; /// Mocked snapshot service (used for sync info extensions). pub struct TestSnapshotService { diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index 83c7db015..97bfb9eec 100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -17,7 +17,8 @@ //! Test implementation of SyncProvider. use std::collections::BTreeMap; -use util::{H256, RwLock}; +use bigint::hash::H256; +use parking_lot::RwLock; use ethsync::{SyncProvider, EthProtocolInfo, SyncStatus, SyncState, PeerInfo, TransactionStats}; /// TestSyncProvider config. 
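Note: the test-helper hunks above repeat the other recurring split in this diff: hash and unsigned-integer types now come from `ethcore-bigint` (declared as `extern crate ethcore_bigint as bigint;` in rpc/src/lib.rs), while `Mutex`/`RwLock` are taken from `parking_lot` instead of the old `util` re-exports. A minimal sketch of the resulting import shape, assuming those extern declarations; the `DifficultyCache` struct is invented purely for illustration:

extern crate ethcore_bigint as bigint;
extern crate parking_lot;

use bigint::prelude::U256; // previously util::U256
use bigint::hash::H256;    // previously util::H256 / util::hash::H256
use parking_lot::RwLock;   // previously util::RwLock

// Hypothetical per-block cache guarded by a parking_lot lock.
struct DifficultyCache {
	entries: RwLock<Vec<(H256, U256)>>,
}

impl DifficultyCache {
	fn record(&self, hash: H256, difficulty: U256) {
		// parking_lot locks are not poisoned, so write() hands back the guard directly (no unwrap()).
		self.entries.write().push((hash, difficulty));
	}
}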
diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index d1835948f..69804235b 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -22,7 +22,10 @@ use rustc_hex::{FromHex, ToHex}; use time::get_time; use rlp; -use util::{U256, Address, H256, Mutex}; +use bigint::prelude::U256; +use bigint::hash::H256; +use util::Address; +use parking_lot::Mutex; use ethkey::Secret; use ethcore::account_provider::AccountProvider; use ethcore::client::{TestBlockChainClient, EachBlockWith, Executed, TransactionId}; @@ -533,7 +536,7 @@ fn rpc_eth_transaction_count_by_number_pending() { #[test] fn rpc_eth_pending_transaction_by_hash() { - use util::H256; + use bigint::hash::H256; use rlp; use ethcore::transaction::SignedTransaction; diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index 3c98ea8ef..bb7765446 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -485,7 +485,8 @@ fn rpc_parity_local_transactions() { #[test] fn rpc_parity_chain_status() { - use util::{H256, U256}; + use bigint::prelude::U256; + use bigint::hash::H256; let deps = Dependencies::new(); let io = deps.default_client(); @@ -523,7 +524,7 @@ fn rpc_parity_cid() { #[test] fn rpc_parity_call() { - use util::U256; + use bigint::prelude::U256; let deps = Dependencies::new(); deps.client.set_execution_result(Ok(Executed { diff --git a/rpc/src/v1/tests/mocked/parity_set.rs b/rpc/src/v1/tests/mocked/parity_set.rs index 22e0388e2..ed27862ac 100644 --- a/rpc/src/v1/tests/mocked/parity_set.rs +++ b/rpc/src/v1/tests/mocked/parity_set.rs @@ -17,7 +17,8 @@ use std::sync::Arc; use std::str::FromStr; use rustc_hex::FromHex; -use util::{U256, Address}; +use bigint::prelude::U256; +use util::Address; use ethcore::miner::MinerService; use ethcore::client::TestBlockChainClient; diff --git a/rpc/src/v1/tests/mocked/personal.rs b/rpc/src/v1/tests/mocked/personal.rs index c7205d6ef..d34d734c0 100644 --- a/rpc/src/v1/tests/mocked/personal.rs +++ b/rpc/src/v1/tests/mocked/personal.rs @@ -21,7 +21,8 @@ use ethcore::account_provider::AccountProvider; use ethcore::client::TestBlockChainClient; use ethcore::transaction::{Action, Transaction}; use jsonrpc_core::IoHandler; -use util::{U256, Address}; +use bigint::prelude::U256; +use util::Address; use v1::{PersonalClient, Personal, Metadata}; use v1::helpers::dispatch::FullDispatcher; diff --git a/rpc/src/v1/tests/mocked/signer.rs b/rpc/src/v1/tests/mocked/signer.rs index 6827334de..55f2eae77 100644 --- a/rpc/src/v1/tests/mocked/signer.rs +++ b/rpc/src/v1/tests/mocked/signer.rs @@ -16,7 +16,8 @@ use std::sync::Arc; use std::str::FromStr; -use util::{U256, Address, ToPretty}; +use bigint::prelude::U256; +use util::{Address, ToPretty}; use ethcore::account_provider::AccountProvider; use ethcore::client::TestBlockChainClient; diff --git a/rpc/src/v1/tests/mocked/signing.rs b/rpc/src/v1/tests/mocked/signing.rs index 4c6951f1a..ee8787915 100644 --- a/rpc/src/v1/tests/mocked/signing.rs +++ b/rpc/src/v1/tests/mocked/signing.rs @@ -28,7 +28,8 @@ use v1::types::ConfirmationResponse; use v1::tests::helpers::TestMinerService; use v1::tests::mocked::parity; -use util::{Address, U256, ToPretty}; +use bigint::prelude::U256; +use util::{Address, ToPretty}; use ethkey::Secret; use ethcore::account_provider::AccountProvider; use ethcore::client::TestBlockChainClient; diff --git a/rpc/src/v1/types/confirmations.rs b/rpc/src/v1/types/confirmations.rs index bbc8b350e..39bf8d0a1 100644 --- 
a/rpc/src/v1/types/confirmations.rs +++ b/rpc/src/v1/types/confirmations.rs @@ -18,7 +18,7 @@ use std::fmt; use serde::{Serialize, Serializer}; -use util::Colour; +use ansi_term::Colour; use util::bytes::ToPretty; use v1::types::{U256, TransactionRequest, RichRawTransaction, H160, H256, H520, Bytes, TransactionCondition, Origin}; diff --git a/rpc/src/v1/types/filter.rs b/rpc/src/v1/types/filter.rs index 058d14d7e..f552212b0 100644 --- a/rpc/src/v1/types/filter.rs +++ b/rpc/src/v1/types/filter.rs @@ -124,7 +124,7 @@ impl Serialize for FilterChanges { mod tests { use serde_json; use std::str::FromStr; - use util::hash::H256; + use bigint::hash::H256; use super::{VariadicValue, Topic, Filter}; use v1::types::BlockNumber; use ethcore::filter::Filter as EthFilter; diff --git a/rpc/src/v1/types/hash.rs b/rpc/src/v1/types/hash.rs index 1816c96d5..bf381f3b6 100644 --- a/rpc/src/v1/types/hash.rs +++ b/rpc/src/v1/types/hash.rs @@ -20,7 +20,7 @@ use std::cmp::Ordering; use std::hash::{Hash, Hasher}; use serde; use rustc_hex::{ToHex, FromHex}; -use util::{H64 as Eth64, H160 as Eth160, H256 as Eth256, H520 as Eth520, H512 as Eth512, H2048 as Eth2048}; +use bigint::hash::{H64 as Eth64, H160 as Eth160, H256 as Eth256, H520 as Eth520, H512 as Eth512, H2048 as Eth2048}; macro_rules! impl_hash { ($name: ident, $other: ident, $size: expr) => { diff --git a/rpc/src/v1/types/histogram.rs b/rpc/src/v1/types/histogram.rs index 55d8ae835..2f7d210af 100644 --- a/rpc/src/v1/types/histogram.rs +++ b/rpc/src/v1/types/histogram.rs @@ -29,8 +29,8 @@ pub struct Histogram { pub counts: Vec, } -impl From<::stats::Histogram<::util::U256>> for Histogram { - fn from(h: ::stats::Histogram<::util::U256>) -> Self { +impl From<::stats::Histogram<::bigint::prelude::U256>> for Histogram { + fn from(h: ::stats::Histogram<::bigint::prelude::U256>) -> Self { Histogram { bucket_bounds: h.bucket_bounds.into_iter().map(Into::into).collect(), counts: h.counts diff --git a/rpc/src/v1/types/transaction_request.rs b/rpc/src/v1/types/transaction_request.rs index cb74e071b..7f1c16287 100644 --- a/rpc/src/v1/types/transaction_request.rs +++ b/rpc/src/v1/types/transaction_request.rs @@ -18,7 +18,7 @@ use v1::types::{Bytes, H160, U256, TransactionCondition}; use v1::helpers; -use util::Colour; +use ansi_term::Colour; use std::fmt; diff --git a/rpc/src/v1/types/uint.rs b/rpc/src/v1/types/uint.rs index e783aac1f..2de1ad46c 100644 --- a/rpc/src/v1/types/uint.rs +++ b/rpc/src/v1/types/uint.rs @@ -17,7 +17,7 @@ use std::str::FromStr; use std::fmt; use serde; -use util::{U256 as EthU256, U128 as EthU128}; +use bigint::prelude::{U256 as EthU256, U128 as EthU128}; macro_rules! 
impl_uint { ($name: ident, $other: ident, $size: expr) => { diff --git a/rpc_client/Cargo.toml b/rpc_client/Cargo.toml index 5cc688fb5..1f1840848 100644 --- a/rpc_client/Cargo.toml +++ b/rpc_client/Cargo.toml @@ -15,7 +15,9 @@ serde_json = "1.0" tempdir = "0.3.5" url = "1.2.0" matches = "0.1" +parking_lot = "0.4" jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } parity-rpc = { path = "../rpc" } ethcore-util = { path = "../util" } +hash = { path = "../util/hash" } diff --git a/rpc_client/src/client.rs b/rpc_client/src/client.rs index 3f55fbe73..27b72ab78 100644 --- a/rpc_client/src/client.rs +++ b/rpc_client/src/client.rs @@ -9,7 +9,8 @@ use std::thread; use std::time; use std::path::PathBuf; -use util::{Hashable, Mutex}; +use hash::keccak; +use parking_lot::Mutex; use url::Url; use std::fs::File; @@ -72,7 +73,7 @@ impl Handler for RpcHandler { WsError::new(WsErrorKind::Internal, format!("{}", err)) })?; let secs = timestamp.as_secs(); - let hashed = format!("{}:{}", self.auth_code, secs).sha3(); + let hashed = keccak(format!("{}:{}", self.auth_code, secs)); let proto = format!("{:?}_{}", hashed, secs); r.add_protocol(&proto); Ok(r) diff --git a/rpc_client/src/lib.rs b/rpc_client/src/lib.rs index d1967ccbd..7459ca9a4 100644 --- a/rpc_client/src/lib.rs +++ b/rpc_client/src/lib.rs @@ -6,11 +6,13 @@ extern crate futures; extern crate jsonrpc_core; extern crate jsonrpc_ws_server as ws; extern crate parity_rpc as rpc; +extern crate parking_lot; extern crate rand; extern crate serde; extern crate serde_json; extern crate tempdir; extern crate url; +extern crate hash; #[macro_use] extern crate log; diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index 19f342aa9..9143b289a 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -29,6 +29,8 @@ ethabi = "2.0" ethcore = { path = "../ethcore" } ethcore-devtools = { path = "../devtools" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } +hash = { path = "../util/hash" } ethcore-ipc = { path = "../ipc/rpc" } ethcore-ipc-nano = { path = "../ipc/nano" } ethcore-logger = { path = "../logger" } diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index 5b8284c6f..0a30a1a60 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -21,7 +21,8 @@ use parking_lot::{Mutex, RwLock}; use ethkey::public_to_address; use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; use native_contracts::SecretStoreAclStorage; -use util::{H256, Address, Bytes}; +use bigint::hash::H256; +use util::{Address, Bytes}; use types::all::{Error, ServerKeyId, Public}; const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index f9fad19df..550983eb8 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -51,8 +51,8 @@ impl KeyServerImpl { }) } - #[cfg(test)] /// Get cluster client reference. 
+ #[cfg(test)] pub fn cluster(&self) -> Arc { self.data.lock().cluster.clone() } @@ -201,7 +201,7 @@ pub mod tests { use node_key_pair::PlainNodeKeyPair; use key_server_set::tests::MapKeyServerSet; use key_server_cluster::math; - use util::H256; + use bigint::hash::H256; use types::all::{Error, Public, ClusterConfiguration, NodeAddress, RequestSignature, ServerKeyId, EncryptedDocumentKey, EncryptedDocumentKeyShadow, MessageHash, EncryptedMessageSignature}; use traits::{ServerKeyGenerator, DocumentKeyServer, MessageSigner, KeyServer}; diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index c267d1259..c85ee9d93 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -27,7 +27,7 @@ use tokio_io::IoFuture; use tokio_core::reactor::{Handle, Remote, Interval}; use tokio_core::net::{TcpListener, TcpStream}; use ethkey::{Public, KeyPair, Signature, Random, Generator}; -use util::H256; +use bigint::hash::H256; use key_server_cluster::{Error, NodeId, SessionId, AclStorage, KeyStorage, KeyServerSet, NodeKeyPair}; use key_server_cluster::cluster_sessions::{ClusterSession, ClusterSessions, GenerationSessionWrapper, EncryptionSessionWrapper, DecryptionSessionWrapper, SigningSessionWrapper}; @@ -72,14 +72,14 @@ pub trait ClusterClient: Send + Sync { /// Start new signing session. fn new_signing_session(&self, session_id: SessionId, requestor_signature: Signature, message_hash: H256) -> Result, Error>; - #[cfg(test)] /// Ask node to make 'faulty' generation sessions. + #[cfg(test)] fn make_faulty_generation_sessions(&self); - #[cfg(test)] /// Get active generation session with given id. - fn generation_session(&self, session_id: &SessionId) -> Option>; #[cfg(test)] + fn generation_session(&self, session_id: &SessionId) -> Option>; /// Try connect to disconnected nodes. + #[cfg(test)] fn connect(&self); } @@ -91,8 +91,8 @@ pub trait Cluster: Send + Sync { fn send(&self, to: &NodeId, message: Message) -> Result<(), Error>; } -#[derive(Clone)] /// Cluster initialization parameters. +#[derive(Clone)] pub struct ClusterConfiguration { /// Number of threads reserved by cluster. pub threads: usize, @@ -214,14 +214,14 @@ impl ClusterCore { Arc::new(ClusterClientImpl::new(self.data.clone())) } - #[cfg(test)] /// Get cluster configuration. + #[cfg(test)] pub fn config(&self) -> &ClusterConfiguration { &self.data.config } - #[cfg(test)] /// Get connection to given node. + #[cfg(test)] pub fn connection(&self, node: &NodeId) -> Option> { self.data.connection(node) } diff --git a/secret_store/src/key_server_cluster/decryption_session.rs b/secret_store/src/key_server_cluster/decryption_session.rs index bc3c6aad0..cc19fa0c5 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ b/secret_store/src/key_server_cluster/decryption_session.rs @@ -170,26 +170,26 @@ impl SessionImpl { }) } - #[cfg(test)] /// Get this node id. + #[cfg(test)] pub fn node(&self) -> &NodeId { &self.core.meta.self_node_id } - #[cfg(test)] /// Get this session access key. + #[cfg(test)] pub fn access_key(&self) -> &Secret { &self.core.access_key } - #[cfg(test)] /// Get session state. 
+ #[cfg(test)] pub fn state(&self) -> ConsensusSessionState { self.data.lock().consensus_session.state() } - #[cfg(test)] /// Get decrypted secret + #[cfg(test)] pub fn decrypted_secret(&self) -> Option> { self.data.lock().result.clone() } @@ -281,10 +281,13 @@ impl SessionImpl { return Ok(()); } - self.core.cluster.broadcast(Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(DecryptionSessionCompleted { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - })))?; + // send completion signal to all nodes, except for rejected nodes + for node in data.consensus_session.consensus_non_rejected_nodes() { + self.core.cluster.send(&node, Message::Decryption(DecryptionMessage::DecryptionSessionCompleted(DecryptionSessionCompleted { + session: self.core.meta.id.clone().into(), + sub_session: self.core.access_key.clone().into(), + })))?; + } data.result = Some(Ok(data.consensus_session.result()?)); self.core.completed.notify_all(); diff --git a/secret_store/src/key_server_cluster/encryption_session.rs b/secret_store/src/key_server_cluster/encryption_session.rs index b61594925..9a1b9e330 100644 --- a/secret_store/src/key_server_cluster/encryption_session.rs +++ b/secret_store/src/key_server_cluster/encryption_session.rs @@ -73,8 +73,8 @@ pub struct SessionParams { pub cluster: Arc, } -#[derive(Debug)] /// Mutable data of encryption (distributed key generation) session. +#[derive(Debug)] struct SessionData { /// Current state of the session. state: SessionState, @@ -84,16 +84,16 @@ struct SessionData { result: Option>, } -#[derive(Debug, Clone)] /// Mutable node-specific data. +#[derive(Debug, Clone)] struct NodeData { // === Values, filled during initialization phase === /// Flags marking that node has confirmed session initialization. pub initialization_confirmed: bool, } -#[derive(Debug, Clone, PartialEq)] /// Encryption (distributed key generation) session state. +#[derive(Debug, Clone, PartialEq)] pub enum SessionState { // === Initialization states === /// Every node starts in this state. diff --git a/secret_store/src/key_server_cluster/generation_session.rs b/secret_store/src/key_server_cluster/generation_session.rs index ade78bc57..dfc441bdb 100644 --- a/secret_store/src/key_server_cluster/generation_session.rs +++ b/secret_store/src/key_server_cluster/generation_session.rs @@ -72,8 +72,8 @@ pub struct SessionParams { pub cluster: Arc, } -#[derive(Debug)] /// Mutable data of distributed key generation session. +#[derive(Debug)] struct SessionData { /// Current state of the session. state: SessionState, @@ -110,8 +110,8 @@ struct SessionData { joint_public_and_secret: Option>, } -#[derive(Debug, Clone)] /// Mutable node-specific data. +#[derive(Debug, Clone)] struct NodeData { /// Random unique scalar. Persistent. pub id_number: Secret, @@ -137,8 +137,8 @@ struct NodeData { pub completion_confirmed: bool, } -#[derive(Debug, Clone, PartialEq)] /// Schedule for visiting other nodes of cluster. +#[derive(Debug, Clone, PartialEq)] pub struct EveryOtherNodeVisitor { /// Already visited nodes. visited: BTreeSet, @@ -148,8 +148,8 @@ pub struct EveryOtherNodeVisitor { in_progress: BTreeSet, } -#[derive(Debug, Clone, PartialEq)] /// Distributed key generation session state. +#[derive(Debug, Clone, PartialEq)] pub enum SessionState { // === Initialization states === /// Every node starts in this state. @@ -209,8 +209,8 @@ impl SessionImpl { &self.self_node_id } - #[cfg(test)] /// Get derived point. 
+ #[cfg(test)] pub fn derived_point(&self) -> Option { self.data.lock().derived_point.clone() } diff --git a/secret_store/src/key_server_cluster/io/deadline.rs b/secret_store/src/key_server_cluster/io/deadline.rs index aea339ca9..a2f794e76 100644 --- a/secret_store/src/key_server_cluster/io/deadline.rs +++ b/secret_store/src/key_server_cluster/io/deadline.rs @@ -32,8 +32,8 @@ pub fn deadline(duration: Duration, handle: &Handle, future: F) -> Result< Ok(deadline) } -#[derive(Debug, PartialEq)] /// Deadline future completion status. +#[derive(Debug, PartialEq)] pub enum DeadlineStatus { /// Completed a future. Meet(T), diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs index bf52ab798..0aedcc624 100644 --- a/secret_store/src/key_server_cluster/io/handshake.rs +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -20,7 +20,7 @@ use std::collections::BTreeSet; use futures::{Future, Poll, Async}; use tokio_io::{AsyncRead, AsyncWrite}; use ethkey::{Random, Generator, KeyPair, verify_public}; -use util::H256; +use bigint::hash::H256; use key_server_cluster::{NodeId, Error, NodeKeyPair}; use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage, @@ -74,8 +74,8 @@ pub fn accept_handshake(a: A, self_key_pair: Arc) -> Handshake); impl Deref for SerializedMessage { diff --git a/secret_store/src/key_server_cluster/jobs/consensus_session.rs b/secret_store/src/key_server_cluster/jobs/consensus_session.rs index 27542bc44..e789663d1 100644 --- a/secret_store/src/key_server_cluster/jobs/consensus_session.rs +++ b/secret_store/src/key_server_cluster/jobs/consensus_session.rs @@ -107,6 +107,17 @@ impl ConsensusSes &self.consensus_job } + /// Get all nodes which have not rejected the consensus request. + pub fn consensus_non_rejected_nodes(&self) -> BTreeSet { + self.consensus_job.responses().iter() + .filter(|r| *r.1) + .map(|r| r.0) + .chain(self.consensus_job.requests()) + .filter(|n| **n != self.meta.self_node_id) + .cloned() + .collect() + } + /// Get computation job reference. #[cfg(test)] pub fn computation_job(&self) -> &JobSession { diff --git a/secret_store/src/key_server_cluster/jobs/job_session.rs b/secret_store/src/key_server_cluster/jobs/job_session.rs index 6608397dd..ef8ab66fb 100644 --- a/secret_store/src/key_server_cluster/jobs/job_session.rs +++ b/secret_store/src/key_server_cluster/jobs/job_session.rs @@ -17,8 +17,8 @@ use std::collections::{BTreeSet, BTreeMap}; use key_server_cluster::{Error, NodeId, SessionMeta}; -#[derive(Debug, Clone, Copy, PartialEq)] /// Partial response action. +#[derive(Debug, Clone, Copy, PartialEq)] pub enum JobPartialResponseAction { /// Ignore this response. Ignore, @@ -28,8 +28,8 @@ pub enum JobPartialResponseAction { Accept, } -#[derive(Debug, Clone, Copy, PartialEq)] /// Partial request action. +#[derive(Debug, Clone, Copy, PartialEq)] pub enum JobPartialRequestAction { /// Repond with reject. Reject(PartialJobResponse), @@ -64,8 +64,8 @@ pub trait JobTransport { fn send_partial_response(&self, node: &NodeId, response: Self::PartialJobResponse) -> Result<(), Error>; } -#[derive(Debug, Clone, Copy, PartialEq)] /// Current state of job session. +#[derive(Debug, Clone, Copy, PartialEq)] pub enum JobSessionState { /// Session is inactive. Inactive, @@ -123,8 +123,8 @@ impl JobSession where Executor: JobExe } } - #[cfg(test)] /// Get transport reference. 
+ #[cfg(test)] pub fn transport(&self) -> &Transport { &self.transport } @@ -134,8 +134,8 @@ impl JobSession where Executor: JobExe self.data.state } - #[cfg(test)] /// Get rejects. + #[cfg(test)] pub fn rejects(&self) -> &BTreeSet { debug_assert!(self.meta.self_node_id == self.meta.master_node_id); @@ -153,7 +153,6 @@ impl JobSession where Executor: JobExe .requests } - #[cfg(test)] /// Get responses. pub fn responses(&self) -> &BTreeMap { debug_assert!(self.meta.self_node_id == self.meta.master_node_id); diff --git a/secret_store/src/key_server_cluster/jobs/signing_job.rs b/secret_store/src/key_server_cluster/jobs/signing_job.rs index 28ac31a1e..acf6047ce 100644 --- a/secret_store/src/key_server_cluster/jobs/signing_job.rs +++ b/secret_store/src/key_server_cluster/jobs/signing_job.rs @@ -16,7 +16,7 @@ use std::collections::{BTreeSet, BTreeMap}; use ethkey::{Public, Secret}; -use util::H256; +use bigint::hash::H256; use key_server_cluster::{Error, NodeId, DocumentKeyShare}; use key_server_cluster::math; use key_server_cluster::jobs::job_session::{JobPartialRequestAction, JobPartialResponseAction, JobExecutor}; @@ -139,7 +139,7 @@ impl JobExecutor for SigningJob { let signature_c = math::combine_message_hash_with_public(message_hash, &self.session_public)?; let signature_s = math::compute_signature(partial_responses.values().map(|r| &r.partial_signature))?; - + Ok((signature_c, signature_s)) } } diff --git a/secret_store/src/key_server_cluster/math.rs b/secret_store/src/key_server_cluster/math.rs index 047a4556c..0a9907e35 100644 --- a/secret_store/src/key_server_cluster/math.rs +++ b/secret_store/src/key_server_cluster/math.rs @@ -15,11 +15,13 @@ // along with Parity. If not, see . use ethkey::{Public, Secret, Random, Generator, math}; -use util::{U256, H256, Hashable}; +use bigint::prelude::U256; +use bigint::hash::H256; +use hash::keccak; use key_server_cluster::Error; -#[derive(Debug)] /// Encryption result. +#[derive(Debug)] pub struct EncryptedSecret { /// Common encryption point. pub common_point: Public, @@ -181,8 +183,8 @@ pub fn compute_joint_public<'a, I>(public_shares: I) -> Result wh compute_public_sum(public_shares) } -#[cfg(test)] /// Compute joint secret key. +#[cfg(test)] pub fn compute_joint_secret<'a, I>(secret_coeffs: I) -> Result where I: Iterator { compute_secret_sum(secret_coeffs) } @@ -238,8 +240,8 @@ pub fn compute_joint_shadow_point<'a, I>(nodes_shadow_points: I) -> Result(access_key: &Secret, common_point: &Public, nodes_shadows: I) -> Result where I: Iterator { let mut joint_shadow = compute_secret_sum(nodes_shadows)?; joint_shadow.mul(access_key)?; @@ -277,8 +279,8 @@ pub fn make_common_shadow_point(threshold: usize, mut common_point: Public) -> R } } -#[cfg(test)] /// Decrypt shadow-encrypted secret. +#[cfg(test)] pub fn decrypt_with_shadow_coefficients(mut decrypted_shadow: Public, mut common_shadow_point: Public, shadow_coefficients: Vec) -> Result { let shadow_coefficients_sum = compute_secret_sum(shadow_coefficients.iter())?; math::public_mul_secret(&mut common_shadow_point, &shadow_coefficients_sum)?; @@ -286,8 +288,8 @@ pub fn decrypt_with_shadow_coefficients(mut decrypted_shadow: Public, mut common Ok(decrypted_shadow) } -#[cfg(test)] /// Decrypt data using joint secret (version for tests). 
+#[cfg(test)] pub fn decrypt_with_joint_secret(encrypted_point: &Public, common_point: &Public, joint_secret: &Secret) -> Result { let mut common_point_mul = common_point.clone(); math::public_mul_secret(&mut common_point_mul, joint_secret)?; @@ -306,7 +308,7 @@ pub fn combine_message_hash_with_public(message_hash: &H256, public: &Public) -> buffer[32..64].copy_from_slice(&public[0..32]); // calculate hash of buffer - let hash = (&buffer[..]).sha3(); + let hash = keccak(&buffer[..]); // map hash to EC finite field value let hash: U256 = hash.into(); @@ -356,8 +358,8 @@ pub fn compute_signature<'a, I>(signature_shares: I) -> Result wh compute_secret_sum(signature_shares) } -#[cfg(test)] /// Locally compute Schnorr signature as described in https://en.wikipedia.org/wiki/Schnorr_signature#Signing. +#[cfg(test)] pub fn local_compute_signature(nonce: &Secret, secret: &Secret, message_hash: &Secret) -> Result<(Secret, Secret), Error> { let mut nonce_public = math::generation_point(); math::public_mul_secret(&mut nonce_public, &nonce).unwrap(); @@ -372,8 +374,8 @@ pub fn local_compute_signature(nonce: &Secret, secret: &Secret, message_hash: &S Ok((combined_hash, sig)) } -#[cfg(test)] /// Verify signature as described in https://en.wikipedia.org/wiki/Schnorr_signature#Verifying. +#[cfg(test)] pub fn verify_signature(public: &Public, signature: &(Secret, Secret), message_hash: &H256) -> Result { let mut addendum = math::generation_point(); math::public_mul_secret(&mut addendum, &signature.1)?; diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index b18cf512f..c0f5729c0 100644 --- a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -23,8 +23,8 @@ use super::{SerializableH256, SerializablePublic, SerializableSecret, Serializab pub type MessageSessionId = SerializableH256; pub type MessageNodeId = SerializablePublic; -#[derive(Clone, Debug)] /// All possible messages that can be sent during encryption/decryption sessions. +#[derive(Clone, Debug)] pub enum Message { /// Cluster message. Cluster(ClusterMessage), @@ -38,8 +38,8 @@ pub enum Message { Signing(SigningMessage), } -#[derive(Clone, Debug)] /// All possible cluster-level messages. +#[derive(Clone, Debug)] pub enum ClusterMessage { /// Introduce node public key. NodePublicKey(NodePublicKey), @@ -51,8 +51,8 @@ pub enum ClusterMessage { KeepAliveResponse(KeepAliveResponse), } -#[derive(Clone, Debug, Serialize, Deserialize)] /// All possible messages that can be sent during key generation session. +#[derive(Clone, Debug, Serialize, Deserialize)] pub enum GenerationMessage { /// Initialize new DKG session. InitializeSession(InitializeSession), @@ -70,8 +70,8 @@ pub enum GenerationMessage { SessionCompleted(SessionCompleted), } -#[derive(Clone, Debug)] /// All possible messages that can be sent during encryption session. +#[derive(Clone, Debug)] pub enum EncryptionMessage { /// Initialize encryption session. InitializeEncryptionSession(InitializeEncryptionSession), @@ -81,8 +81,8 @@ pub enum EncryptionMessage { EncryptionSessionError(EncryptionSessionError), } -#[derive(Clone, Debug, Serialize, Deserialize)] /// All possible messages that can be sent during consensus establishing. +#[derive(Clone, Debug, Serialize, Deserialize)] pub enum ConsensusMessage { /// Initialize consensus session. 
InitializeConsensusSession(InitializeConsensusSession), @@ -90,8 +90,8 @@ pub enum ConsensusMessage { ConfirmConsensusInitialization(ConfirmConsensusInitialization), } -#[derive(Clone, Debug)] /// All possible messages that can be sent during decryption session. +#[derive(Clone, Debug)] pub enum DecryptionMessage { /// Consensus establishing message. DecryptionConsensusMessage(DecryptionConsensusMessage), @@ -105,8 +105,8 @@ pub enum DecryptionMessage { DecryptionSessionCompleted(DecryptionSessionCompleted), } -#[derive(Clone, Debug)] /// All possible messages that can be sent during signing session. +#[derive(Clone, Debug)] pub enum SigningMessage { /// Consensus establishing message. SigningConsensusMessage(SigningConsensusMessage), @@ -122,8 +122,8 @@ pub enum SigningMessage { SigningSessionCompleted(SigningSessionCompleted), } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Introduce node public key. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct NodePublicKey { /// Node identifier (aka node public key). pub node_id: MessageNodeId, @@ -131,26 +131,26 @@ pub struct NodePublicKey { pub confirmation_plain: SerializableH256, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Confirm that node owns the private key of previously passed public key (aka node id). +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct NodePrivateKeySignature { /// Previously passed `confirmation_plain`, signed with node private key. pub confirmation_signed: SerializableSignature, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Ask if the node is still alive. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeepAlive { } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Confirm that the node is still alive. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeepAliveResponse { } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Initialize new DKG session. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct InitializeSession { /// Session Id. pub session: MessageSessionId, @@ -168,8 +168,8 @@ pub struct InitializeSession { pub derived_point: SerializablePublic, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Confirm DKG session initialization. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct ConfirmInitialization { /// Session Id. pub session: MessageSessionId, @@ -177,8 +177,8 @@ pub struct ConfirmInitialization { pub derived_point: SerializablePublic, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Broadcast generated point to every other node. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct CompleteInitialization { /// Session Id. pub session: MessageSessionId, @@ -186,8 +186,8 @@ pub struct CompleteInitialization { pub derived_point: SerializablePublic, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Generated keys are sent to every node. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct KeysDissemination { /// Session Id. pub session: MessageSessionId, @@ -199,8 +199,8 @@ pub struct KeysDissemination { pub publics: Vec, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is sharing its public key share. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct PublicKeyShare { /// Session Id. pub session: MessageSessionId, @@ -208,8 +208,8 @@ pub struct PublicKeyShare { pub public_share: SerializablePublic, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// When session error has occured. 
+#[derive(Clone, Debug, Serialize, Deserialize)] pub struct SessionError { /// Session Id. pub session: MessageSessionId, @@ -217,15 +217,15 @@ pub struct SessionError { pub error: String, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// When session is completed. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct SessionCompleted { /// Session Id. pub session: MessageSessionId, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is requested to prepare for saving encrypted data. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct InitializeEncryptionSession { /// Encryption session Id. pub session: MessageSessionId, @@ -237,15 +237,15 @@ pub struct InitializeEncryptionSession { pub encrypted_point: SerializablePublic, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is responding to encryption initialization request. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct ConfirmEncryptionInitialization { /// Encryption session Id. pub session: MessageSessionId, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// When encryption session error has occured. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct EncryptionSessionError { /// Encryption session Id. pub session: MessageSessionId, @@ -253,22 +253,22 @@ pub struct EncryptionSessionError { pub error: String, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is asked to be part of consensus group. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct InitializeConsensusSession { /// Requestor signature. pub requestor_signature: SerializableSignature, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is responding to consensus initialization request. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct ConfirmConsensusInitialization { /// Is node confirmed consensus participation. pub is_confirmed: bool, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Consensus-related signing message. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct SigningConsensusMessage { /// Generation session Id. pub session: MessageSessionId, @@ -278,8 +278,8 @@ pub struct SigningConsensusMessage { pub message: ConsensusMessage, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Session key generation message. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct SigningGenerationMessage { /// Generation session Id. pub session: MessageSessionId, @@ -289,8 +289,8 @@ pub struct SigningGenerationMessage { pub message: GenerationMessage, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Request partial signature. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct RequestPartialSignature { /// Generation session Id. pub session: MessageSessionId, @@ -304,8 +304,8 @@ pub struct RequestPartialSignature { pub nodes: BTreeSet, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Partial signature. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct PartialSignature { /// Generation session Id. pub session: MessageSessionId, @@ -317,8 +317,8 @@ pub struct PartialSignature { pub partial_signature: SerializableSecret, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// When signing session error has occured. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct SigningSessionError { /// Encryption session Id. pub session: MessageSessionId, @@ -328,8 +328,8 @@ pub struct SigningSessionError { pub error: String, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Signing session completed. 
+#[derive(Clone, Debug, Serialize, Deserialize)] pub struct SigningSessionCompleted { /// Generation session Id. pub session: MessageSessionId, @@ -337,8 +337,8 @@ pub struct SigningSessionCompleted { pub sub_session: SerializableSecret, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Consensus-related decryption message. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct DecryptionConsensusMessage { /// Generation session Id. pub session: MessageSessionId, @@ -348,8 +348,8 @@ pub struct DecryptionConsensusMessage { pub message: ConsensusMessage, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is requested to do a partial decryption. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct RequestPartialDecryption { /// Encryption session Id. pub session: MessageSessionId, @@ -364,8 +364,8 @@ pub struct RequestPartialDecryption { pub nodes: BTreeSet, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// Node has partially decrypted the secret. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct PartialDecryption { /// Encryption session Id. pub session: MessageSessionId, @@ -379,8 +379,8 @@ pub struct PartialDecryption { pub decrypt_shadow: Option>, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// When decryption session error has occured. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct DecryptionSessionError { /// Encryption session Id. pub session: MessageSessionId, @@ -390,8 +390,8 @@ pub struct DecryptionSessionError { pub error: String, } -#[derive(Clone, Debug, Serialize, Deserialize)] /// When decryption session is completed. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct DecryptionSessionCompleted { /// Encryption session Id. pub session: MessageSessionId, diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 4fcda1539..1ec04e2e0 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -42,8 +42,8 @@ pub use super::key_server_set::tests::MapKeyServerSet; pub type SessionId = ServerKeyId; -#[derive(Debug, Clone)] /// Session metadata. +#[derive(Debug, Clone)] pub struct SessionMeta { /// Key id. pub id: SessionId, @@ -55,8 +55,8 @@ pub struct SessionMeta { pub threshold: usize, } -#[derive(Clone, Debug, PartialEq)] /// Errors which can occur during encryption/decryption session +#[derive(Clone, Debug, PartialEq)] pub enum Error { /// Invalid node address has been passed. InvalidNodeAddress, diff --git a/secret_store/src/key_server_cluster/signing_session.rs b/secret_store/src/key_server_cluster/signing_session.rs index e56306142..73d65f749 100644 --- a/secret_store/src/key_server_cluster/signing_session.rs +++ b/secret_store/src/key_server_cluster/signing_session.rs @@ -18,7 +18,7 @@ use std::collections::BTreeSet; use std::sync::Arc; use parking_lot::{Mutex, Condvar}; use ethkey::{Public, Secret, Signature}; -use util::H256; +use bigint::hash::H256; use key_server_cluster::{Error, NodeId, SessionId, SessionMeta, AclStorage, DocumentKeyShare}; use key_server_cluster::cluster::{Cluster}; use key_server_cluster::cluster_sessions::ClusterSession; @@ -86,6 +86,7 @@ struct SessionData { /// Signing session state. #[derive(Debug, PartialEq)] +#[cfg_attr(test, derive(Clone, Copy))] pub enum SessionState { /// State when consensus is establishing. ConsensusEstablishing, @@ -187,6 +188,12 @@ impl SessionImpl { }) } + /// Get session state. 
+ #[cfg(test)] + pub fn state(&self) -> SessionState { + self.data.lock().state + } + /// Initialize signing session on master node. pub fn initialize(&self, message_hash: H256) -> Result<(), Error> { let mut data = self.data.lock(); @@ -230,13 +237,13 @@ impl SessionImpl { self.on_consensus_message(sender, message), &SigningMessage::SigningGenerationMessage(ref message) => self.on_generation_message(sender, message), - &SigningMessage::RequestPartialSignature(ref message) => + &SigningMessage::RequestPartialSignature(ref message) => self.on_partial_signature_requested(sender, message), - &SigningMessage::PartialSignature(ref message) => + &SigningMessage::PartialSignature(ref message) => self.on_partial_signature(sender, message), - &SigningMessage::SigningSessionError(ref message) => + &SigningMessage::SigningSessionError(ref message) => self.on_session_error(sender, message), - &SigningMessage::SigningSessionCompleted(ref message) => + &SigningMessage::SigningSessionCompleted(ref message) => self.on_session_completed(sender, message), } } @@ -378,10 +385,13 @@ impl SessionImpl { return Ok(()); } - self.core.cluster.broadcast(Message::Signing(SigningMessage::SigningSessionCompleted(SigningSessionCompleted { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - })))?; + // send completion signal to all nodes, except for rejected nodes + for node in data.consensus_session.consensus_non_rejected_nodes() { + self.core.cluster.send(&node, Message::Signing(SigningMessage::SigningSessionCompleted(SigningSessionCompleted { + session: self.core.meta.id.clone().into(), + sub_session: self.core.access_key.clone().into(), + })))?; + } data.result = Some(Ok(data.consensus_session.result()?)); self.core.completed.notify_all(); @@ -569,17 +579,19 @@ impl JobTransport for SigningJobTransport { #[cfg(test)] mod tests { use std::sync::Arc; + use std::str::FromStr; use std::collections::{BTreeMap, VecDeque}; - use ethkey::{self, Random, Generator, Public}; - use util::H256; + use bigint::hash::H256; + use ethkey::{self, Random, Generator, Public, Secret, KeyPair}; use acl_storage::DummyAclStorage; - use key_server_cluster::{NodeId, SessionId, SessionMeta, Error, KeyStorage}; + use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, SessionMeta, Error, KeyStorage}; use key_server_cluster::cluster::tests::DummyCluster; use key_server_cluster::generation_session::{Session as GenerationSession}; use key_server_cluster::generation_session::tests::MessageLoop as KeyGenerationMessageLoop; use key_server_cluster::math; - use key_server_cluster::message::{Message, SigningMessage}; - use key_server_cluster::signing_session::{Session, SessionImpl, SessionParams}; + use key_server_cluster::message::{Message, SigningMessage, SigningConsensusMessage, ConsensusMessage, ConfirmConsensusInitialization, + SigningGenerationMessage, GenerationMessage, ConfirmInitialization, InitializeSession, RequestPartialSignature}; + use key_server_cluster::signing_session::{Session, SessionImpl, SessionState, SessionParams}; struct Node { pub node_id: NodeId, @@ -589,8 +601,10 @@ mod tests { struct MessageLoop { pub session_id: SessionId, + pub requester: KeyPair, pub nodes: BTreeMap<NodeId, Node>, pub queue: VecDeque<(NodeId, NodeId, Message)>, + pub acl_storages: Vec<Arc<DummyAclStorage>>, } impl MessageLoop { @@ -600,8 +614,10 @@ mod tests { let requester = Random.generate().unwrap(); let signature = Some(ethkey::sign(requester.secret(), &SessionId::default()).unwrap()); let master_node_id =
gl.nodes.keys().nth(0).unwrap().clone(); + let mut acl_storages = Vec::new(); for (i, (gl_node_id, gl_node)) in gl.nodes.iter().enumerate() { let acl_storage = Arc::new(DummyAclStorage::default()); + acl_storages.push(acl_storage.clone()); let cluster = Arc::new(DummyCluster::new(gl_node_id.clone())); let session = SessionImpl::new(SessionParams { meta: SessionMeta { @@ -627,8 +643,10 @@ mod tests { MessageLoop { session_id: session_id, + requester: requester, nodes: nodes, queue: VecDeque::new(), + acl_storages: acl_storages, } } @@ -676,22 +694,41 @@ mod tests { } } } + + pub fn run_until bool>(&mut self, predicate: F) -> Result<(), Error> { + while let Some((from, to, message)) = self.take_message() { + if predicate(self) { + return Ok(()); + } + + self.process_message((from, to, message))?; + } + + unreachable!("either wrong predicate, or failing test") + } + } + + fn prepare_signing_sessions(threshold: usize, num_nodes: usize) -> (KeyGenerationMessageLoop, MessageLoop) { + // run key generation sessions + let mut gl = KeyGenerationMessageLoop::new(num_nodes); + gl.master().initialize(Public::default(), threshold, gl.nodes.keys().cloned().collect()).unwrap(); + while let Some((from, to, message)) = gl.take_message() { + gl.process_message((from, to, message)).unwrap(); + } + + // run signing session + let sl = MessageLoop::new(&gl); + (gl, sl) } #[test] fn complete_gen_sign_session() { let test_cases = [(0, 1), (0, 5), (2, 5), (3, 5)]; for &(threshold, num_nodes) in &test_cases { - // run key generation sessions - let mut gl = KeyGenerationMessageLoop::new(num_nodes); - gl.master().initialize(Public::default(), threshold, gl.nodes.keys().cloned().collect()).unwrap(); - while let Some((from, to, message)) = gl.take_message() { - gl.process_message((from, to, message)).unwrap(); - } + let (gl, mut sl) = prepare_signing_sessions(threshold, num_nodes); // run signing session let message_hash = H256::from(777); - let mut sl = MessageLoop::new(&gl); sl.master().initialize(message_hash).unwrap(); while let Some((from, to, message)) = sl.take_message() { sl.process_message((from, to, message)).unwrap(); @@ -703,4 +740,247 @@ mod tests { assert!(math::verify_signature(&public, &signature, &message_hash).unwrap()); } } + + #[test] + fn constructs_in_cluster_of_single_node() { + let mut nodes = BTreeMap::new(); + let self_node_id = Random.generate().unwrap().public().clone(); + nodes.insert(self_node_id, Random.generate().unwrap().secret().clone()); + match SessionImpl::new(SessionParams { + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self_node_id.clone(), + master_node_id: self_node_id.clone(), + threshold: 0, + }, + access_key: Random.generate().unwrap().secret().clone(), + key_share: DocumentKeyShare { + author: Public::default(), + threshold: 0, + id_numbers: nodes, + secret_share: Random.generate().unwrap().secret().clone(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), + }, + acl_storage: Arc::new(DummyAclStorage::default()), + cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { + Ok(_) => (), + _ => panic!("unexpected"), + } + } + + #[test] + fn fails_to_construct_if_not_a_part_of_cluster() { + let mut nodes = BTreeMap::new(); + let self_node_id = Random.generate().unwrap().public().clone(); + nodes.insert(Random.generate().unwrap().public().clone(), 
Random.generate().unwrap().secret().clone()); + nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); + match SessionImpl::new(SessionParams { + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self_node_id.clone(), + master_node_id: self_node_id.clone(), + threshold: 0, + }, + access_key: Random.generate().unwrap().secret().clone(), + key_share: DocumentKeyShare { + author: Public::default(), + threshold: 0, + id_numbers: nodes, + secret_share: Random.generate().unwrap().secret().clone(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), + }, + acl_storage: Arc::new(DummyAclStorage::default()), + cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { + Err(Error::InvalidNodesConfiguration) => (), + _ => panic!("unexpected"), + } + } + + #[test] + fn fails_to_construct_if_threshold_is_wrong() { + let mut nodes = BTreeMap::new(); + let self_node_id = Random.generate().unwrap().public().clone(); + nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone()); + nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); + match SessionImpl::new(SessionParams { + meta: SessionMeta { + id: SessionId::default(), + self_node_id: self_node_id.clone(), + master_node_id: self_node_id.clone(), + threshold: 2, + }, + access_key: Random.generate().unwrap().secret().clone(), + key_share: DocumentKeyShare { + author: Public::default(), + threshold: 2, + id_numbers: nodes, + secret_share: Random.generate().unwrap().secret().clone(), + common_point: Some(Random.generate().unwrap().public().clone()), + encrypted_point: Some(Random.generate().unwrap().public().clone()), + }, + acl_storage: Arc::new(DummyAclStorage::default()), + cluster: Arc::new(DummyCluster::new(self_node_id.clone())), + }, Some(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap())) { + Err(Error::InvalidThreshold) => (), + _ => panic!("unexpected"), + } + } + + #[test] + fn fails_to_initialize_when_already_initialized() { + let (_, sl) = prepare_signing_sessions(1, 3); + assert_eq!(sl.master().initialize(777.into()), Ok(())); + assert_eq!(sl.master().initialize(777.into()), Err(Error::InvalidStateForRequest)); + } + + #[test] + fn does_not_fail_when_consensus_message_received_after_consensus_established() { + let (_, mut sl) = prepare_signing_sessions(1, 3); + sl.master().initialize(777.into()).unwrap(); + // consensus is established + sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap(); + // but 3rd node continues to send its messages + // this should not fail session + let consensus_group = sl.master().data.lock().consensus_session.select_consensus_group().unwrap().clone(); + let mut had_3rd_message = false; + while let Some((from, to, message)) = sl.take_message() { + if !consensus_group.contains(&from) { + had_3rd_message = true; + sl.process_message((from, to, message)).unwrap(); + } + } + assert!(had_3rd_message); + } + + #[test] + fn fails_when_consensus_message_is_received_when_not_initialized() { + let (_, sl) = prepare_signing_sessions(1, 3); + assert_eq!(sl.master().on_consensus_message(sl.nodes.keys().nth(1).unwrap(), &SigningConsensusMessage { + session: SessionId::default().into(), + sub_session: sl.master().core.access_key.clone().into(), + message: 
ConsensusMessage::ConfirmConsensusInitialization(ConfirmConsensusInitialization { + is_confirmed: true, + }), + }), Err(Error::InvalidStateForRequest)); + } + + #[test] + fn fails_when_generation_message_is_received_when_not_initialized() { + let (_, sl) = prepare_signing_sessions(1, 3); + assert_eq!(sl.master().on_generation_message(sl.nodes.keys().nth(1).unwrap(), &SigningGenerationMessage { + session: SessionId::default().into(), + sub_session: sl.master().core.access_key.clone().into(), + message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { + session: SessionId::default().into(), + derived_point: Public::default().into(), + }), + }), Err(Error::InvalidStateForRequest)); + } + + #[test] + fn fails_when_generation_sesson_is_initialized_by_slave_node() { + let (_, mut sl) = prepare_signing_sessions(1, 3); + sl.master().initialize(777.into()).unwrap(); + sl.run_until(|sl| sl.master().state() == SessionState::SessionKeyGeneration).unwrap(); + + let slave2_id = sl.nodes.keys().nth(2).unwrap().clone(); + let slave1 = &sl.nodes.values().nth(1).unwrap().session; + + assert_eq!(slave1.on_generation_message(&slave2_id, &SigningGenerationMessage { + session: SessionId::default().into(), + sub_session: sl.master().core.access_key.clone().into(), + message: GenerationMessage::InitializeSession(InitializeSession { + session: SessionId::default().into(), + author: Public::default().into(), + nodes: BTreeMap::new(), + threshold: 1, + derived_point: Public::default().into(), + }) + }), Err(Error::InvalidMessage)); + } + + #[test] + fn fails_when_signature_requested_when_not_initialized() { + let (_, sl) = prepare_signing_sessions(1, 3); + let slave1 = &sl.nodes.values().nth(1).unwrap().session; + assert_eq!(slave1.on_partial_signature_requested(sl.nodes.keys().nth(0).unwrap(), &RequestPartialSignature { + session: SessionId::default().into(), + sub_session: sl.master().core.access_key.clone().into(), + request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(), + message_hash: H256::default().into(), + nodes: Default::default(), + }), Err(Error::InvalidStateForRequest)); + } + + #[test] + fn fails_when_signature_requested_by_slave_node() { + let (_, sl) = prepare_signing_sessions(1, 3); + assert_eq!(sl.master().on_partial_signature_requested(sl.nodes.keys().nth(1).unwrap(), &RequestPartialSignature { + session: SessionId::default().into(), + sub_session: sl.master().core.access_key.clone().into(), + request_id: Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap().into(), + message_hash: H256::default().into(), + nodes: Default::default(), + }), Err(Error::InvalidMessage)); + } + + #[test] + fn failed_signing_session() { + let (_, mut sl) = prepare_signing_sessions(1, 3); + sl.master().initialize(777.into()).unwrap(); + + // we need at least 2-of-3 nodes to agree to reach consensus + // let's say 2 of 3 nodes disagree + sl.acl_storages[1].prohibit(sl.requester.public().clone(), SessionId::default()); + sl.acl_storages[2].prohibit(sl.requester.public().clone(), SessionId::default()); + + // then consensus is unreachable + assert_eq!(sl.run_until(|_| false), Err(Error::ConsensusUnreachable)); + } + + #[test] + fn complete_signing_session_with_single_node_failing() { + let (_, mut sl) = prepare_signing_sessions(1, 3); + sl.master().initialize(777.into()).unwrap(); + + // we need at least 2-of-3 nodes to agree to reach consensus + // let's say 1 of 3 nodes disagree + 
sl.acl_storages[1].prohibit(sl.requester.public().clone(), SessionId::default()); + + // then consensus is reachable, but a single node will disagree + while let Some((from, to, message)) = sl.take_message() { + sl.process_message((from, to, message)).unwrap(); + } + + let data = sl.master().data.lock(); + match data.result { + Some(Ok(_)) => (), + _ => unreachable!(), + } + } + + #[test] + fn complete_signing_session_with_acl_check_failed_on_master() { + let (_, mut sl) = prepare_signing_sessions(1, 3); + sl.master().initialize(777.into()).unwrap(); + + // we need at least 2-of-3 nodes to agree to reach consensus + // let's say 1 of 3 nodes disagree + sl.acl_storages[0].prohibit(sl.requester.public().clone(), SessionId::default()); + + // then consensus is reachable, but a single node will disagree + while let Some((from, to, message)) = sl.take_message() { + sl.process_message((from, to, message)).unwrap(); + } + + let data = sl.master().data.lock(); + match data.result { + Some(Ok(_)) => (), + _ => unreachable!(), + } + } } diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs index e17dceed5..ee959d0ba 100644 --- a/secret_store/src/key_server_set.rs +++ b/secret_store/src/key_server_set.rs @@ -22,7 +22,9 @@ use parking_lot::Mutex; use ethcore::filter::Filter; use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify}; use native_contracts::KeyServerSet as KeyServerSetContract; -use util::{H256, Address, Bytes, Hashable}; +use hash::keccak; +use bigint::hash::H256; +use util::{Address, Bytes}; use types::all::{Error, Public, NodeAddress}; const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set"; @@ -33,8 +35,8 @@ const ADDED_EVENT_NAME: &'static [u8] = &*b"KeyServerAdded(address)"; const REMOVED_EVENT_NAME: &'static [u8] = &*b"KeyServerRemoved(address)"; lazy_static! { - static ref ADDED_EVENT_NAME_HASH: H256 = ADDED_EVENT_NAME.sha3(); - static ref REMOVED_EVENT_NAME_HASH: H256 = REMOVED_EVENT_NAME.sha3(); + static ref ADDED_EVENT_NAME_HASH: H256 = keccak(ADDED_EVENT_NAME); + static ref REMOVED_EVENT_NAME_HASH: H256 = keccak(REMOVED_EVENT_NAME); } /// Key Server set diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index fdbb5fa40..28e3aa2c9 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -25,8 +25,8 @@ use serialization::{SerializablePublic, SerializableSecret}; /// Key of version value. const DB_META_KEY_VERSION: &'static [u8; 7] = b"version"; -#[derive(Debug, Clone, PartialEq)] /// Encrypted key share, stored by key storage on the single key server. +#[derive(Debug, Clone, PartialEq)] pub struct DocumentKeyShare { /// Author of the entry. pub author: Public, @@ -59,8 +59,8 @@ pub struct PersistentKeyStorage { db: Database, } -#[derive(Serialize, Deserialize)] /// V0 of encrypted key share, as it is stored by key storage on the single key server. +#[derive(Serialize, Deserialize)] struct SerializableDocumentKeyShareV0 { /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). pub threshold: usize, @@ -74,8 +74,8 @@ struct SerializableDocumentKeyShareV0 { pub encrypted_point: SerializablePublic, } -#[derive(Serialize, Deserialize)] /// V1 of encrypted key share, as it is stored by key storage on the single key server. +#[derive(Serialize, Deserialize)] struct SerializableDocumentKeyShareV1 { /// Authore of the entry.
pub author: SerializablePublic, @@ -204,8 +204,8 @@ pub mod tests { use super::{DB_META_KEY_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare, SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1, upgrade_db}; - #[derive(Default)] /// In-memory document encryption keys storage + #[derive(Default)] pub struct DummyKeyStorage { keys: RwLock>, } diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index d7f35a55a..cbad4ffe6 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -39,11 +39,13 @@ extern crate ethabi; extern crate ethcore; extern crate ethcore_devtools as devtools; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate ethcore_ipc as ipc; extern crate ethcore_logger as logger; extern crate ethcrypto; extern crate ethkey; extern crate native_contracts; +extern crate hash; mod key_server_cluster; mod types; diff --git a/secret_store/src/node_key_pair.rs b/secret_store/src/node_key_pair.rs index ce6c88a07..29f1ffec6 100644 --- a/secret_store/src/node_key_pair.rs +++ b/secret_store/src/node_key_pair.rs @@ -18,7 +18,8 @@ use std::sync::Arc; use ethcrypto::ecdh::agree; use ethkey::{KeyPair, Public, Signature, Error as EthKeyError, sign}; use ethcore::account_provider::AccountProvider; -use util::{Address, H256}; +use bigint::hash::H256; +use util::Address; use traits::NodeKeyPair; pub struct PlainNodeKeyPair { diff --git a/secret_store/src/serialization.rs b/secret_store/src/serialization.rs index ad26bf7d4..caa74ec32 100644 --- a/secret_store/src/serialization.rs +++ b/secret_store/src/serialization.rs @@ -21,13 +21,14 @@ use rustc_hex::{ToHex, FromHex}; use serde::{Serialize, Deserialize, Serializer, Deserializer}; use serde::de::{Visitor, Error as SerdeError}; use ethkey::{Public, Secret, Signature}; -use util::{H256, Bytes}; +use bigint::hash::H256; +use util::Bytes; /// Serializable message hash. pub type SerializableMessageHash = SerializableH256; -#[derive(Clone, Debug, Serialize, Deserialize)] /// Serializable shadow decryption result. +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct SerializableEncryptedDocumentKeyShadow { /// Decrypted secret point. It is partially decrypted if shadow decrpytion was requested. pub decrypted_secret: SerializablePublic, @@ -37,8 +38,8 @@ pub struct SerializableEncryptedDocumentKeyShadow { pub decrypt_shadows: Vec, } -#[derive(Clone, Debug, PartialEq)] /// Serializable Bytes. +#[derive(Clone, Debug, PartialEq)] pub struct SerializableBytes(pub Bytes); impl From for SerializableBytes where Bytes: From { @@ -83,8 +84,8 @@ impl<'a> Deserialize<'a> for SerializableBytes { } } -#[derive(Clone, Debug)] /// Serializable Signature. +#[derive(Clone, Debug)] pub struct SerializableSignature(pub Signature); impl From for SerializableSignature where Signature: From { @@ -143,8 +144,8 @@ impl<'a> Deserialize<'a> for SerializableSignature { } } -#[derive(Clone, Debug)] /// Serializable H256. +#[derive(Clone, Debug)] pub struct SerializableH256(pub H256); impl From for SerializableH256 where H256: From { @@ -203,8 +204,8 @@ impl<'a> Deserialize<'a> for SerializableH256 { } } -#[derive(Clone, Debug)] /// Serializable EC scalar/secret key. +#[derive(Clone, Debug)] pub struct SerializableSecret(pub Secret); impl From for SerializableSecret where Secret: From { @@ -263,8 +264,8 @@ impl<'a> Deserialize<'a> for SerializableSecret { } } -#[derive(Clone, Debug)] /// Serializable EC point/public key. 
+#[derive(Clone, Debug)] pub struct SerializablePublic(pub Public); impl From for SerializablePublic where Public: From { diff --git a/secret_store/src/traits.rs b/secret_store/src/traits.rs index 31da748e0..1e00f1e05 100644 --- a/secret_store/src/traits.rs +++ b/secret_store/src/traits.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . use ethkey::{KeyPair, Signature, Error as EthKeyError}; -use util::H256; +use bigint::hash::H256; use types::all::{Error, Public, ServerKeyId, MessageHash, EncryptedMessageSignature, RequestSignature, EncryptedDocumentKey, EncryptedDocumentKeyShadow}; @@ -82,7 +82,7 @@ pub trait MessageSigner: ServerKeyGenerator { } -#[ipc(client_ident="RemoteKeyServer")] /// Key server. +#[ipc(client_ident="RemoteKeyServer")] pub trait KeyServer: DocumentKeyServer + MessageSigner + Send + Sync { } diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 6867c82f3..078d65649 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -20,16 +20,17 @@ use serde_json; use ethkey; use util; +use bigint; use key_server_cluster; /// Node id. pub type NodeId = ethkey::Public; /// Server key id. When key is used to encrypt document, it could be document contents hash. -pub type ServerKeyId = util::H256; +pub type ServerKeyId = bigint::hash::H256; /// Encrypted document key type. pub type EncryptedDocumentKey = util::Bytes; /// Message hash. -pub type MessageHash = util::H256; +pub type MessageHash = bigint::hash::H256; /// Message signature. pub type EncryptedMessageSignature = util::Bytes; /// Request signature type. @@ -37,9 +38,9 @@ pub type RequestSignature = ethkey::Signature; /// Public key type. pub use ethkey::Public; +/// Secret store error #[derive(Debug, Clone, PartialEq)] #[binary] -/// Secret store error pub enum Error { /// Bad signature is passed BadSignature, @@ -55,9 +56,9 @@ pub enum Error { Internal(String), } +/// Secret store configuration #[derive(Debug, Clone)] #[binary] -/// Secret store configuration pub struct NodeAddress { /// IP address. pub address: String, @@ -65,9 +66,9 @@ pub struct NodeAddress { pub port: u16, } +/// Secret store configuration #[derive(Debug)] #[binary] -/// Secret store configuration pub struct ServiceConfiguration { /// HTTP listener address. If None, HTTP API is disabled. pub listener_address: Option, @@ -79,9 +80,9 @@ pub struct ServiceConfiguration { pub cluster_config: ClusterConfiguration, } +/// Key server cluster configuration #[derive(Debug)] #[binary] -/// Key server cluster configuration pub struct ClusterConfiguration { /// Number of threads reserved by cluster. pub threads: usize, @@ -94,9 +95,9 @@ pub struct ClusterConfiguration { pub allow_connecting_to_higher_nodes: bool, } +/// Shadow decryption result. #[derive(Clone, Debug, PartialEq)] #[binary] -/// Shadow decryption result. pub struct EncryptedDocumentKeyShadow { /// Decrypted secret point. It is partially decrypted if shadow decrpytion was requested. 
pub decrypted_secret: ethkey::Public, diff --git a/stratum/Cargo.toml b/stratum/Cargo.toml index dcd605bb8..4a6733e67 100644 --- a/stratum/Cargo.toml +++ b/stratum/Cargo.toml @@ -15,6 +15,7 @@ jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "pa jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } ethcore-devtools = { path = "../devtools" } lazy_static = "0.2" env_logger = "0.4" @@ -23,4 +24,6 @@ semver = "0.6" ethcore-ipc-nano = { path = "../ipc/nano" } futures = "0.1" tokio-core = "0.1" +parking_lot = "0.4" ethcore-logger = { path = "../logger" } +hash = { path = "../util/hash" } diff --git a/stratum/src/lib.rs b/stratum/src/lib.rs index 0042ab1e9..ce01e2c8e 100644 --- a/stratum/src/lib.rs +++ b/stratum/src/lib.rs @@ -21,10 +21,13 @@ extern crate jsonrpc_core; extern crate jsonrpc_macros; #[macro_use] extern crate log; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate ethcore_ipc as ipc; extern crate semver; extern crate futures; extern crate ethcore_logger; +extern crate hash; +extern crate parking_lot; #[cfg(test)] extern crate tokio_core; extern crate ethcore_devtools as devtools; @@ -54,7 +57,9 @@ use std::sync::Arc; use std::net::SocketAddr; use std::collections::{HashSet, HashMap}; -use util::{H256, Hashable, RwLock, RwLockReadGuard}; +use hash::keccak; +use bigint::hash::H256; +use parking_lot::{RwLock, RwLockReadGuard}; type RpcResult = BoxFuture; @@ -228,7 +233,7 @@ impl Stratum { fn authorize(&self, params: Params, meta: SocketMetadata) -> RpcResult { future::result(params.parse::<(String, String)>().map(|(worker_id, secret)|{ if let Some(valid_secret) = self.secret { - let hash = secret.sha3(); + let hash = keccak(secret); if hash != valid_secret { return to_value(&false); } diff --git a/stratum/src/traits.rs b/stratum/src/traits.rs index c2c522b3b..d37000076 100644 --- a/stratum/src/traits.rs +++ b/stratum/src/traits.rs @@ -16,7 +16,7 @@ use std; use std::error::Error as StdError; -use util::H256; +use bigint::hash::H256; use ipc::IpcConfig; use jsonrpc_tcp_server::PushMessageError; diff --git a/sync/Cargo.toml b/sync/Cargo.toml index b9a411879..f9edab512 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -13,11 +13,14 @@ ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } ethcore-network = { path = "../util/network" } ethcore-io = { path = "../util/io" } ethcore-light = { path = "../ethcore/light"} ethcore = { path = "../ethcore" } rlp = { path = "../util/rlp" } +hash = { path = "../util/hash" } +triehash = { path = "../util/triehash" } clippy = { version = "0.0.103", optional = true} log = "0.3" env_logger = "0.4" diff --git a/sync/src/api.rs b/sync/src/api.rs index f3c5570ee..6f4b14970 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -20,7 +20,8 @@ use std::io; use util::Bytes; use network::{NetworkProtocolHandler, NetworkService, NetworkContext, HostInfo, PeerId, ProtocolId, NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, NetworkError, ConnectionFilter}; -use util::{U256, H256, H512}; +use bigint::prelude::U256; +use bigint::hash::{H256, H512}; use io::{TimerToken}; use ethcore::ethstore::ethkey::Secret; use ethcore::client::{BlockChainClient, ChainNotify}; diff 
--git a/sync/src/block_sync.rs b/sync/src/block_sync.rs index 9bed9b906..68fa65564 100644 --- a/sync/src/block_sync.rs +++ b/sync/src/block_sync.rs @@ -21,7 +21,7 @@ use std::collections::{HashSet, VecDeque}; use std::cmp; use heapsize::HeapSizeOf; -use util::*; +use bigint::hash::H256; use rlp::*; use ethcore::views::{BlockView}; use ethcore::header::{BlockNumber, Header as BlockHeader}; @@ -479,7 +479,7 @@ impl BlockDownloader { let receipts = block_and_receipts.receipts; let (h, number, parent) = { let header = BlockView::new(&block).header_view(); - (header.sha3(), header.number(), header.parent_hash()) + (header.hash(), header.number(), header.parent_hash()) }; // Perform basic block verification diff --git a/sync/src/blocks.rs b/sync/src/blocks.rs index 8f1425145..3696cd9d0 100644 --- a/sync/src/blocks.rs +++ b/sync/src/blocks.rs @@ -17,7 +17,10 @@ use std::collections::{HashSet, HashMap}; use std::collections::hash_map::Entry; use smallvec::SmallVec; +use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; use heapsize::HeapSizeOf; +use bigint::hash::H256; +use triehash::ordered_trie_root; use util::*; use rlp::*; use network::NetworkError; @@ -343,7 +346,7 @@ impl BlockCollection { let body = UntrustedRlp::new(&b); let tx = body.at(0)?; let tx_root = ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec())); //TODO: get rid of vectors here - let uncles = body.at(1)?.as_raw().sha3(); + let uncles = keccak(body.at(1)?.as_raw()); HeaderId { transactions_root: tx_root, uncles: uncles @@ -426,7 +429,7 @@ impl BlockCollection { transactions_root: info.transactions_root().clone(), uncles: info.uncles_hash().clone(), }; - if header_id.transactions_root == sha3::SHA3_NULL_RLP && header_id.uncles == sha3::SHA3_EMPTY_LIST_RLP { + if header_id.transactions_root == KECCAK_NULL_RLP && header_id.uncles == KECCAK_EMPTY_LIST_RLP { // empty body, just mark as downloaded let mut body_stream = RlpStream::new_list(2); body_stream.append_raw(&::rlp::EMPTY_LIST_RLP, 1); @@ -439,7 +442,7 @@ impl BlockCollection { } if self.need_receipts { let receipt_root = info.receipts_root().clone(); - if receipt_root == sha3::SHA3_NULL_RLP { + if receipt_root == KECCAK_NULL_RLP { let receipts_stream = RlpStream::new_list(0); block.receipts = Some(receipts_stream.out()); } else { @@ -490,7 +493,6 @@ mod test { use ethcore::client::{TestBlockChainClient, EachBlockWith, BlockId, BlockChainClient}; use ethcore::views::HeaderView; use ethcore::header::BlockNumber; - use util::*; use rlp::*; fn is_empty(bc: &BlockCollection) -> bool { @@ -527,7 +529,7 @@ mod test { .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) .collect(); let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).as_raw().to_vec()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect(); + let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).hash()).collect(); let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect(); bc.reset_to(heads); assert!(!bc.is_empty()); @@ -582,7 +584,7 @@ mod test { .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) .collect(); let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).as_raw().to_vec()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect(); + let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).hash()).collect(); let heads: Vec<_> 
= hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect(); bc.reset_to(heads); @@ -606,7 +608,7 @@ mod test { .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) .collect(); let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).as_raw().to_vec()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect(); + let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).hash()).collect(); let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect(); bc.reset_to(heads); diff --git a/sync/src/chain.rs b/sync/src/chain.rs index d5ed7a517..b3875fbcc 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -91,7 +91,11 @@ use std::collections::{HashSet, HashMap}; use std::cmp; +use hash::keccak; use heapsize::HeapSizeOf; +use bigint::prelude::U256; +use bigint::hash::{H256, H256FastMap}; +use parking_lot::RwLock; use util::*; use rlp::*; use network::*; @@ -683,7 +687,7 @@ impl ChainSync { peer.confirmation = ForkConfirmation::TooShort; } else { let header = r.at(0)?.as_raw(); - if header.sha3() == fork_hash { + if keccak(&header) == fork_hash { trace!(target: "sync", "{}: Confirmed peer", peer_id); peer.confirmation = ForkConfirmation::Confirmed; if !io.chain_overlay().read().contains_key(&fork_number) { @@ -895,7 +899,7 @@ impl ChainSync { } let block_rlp = r.at(0)?; let header_rlp = block_rlp.at(0)?; - let h = header_rlp.as_raw().sha3(); + let h = keccak(&header_rlp.as_raw()); trace!(target: "sync", "{} -> NewBlock ({})", peer_id, h); let header: BlockHeader = header_rlp.as_val()?; if header.number() > self.highest_block.unwrap_or(0) { @@ -1056,7 +1060,7 @@ impl ChainSync { self.continue_sync(io); return Ok(()); } - self.snapshot.reset_to(&manifest, &manifest_rlp.as_raw().sha3()); + self.snapshot.reset_to(&manifest, &keccak(manifest_rlp.as_raw())); io.snapshot_service().begin_restore(manifest); self.state = SyncState::SnapshotData; @@ -1510,7 +1514,7 @@ impl ChainSync { false => io.snapshot_service().manifest(), }; let block_number = manifest.as_ref().map_or(0, |m| m.block_number); - let manifest_hash = manifest.map_or(H256::new(), |m| m.into_rlp().sha3()); + let manifest_hash = manifest.map_or(H256::new(), |m| keccak(m.into_rlp())); packet.append(&manifest_hash); packet.append(&block_number); } @@ -1532,7 +1536,7 @@ impl ChainSync { match io.chain().block_header(BlockId::Hash(hash)) { Some(hdr) => { let number = hdr.number().into(); - debug_assert_eq!(hdr.sha3(), hash); + debug_assert_eq!(hdr.hash(), hash); if max_headers == 1 || io.chain().block_hash(BlockId::Number(number)) != Some(hash) { // Non canonical header or single header requested @@ -2229,9 +2233,10 @@ mod tests { use network::PeerId; use tests::helpers::*; use tests::snapshot::TestSnapshotService; - use util::{U256, Address, RwLock}; - use util::sha3::Hashable; - use util::hash::H256; + use bigint::prelude::U256; + use bigint::hash::H256; + use util::Address; + use parking_lot::RwLock; use util::bytes::Bytes; use rlp::{Rlp, RlpStream, UntrustedRlp}; use super::*; @@ -2395,7 +2400,7 @@ mod tests { let blocks: Vec<_> = (0 .. 
100) .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).map(|b| b.into_inner()).unwrap()).collect(); let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).as_raw().to_vec()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect(); + let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).hash()).collect(); let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 51947317d..4813bb474 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -28,6 +28,7 @@ //! extern crate ethcore_network as network; +extern crate ethcore_bigint as bigint; extern crate ethcore_io as io; extern crate ethcore; extern crate env_logger; @@ -38,6 +39,8 @@ extern crate parking_lot; extern crate smallvec; extern crate rlp; extern crate ipnetwork; +extern crate hash; +extern crate triehash; extern crate ethcore_light as light; diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index f05e4d7d0..54f8a26c0 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -45,7 +45,9 @@ use light::net::{ }; use light::request::{self, CompleteHeadersRequest as HeadersRequest}; use network::PeerId; -use util::{U256, H256, Mutex, RwLock}; +use bigint::prelude::U256; +use bigint::hash::H256; +use parking_lot::{Mutex, RwLock}; use rand::{Rng, OsRng}; use self::sync_round::{AbortReason, SyncRound, ResponseContext}; diff --git a/sync/src/light_sync/response.rs b/sync/src/light_sync/response.rs index 1ab504a9f..59a75dc1d 100644 --- a/sync/src/light_sync/response.rs +++ b/sync/src/light_sync/response.rs @@ -22,7 +22,7 @@ use ethcore::encoded; use ethcore::header::Header; use light::request::{HashOrNumber, CompleteHeadersRequest as HeadersRequest}; use rlp::DecoderError; -use util::H256; +use bigint::hash::H256; /// Errors found when decoding headers and verifying with basic constraints. #[derive(Debug, PartialEq)] diff --git a/sync/src/light_sync/sync_round.rs b/sync/src/light_sync/sync_round.rs index dfa17aad4..f8d1d438a 100644 --- a/sync/src/light_sync/sync_round.rs +++ b/sync/src/light_sync/sync_round.rs @@ -27,7 +27,7 @@ use light::net::ReqId; use light::request::CompleteHeadersRequest as HeadersRequest; use network::PeerId; -use util::H256; +use bigint::hash::H256; use super::response; diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index f6d5eddf0..65ddf92da 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -29,7 +29,7 @@ use light::client::Client as LightClient; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; use light::provider::LightProvider; use network::{NodeId, PeerId}; -use util::RwLock; +use parking_lot::RwLock; use time::Duration; use light::cache::Cache; diff --git a/sync/src/snapshot.rs b/sync/src/snapshot.rs index cecfbe425..3abd47af4 100644 --- a/sync/src/snapshot.rs +++ b/sync/src/snapshot.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
- -use util::{H256, Hashable}; +use hash::keccak; +use bigint::hash::H256; use std::collections::HashSet; use ethcore::snapshot::ManifestData; @@ -71,7 +71,7 @@ impl Snapshot { /// Validate chunk and mark it as downloaded pub fn validate_chunk(&mut self, chunk: &[u8]) -> Result { - let hash = chunk.sha3(); + let hash = keccak(chunk); if self.completed_chunks.contains(&hash) { trace!(target: "sync", "Ignored proccessed chunk: {}", hash.hex()); return Err(()); @@ -136,6 +136,7 @@ impl Snapshot { #[cfg(test)] mod test { + use hash::keccak; use util::*; use super::*; use ethcore::snapshot::ManifestData; @@ -153,13 +154,13 @@ mod test { let block_chunks: Vec = (0..20).map(|_| H256::random().to_vec()).collect(); let manifest = ManifestData { version: 2, - state_hashes: state_chunks.iter().map(|data| data.sha3()).collect(), - block_hashes: block_chunks.iter().map(|data| data.sha3()).collect(), + state_hashes: state_chunks.iter().map(|data| keccak(data)).collect(), + block_hashes: block_chunks.iter().map(|data| keccak(data)).collect(), state_root: H256::new(), block_number: 42, block_hash: H256::new(), }; - let mhash = manifest.clone().into_rlp().sha3(); + let mhash = keccak(manifest.clone().into_rlp()); (manifest, mhash, state_chunks, block_chunks) } @@ -211,7 +212,7 @@ mod test { assert!(snapshot.is_complete()); assert_eq!(snapshot.done_chunks(), 40); assert_eq!(snapshot.done_chunks(), snapshot.total_chunks()); - assert_eq!(snapshot.snapshot_hash(), Some(manifest.into_rlp().sha3())); + assert_eq!(snapshot.snapshot_hash(), Some(keccak(manifest.into_rlp()))); } #[test] diff --git a/sync/src/tests/consensus.rs b/sync/src/tests/consensus.rs index 9b01156b7..a9c26712d 100644 --- a/sync/src/tests/consensus.rs +++ b/sync/src/tests/consensus.rs @@ -15,6 +15,8 @@ // along with Parity. If not, see . 
use std::sync::Arc; +use hash::keccak; +use bigint::prelude::U256; use util::*; use io::{IoHandler, IoContext, IoChannel}; use ethcore::client::{BlockChainClient, Client}; @@ -56,8 +58,8 @@ fn new_tx(secret: &Secret, nonce: U256, chain_id: u64) -> PendingTransaction { #[test] fn authority_round() { - let s0 = KeyPair::from_secret_slice(&"1".sha3()).unwrap(); - let s1 = KeyPair::from_secret_slice(&"0".sha3()).unwrap(); + let s0 = KeyPair::from_secret_slice(&keccak("1")).unwrap(); + let s1 = KeyPair::from_secret_slice(&keccak("0")).unwrap(); let ap = Arc::new(AccountProvider::transient_provider()); ap.insert_account(s0.secret().clone(), "").unwrap(); ap.insert_account(s1.secret().clone(), "").unwrap(); @@ -143,8 +145,8 @@ fn authority_round() { #[test] fn tendermint() { - let s0 = KeyPair::from_secret_slice(&"1".sha3()).unwrap(); - let s1 = KeyPair::from_secret_slice(&"0".sha3()).unwrap(); + let s0 = KeyPair::from_secret_slice(&keccak("1")).unwrap(); + let s1 = KeyPair::from_secret_slice(&keccak("0")).unwrap(); let ap = Arc::new(AccountProvider::transient_provider()); ap.insert_account(s0.secret().clone(), "").unwrap(); ap.insert_account(s1.secret().clone(), "").unwrap(); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 3ac68b0fb..e676f5467 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -16,6 +16,8 @@ use std::collections::{VecDeque, HashSet, HashMap}; use std::sync::Arc; +use bigint::hash::H256; +use parking_lot::RwLock; use util::*; use network::*; use tests::snapshot::*; diff --git a/sync/src/tests/snapshot.rs b/sync/src/tests/snapshot.rs index 9303aa9f7..a39b3949f 100644 --- a/sync/src/tests/snapshot.rs +++ b/sync/src/tests/snapshot.rs @@ -16,6 +16,9 @@ use std::collections::HashMap; use std::sync::Arc; +use hash::keccak; +use bigint::hash::H256; +use parking_lot::Mutex; use util::*; use ethcore::snapshot::{SnapshotService, ManifestData, RestorationStatus}; use ethcore::header::BlockNumber; @@ -50,14 +53,14 @@ impl TestSnapshotService { let block_chunks: Vec = (0..num_block_chunks).map(|_| H256::random().to_vec()).collect(); let manifest = ManifestData { version: 2, - state_hashes: state_chunks.iter().map(|data| data.sha3()).collect(), - block_hashes: block_chunks.iter().map(|data| data.sha3()).collect(), + state_hashes: state_chunks.iter().map(|data| keccak(data)).collect(), + block_hashes: block_chunks.iter().map(|data| keccak(data)).collect(), state_root: H256::new(), block_number: block_number, block_hash: block_hash, }; - let mut chunks: HashMap = state_chunks.into_iter().map(|data| (data.sha3(), data)).collect(); - chunks.extend(block_chunks.into_iter().map(|data| (data.sha3(), data))); + let mut chunks: HashMap = state_chunks.into_iter().map(|data| (keccak(&data), data)).collect(); + chunks.extend(block_chunks.into_iter().map(|data| (keccak(&data), data))); TestSnapshotService { manifest: Some(manifest), chunks: chunks, diff --git a/sync/src/transactions_stats.rs b/sync/src/transactions_stats.rs index dcd2702c8..7a1257cba 100644 --- a/sync/src/transactions_stats.rs +++ b/sync/src/transactions_stats.rs @@ -16,8 +16,7 @@ use api::TransactionStats; use std::collections::{HashSet, HashMap}; -use util::{H256, H512}; -use util::hash::H256FastMap; +use bigint::hash::{H256, H512, H256FastMap}; type NodeId = H512; type BlockNumber = u64; diff --git a/updater/Cargo.toml b/updater/Cargo.toml index bb6557f6d..8dad116d0 100644 --- a/updater/Cargo.toml +++ b/updater/Cargo.toml @@ -16,7 +16,9 @@ target_info = "0.1" ethcore = { path = 
"../ethcore" } ethsync = { path = "../sync" } ethcore-util = { path = "../util" } +ethcore-bigint = { path = "../util/bigint" } futures = "0.1" +parking_lot = "0.4" parity-hash-fetch = { path = "../hash-fetch" } ipc-common-types = { path = "../ipc-common-types" } ethcore-ipc = { path = "../ipc/rpc" } diff --git a/updater/src/lib.rs b/updater/src/lib.rs index b27f2039e..265f8de4e 100644 --- a/updater/src/lib.rs +++ b/updater/src/lib.rs @@ -18,7 +18,9 @@ #[macro_use] extern crate log; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate ipc_common_types; +extern crate parking_lot; extern crate parity_hash_fetch as hash_fetch; extern crate ethcore; extern crate ethabi; diff --git a/updater/src/operations.rs b/updater/src/operations.rs index d94459bef..4ec53331c 100644 --- a/updater/src/operations.rs +++ b/updater/src/operations.rs @@ -5,6 +5,7 @@ use std::result::Result; use std::fmt; use ethabi; use util; +use bigint; pub struct Operations { contract: ethabi::Contract, @@ -35,7 +36,7 @@ impl Operations { /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_client","type":"bytes32"},{"name":"_release","type":"bytes32"}],"name":"isLatest","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn is_latest(&self, _client: &str, _release: &util::H256) -> Result { + pub fn is_latest(&self, _client: &str, _release: &bigint::hash::H256) -> Result { let call = self.contract.function("isLatest".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_client.as_bytes().to_owned()), ethabi::Token::FixedBytes(_release.as_ref().to_owned())] @@ -47,7 +48,7 @@ impl Operations { /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_txid","type":"bytes32"}],"name":"rejectTransaction","outputs":[],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn reject_transaction(&self, _txid: &util::H256) -> Result<(), String> { + pub fn reject_transaction(&self, _txid: &bigint::hash::H256) -> Result<(), String> { let call = self.contract.function("rejectTransaction".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_txid.as_ref().to_owned())] @@ -71,10 +72,10 @@ impl Operations { /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_number","type":"uint32"},{"name":"_name","type":"bytes32"},{"name":"_hard","type":"bool"},{"name":"_spec","type":"bytes32"}],"name":"proposeFork","outputs":[],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn propose_fork(&self, _number: u32, _name: &util::H256, _hard: bool, _spec: &util::H256) -> Result<(), String> { + pub fn propose_fork(&self, _number: u32, _name: &bigint::hash::H256, _hard: bool, _spec: &bigint::hash::H256) -> Result<(), String> { let call = self.contract.function("proposeFork".into()).map_err(Self::as_string)?; let data = call.encode_call( - vec![ethabi::Token::Uint({ let mut r = [0u8; 32]; util::U256::from(_number as u64).to_big_endian(&mut r); r }), ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::Bool(_hard), ethabi::Token::FixedBytes(_spec.as_ref().to_owned())] + vec![ethabi::Token::Uint({ let mut r = [0u8; 32]; bigint::prelude::U256::from(_number as u64).to_big_endian(&mut r); r }), ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::Bool(_hard), ethabi::Token::FixedBytes(_spec.as_ref().to_owned())] ).map_err(Self::as_string)?; call.decode_output((self.do_call)(self.address.clone(), 
data)?).map_err(Self::as_string)?; @@ -95,26 +96,26 @@ impl Operations { /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_client","type":"bytes32"},{"name":"_release","type":"bytes32"}],"name":"release","outputs":[{"name":"o_forkBlock","type":"uint32"},{"name":"o_track","type":"uint8"},{"name":"o_semver","type":"uint24"},{"name":"o_critical","type":"bool"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn release(&self, _client: &str, _release: &util::H256) -> Result<(u32, u8, u32, bool), String> { + pub fn release(&self, _client: &str, _release: &bigint::hash::H256) -> Result<(u32, u8, u32, bool), String> { let call = self.contract.function("release".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_client.as_bytes().to_owned()), ethabi::Token::FixedBytes(_release.as_ref().to_owned())] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()).as_u64() as u32 }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()).as_u64() as u8 }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()).as_u64() as u32 }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()).as_u64() as u32 }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()).as_u64() as u8 }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()).as_u64() as u32 }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r })) } /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_client","type":"bytes32"},{"name":"_checksum","type":"bytes32"}],"name":"build","outputs":[{"name":"o_release","type":"bytes32"},{"name":"o_platform","type":"bytes32"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn build(&self, _client: &str, _checksum: &util::H256) -> Result<(util::H256, util::H256), String> { + pub fn build(&self, _client: &str, _checksum: &bigint::hash::H256) -> Result<(bigint::hash::H256, bigint::hash::H256), String> { let call = self.contract.function("build".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_client.as_bytes().to_owned()), ethabi::Token::FixedBytes(_checksum.as_ref().to_owned())] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) })) + Ok(({ let r = 
result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; bigint::hash::H256::from_slice(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; bigint::hash::H256::from_slice(r.as_ref()) })) } /// Auto-generated from: `{"constant":false,"inputs":[],"name":"rejectFork","outputs":[],"payable":false,"type":"function"}` @@ -131,7 +132,7 @@ impl Operations { /// Auto-generated from: `{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"client","outputs":[{"name":"owner","type":"address"},{"name":"required","type":"bool"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn client(&self, _1: &util::H256) -> Result<(util::Address, bool), String> { + pub fn client(&self, _1: &bigint::hash::H256) -> Result<(util::Address, bool), String> { let call = self.contract.function("client".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_1.as_ref().to_owned())] @@ -155,19 +156,19 @@ impl Operations { /// Auto-generated from: `{"constant":true,"inputs":[{"name":"","type":"uint32"}],"name":"fork","outputs":[{"name":"name","type":"bytes32"},{"name":"spec","type":"bytes32"},{"name":"hard","type":"bool"},{"name":"ratified","type":"bool"},{"name":"requiredCount","type":"uint256"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn fork(&self, _1: u32) -> Result<(util::H256, util::H256, bool, bool, util::U256), String> { + pub fn fork(&self, _1: u32) -> Result<(bigint::hash::H256, bigint::hash::H256, bool, bool, bigint::prelude::U256), String> { let call = self.contract.function("fork".into()).map_err(Self::as_string)?; let data = call.encode_call( - vec![ethabi::Token::Uint({ let mut r = [0u8; 32]; util::U256::from(_1 as u64).to_big_endian(&mut r); r })] + vec![ethabi::Token::Uint({ let mut r = [0u8; 32]; bigint::prelude::U256::from(_1 as u64).to_big_endian(&mut r); r })] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; bigint::hash::H256::from_slice(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; bigint::hash::H256::from_slice(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; 
bigint::prelude::U256::from(r.as_ref()) })) } /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_release","type":"bytes32"},{"name":"_platform","type":"bytes32"},{"name":"_checksum","type":"bytes32"}],"name":"addChecksum","outputs":[],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn add_checksum(&self, _release: &util::H256, _platform: &str, _checksum: &util::H256) -> Result<(), String> { + pub fn add_checksum(&self, _release: &bigint::hash::H256, _platform: &str, _checksum: &bigint::hash::H256) -> Result<(), String> { let call = self.contract.function("addChecksum".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_release.as_ref().to_owned()), ethabi::Token::FixedBytes(_platform.as_bytes().to_owned()), ethabi::Token::FixedBytes(_checksum.as_ref().to_owned())] @@ -179,26 +180,26 @@ impl Operations { /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_txid","type":"bytes32"}],"name":"confirmTransaction","outputs":[{"name":"txSuccess","type":"uint256"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn confirm_transaction(&self, _txid: &util::H256) -> Result { + pub fn confirm_transaction(&self, _txid: &bigint::hash::H256) -> Result { let call = self.contract.function("confirmTransaction".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_txid.as_ref().to_owned())] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()) })) } /// Auto-generated from: `{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"proxy","outputs":[{"name":"requiredCount","type":"uint256"},{"name":"to","type":"address"},{"name":"data","type":"bytes"},{"name":"value","type":"uint256"},{"name":"gas","type":"uint256"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn proxy(&self, _1: &util::H256) -> Result<(util::U256, util::Address, Vec, util::U256, util::U256), String> { + pub fn proxy(&self, _1: &bigint::hash::H256) -> Result<(bigint::prelude::U256, util::Address, Vec, bigint::prelude::U256, bigint::prelude::U256), String> { let call = self.contract.function("proxy".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_1.as_ref().to_owned())] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bytes().ok_or("Invalid type returned")?; r }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; 
util::U256::from(r.as_ref()) })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bytes().ok_or("Invalid type returned")?; r }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()) }, { let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()) })) } /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_client","type":"bytes32"},{"name":"_owner","type":"address"}],"name":"addClient","outputs":[],"payable":false,"type":"function"}` @@ -215,26 +216,26 @@ impl Operations { /// Auto-generated from: `{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"clientOwner","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn client_owner(&self, _1: &util::Address) -> Result<util::H256, String> { + pub fn client_owner(&self, _1: &util::Address) -> Result<bigint::hash::H256, String> { let call = self.contract.function("clientOwner".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::Address(_1.clone().0)] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; bigint::hash::H256::from_slice(r.as_ref()) })) } /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_txid","type":"bytes32"},{"name":"_to","type":"address"},{"name":"_data","type":"bytes"},{"name":"_value","type":"uint256"},{"name":"_gas","type":"uint256"}],"name":"proposeTransaction","outputs":[{"name":"txSuccess","type":"uint256"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn propose_transaction(&self, _txid: &util::H256, _to: &util::Address, _data: &[u8], _value: util::U256, _gas: util::U256) -> Result<util::U256, String> { + pub fn propose_transaction(&self, _txid: &bigint::hash::H256, _to: &util::Address, _data: &[u8], _value: bigint::prelude::U256, _gas: bigint::prelude::U256) -> Result<bigint::prelude::U256, String> { let call = self.contract.function("proposeTransaction".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_txid.as_ref().to_owned()), ethabi::Token::Address(_to.clone().0), ethabi::Token::Bytes(_data.to_owned()), ethabi::Token::Uint({ let mut r = [0u8; 32]; _value.to_big_endian(&mut r); r }), ethabi::Token::Uint({ let mut r = [0u8; 32]; _gas.to_big_endian(&mut r); r })] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()) }))
} /// Auto-generated from: `{"constant":true,"inputs":[],"name":"grandOwner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}` @@ -251,10 +252,10 @@ impl Operations { /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_release","type":"bytes32"},{"name":"_forkBlock","type":"uint32"},{"name":"_track","type":"uint8"},{"name":"_semver","type":"uint24"},{"name":"_critical","type":"bool"}],"name":"addRelease","outputs":[],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn add_release(&self, _release: &util::H256, _fork_block: u32, _track: u8, _semver: u32, _critical: bool) -> Result<(), String> { + pub fn add_release(&self, _release: &bigint::hash::H256, _fork_block: u32, _track: u8, _semver: u32, _critical: bool) -> Result<(), String> { let call = self.contract.function("addRelease".into()).map_err(Self::as_string)?; let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_release.as_ref().to_owned()), ethabi::Token::Uint({ let mut r = [0u8; 32]; util::U256::from(_fork_block as u64).to_big_endian(&mut r); r }), ethabi::Token::Uint({ let mut r = [0u8; 32]; util::U256::from(_track as u64).to_big_endian(&mut r); r }), ethabi::Token::Uint({ let mut r = [0u8; 32]; util::U256::from(_semver as u64).to_big_endian(&mut r); r }), ethabi::Token::Bool(_critical)] + vec![ethabi::Token::FixedBytes(_release.as_ref().to_owned()), ethabi::Token::Uint({ let mut r = [0u8; 32]; bigint::prelude::U256::from(_fork_block as u64).to_big_endian(&mut r); r }), ethabi::Token::Uint({ let mut r = [0u8; 32]; bigint::prelude::U256::from(_track as u64).to_big_endian(&mut r); r }), ethabi::Token::Uint({ let mut r = [0u8; 32]; bigint::prelude::U256::from(_semver as u64).to_big_endian(&mut r); r }), ethabi::Token::Bool(_critical)] ).map_err(Self::as_string)?; call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; @@ -282,19 +283,19 @@ impl Operations { ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()).as_u64() as u32 })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()).as_u64() as u32 })) } /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_client","type":"bytes32"},{"name":"_release","type":"bytes32"}],"name":"track","outputs":[{"name":"","type":"uint8"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn track(&self, _client: &str, _release: &util::H256) -> Result { + pub fn track(&self, _client: &str, _release: &bigint::hash::H256) -> Result { let call = self.contract.function("track".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_client.as_bytes().to_owned()), ethabi::Token::FixedBytes(_release.as_ref().to_owned())] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()).as_u64() as u8 })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; 
bigint::prelude::U256::from(r.as_ref()).as_u64() as u8 })) } /// Auto-generated from: `{"constant":false,"inputs":[{"name":"_client","type":"bytes32"},{"name":"_r","type":"bool"}],"name":"setClientRequired","outputs":[],"payable":false,"type":"function"}` @@ -318,31 +319,31 @@ impl Operations { ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()).as_u64() as u32 })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()).as_u64() as u32 })) } /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_client","type":"bytes32"},{"name":"_track","type":"uint8"}],"name":"latestInTrack","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn latest_in_track(&self, _client: &str, _track: u8) -> Result { + pub fn latest_in_track(&self, _client: &str, _track: u8) -> Result { let call = self.contract.function("latestInTrack".into()).map_err(Self::as_string)?; let data = call.encode_call( - vec![ethabi::Token::FixedBytes(_client.as_bytes().to_owned()), ethabi::Token::Uint({ let mut r = [0u8; 32]; util::U256::from(_track as u64).to_big_endian(&mut r); r })] + vec![ethabi::Token::FixedBytes(_client.as_bytes().to_owned()), ethabi::Token::Uint({ let mut r = [0u8; 32]; bigint::prelude::U256::from(_track as u64).to_big_endian(&mut r); r })] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; bigint::hash::H256::from_slice(r.as_ref()) })) } /// Auto-generated from: `{"constant":true,"inputs":[{"name":"_client","type":"bytes32"},{"name":"_release","type":"bytes32"},{"name":"_platform","type":"bytes32"}],"name":"checksum","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}` #[allow(dead_code)] - pub fn checksum(&self, _client: &str, _release: &util::H256, _platform: &str) -> Result { + pub fn checksum(&self, _client: &str, _release: &bigint::hash::H256, _platform: &str) -> Result { let call = self.contract.function("checksum".into()).map_err(Self::as_string)?; let data = call.encode_call( vec![ethabi::Token::FixedBytes(_client.as_bytes().to_owned()), ethabi::Token::FixedBytes(_release.as_ref().to_owned()), ethabi::Token::FixedBytes(_platform.as_bytes().to_owned())] ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; bigint::hash::H256::from_slice(r.as_ref()) })) } /// Auto-generated from: 
`{"constant":true,"inputs":[],"name":"proposedFork","outputs":[{"name":"","type":"uint32"}],"payable":false,"type":"function"}` @@ -354,6 +355,6 @@ impl Operations { ).map_err(Self::as_string)?; let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?; let mut result = output.into_iter().rev().collect::>(); - Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()).as_u64() as u32 })) + Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; bigint::prelude::U256::from(r.as_ref()).as_u64() as u32 })) } } diff --git a/updater/src/types/all.rs b/updater/src/types/all.rs index dbb5f413a..aef1a0ce3 100644 --- a/updater/src/types/all.rs +++ b/updater/src/types/all.rs @@ -16,7 +16,7 @@ //! Types used in the public API -use util::{H256}; +use bigint::hash::H256; pub use ipc_common_types::{VersionInfo, ReleaseTrack}; /// Information regarding a particular release of Parity diff --git a/updater/src/updater.rs b/updater/src/updater.rs index 817688688..325048fab 100644 --- a/updater/src/updater.rs +++ b/updater/src/updater.rs @@ -31,7 +31,9 @@ use path::restrict_permissions_owner; use service::{Service}; use target_info::Target; use types::all::{ReleaseInfo, OperationsInfo, CapState}; -use util::{Address, H160, H256, Mutex, Bytes}; +use bigint::hash::{H160, H256}; +use util::{Address, Bytes}; +use parking_lot::Mutex; use util::misc; /// Filter for releases. diff --git a/util/Cargo.toml b/util/Cargo.toml index 0e485431e..cc684c6cd 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -11,28 +11,25 @@ build = "build.rs" log = "0.3" env_logger = "0.4" rustc-hex = "1.0" -rand = "0.3.12" -time = "0.1.34" rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" } eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } -rust-crypto = "0.2.34" elastic-array = "0.9" rlp = { path = "rlp" } heapsize = "0.4" -sha3 = { path = "sha3" } +hash = { path = "hash" } clippy = { version = "0.0.103", optional = true} ethcore-devtools = { path = "../devtools" } libc = "0.2.7" -vergen = "0.1" target_info = "0.1" ethcore-bigint = { path = "bigint", features = ["heapsizeof"] } parking_lot = "0.4" -ansi_term = "0.9" tiny-keccak= "1.0" ethcore-bloom-journal = { path = "bloom" } regex = "0.2" lru-cache = "0.1.0" ethcore-logger = { path = "../logger" } +triehash = { path = "triehash" } +error-chain = "0.11.0-rc.2" [features] default = [] @@ -40,5 +37,5 @@ dev = ["clippy"] final = [] [build-dependencies] -vergen = "*" +vergen = "0.1" rustc_version = "0.1.0" diff --git a/util/benches/trie.rs b/util/benches/trie.rs index e02077ad4..fb41c0377 100644 --- a/util/benches/trie.rs +++ b/util/benches/trie.rs @@ -17,22 +17,24 @@ #![feature(test)] extern crate test; +extern crate triehash; extern crate ethcore_util; +extern crate ethcore_bigint; #[macro_use] extern crate log; +extern crate hash; use test::{Bencher, black_box}; -use ethcore_util::hash::*; +use ethcore_bigint::hash::*; use ethcore_util::bytes::*; use ethcore_util::trie::*; use ethcore_util::memorydb::*; -use ethcore_util::triehash::*; -use ethcore_util::sha3::*; - +use triehash::*; +use hash::keccak; fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { assert!(min_count + diff_count <= 32); - *seed = seed.sha3(); + *seed = keccak(&seed); let r = min_count + (seed[31] as usize % (diff_count + 1)); let mut ret: Vec = 
Vec::with_capacity(r); for i in 0..r { @@ -43,13 +45,13 @@ fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut fn random_bytes(min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { assert!(min_count + diff_count <= 32); - *seed = seed.sha3(); + *seed = keccak(&seed); let r = min_count + (seed[31] as usize % (diff_count + 1)); seed[0..r].to_vec() } fn random_value(seed: &mut H256) -> Bytes { - *seed = seed.sha3(); + *seed = keccak(&seed); match seed[0] % 2 { 1 => vec![seed[31];1], _ => seed.to_vec(), @@ -306,11 +308,11 @@ fn triehash_insertions_six_low(b: &mut Bencher) { } #[bench] -fn sha3x10000(b: &mut Bencher) { +fn keccakx10000(b: &mut Bencher) { b.iter(||{ let mut seed = H256::new(); for _ in 0..10000 { - seed = seed.sha3() + seed = keccak(&seed); } }) } diff --git a/util/bloom/src/lib.rs b/util/bloom/src/lib.rs index 4fdb61d40..22a2cbc2a 100644 --- a/util/bloom/src/lib.rs +++ b/util/bloom/src/lib.rs @@ -245,14 +245,33 @@ mod tests { } #[test] - fn hash_backward_compatibility() { + fn hash_backward_compatibility_for_new() { let ss = vec!["you", "should", "not", "break", "hash", "backward", "compatibility"]; let mut bloom = Bloom::new(16, 8); for s in ss.iter() { bloom.set(&s); } + let drained_elems: HashSet = bloom.drain_journal().entries.into_iter().map(|t| t.1).collect(); let expected: HashSet = [2094615114573771027u64, 244675582389208413u64].iter().cloned().collect(); assert_eq!(drained_elems, expected); + assert_eq!(bloom.k_num, 12); + } + + #[test] + fn hash_backward_compatibility_for_from_parts() { + let stored_state = vec![2094615114573771027u64, 244675582389208413u64]; + let k_num = 12; + let bloom = Bloom::from_parts(&stored_state, k_num); + + let ss = vec!["you", "should", "not", "break", "hash", "backward", "compatibility"]; + let tt = vec!["this", "doesnot", "exist"]; + for s in ss.iter() { + assert!(bloom.check(&s)); + } + for s in tt.iter() { + assert!(!bloom.check(&s)); + } + } } diff --git a/util/bloomable/Cargo.toml b/util/bloomable/Cargo.toml index f85b67943..94ca4856b 100644 --- a/util/bloomable/Cargo.toml +++ b/util/bloomable/Cargo.toml @@ -7,4 +7,4 @@ authors = ["debris "] ethcore-bigint = { path = "../bigint" } [dev-dependencies] -tiny-keccak = "1.3" +hash = { path = "../hash" } diff --git a/util/bloomable/tests/test.rs b/util/bloomable/tests/test.rs index 85ced83e6..f3fa908e2 100644 --- a/util/bloomable/tests/test.rs +++ b/util/bloomable/tests/test.rs @@ -1,14 +1,10 @@ -extern crate tiny_keccak; +extern crate hash; extern crate ethcore_bigint; extern crate bloomable; use ethcore_bigint::hash::{H160, H256, H2048}; use bloomable::Bloomable; -use tiny_keccak::keccak256; - -fn sha3(input: &[u8]) -> H256 { - keccak256(input).into() -} +use hash::keccak; #[test] fn shift_bloomed() { @@ -17,15 +13,15 @@ fn shift_bloomed() { let topic: H256 = "02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc".into(); let mut my_bloom = H2048::default(); - assert!(!my_bloom.contains_bloomed(&sha3(&address))); - assert!(!my_bloom.contains_bloomed(&sha3(&topic))); + assert!(!my_bloom.contains_bloomed(&keccak(&address))); + assert!(!my_bloom.contains_bloomed(&keccak(&topic))); - my_bloom.shift_bloomed(&sha3(&address)); - assert!(my_bloom.contains_bloomed(&sha3(&address))); - assert!(!my_bloom.contains_bloomed(&sha3(&topic))); + my_bloom.shift_bloomed(&keccak(&address)); + assert!(my_bloom.contains_bloomed(&keccak(&address))); + assert!(!my_bloom.contains_bloomed(&keccak(&topic))); - my_bloom.shift_bloomed(&sha3(&topic)); + 
my_bloom.shift_bloomed(&keccak(&topic)); assert_eq!(my_bloom, bloom); - assert!(my_bloom.contains_bloomed(&sha3(&address))); - assert!(my_bloom.contains_bloomed(&sha3(&topic))); + assert!(my_bloom.contains_bloomed(&keccak(&address))); + assert!(my_bloom.contains_bloomed(&keccak(&topic))); } diff --git a/util/sha3/Cargo.toml b/util/hash/Cargo.toml similarity index 65% rename from util/sha3/Cargo.toml rename to util/hash/Cargo.toml index 8f8d6d160..91a195468 100644 --- a/util/sha3/Cargo.toml +++ b/util/hash/Cargo.toml @@ -2,10 +2,17 @@ description = "Rust bindings for tinykeccak C library" homepage = "http://parity.io" license = "GPL-3.0" -name = "sha3" +name = "hash" version = "0.1.0" authors = ["Parity Technologies "] build = "build.rs" +[dependencies] +ethcore-bigint = { path = "../bigint" } +tiny-keccak = "1.3" + [build-dependencies] gcc = "0.3" + +[dev-dependencies] +tempdir = "0.3" diff --git a/util/sha3/build.rs b/util/hash/build.rs similarity index 100% rename from util/sha3/build.rs rename to util/hash/build.rs diff --git a/util/hash/src/lib.rs b/util/hash/src/lib.rs new file mode 100644 index 000000000..91d3b4bf9 --- /dev/null +++ b/util/hash/src/lib.rs @@ -0,0 +1,110 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate ethcore_bigint as bigint; +extern crate tiny_keccak; + +use std::io; +use tiny_keccak::Keccak; +pub use bigint::hash::H256; + +/// Get the KECCAK (i.e. Keccak) hash of the empty bytes string. +pub const KECCAK_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] ); + +/// The KECCAK of the RLP encoding of empty data. +pub const KECCAK_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] ); + +/// The KECCAK of the RLP encoding of empty list. +pub const KECCAK_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] ); + +extern { + /// Hashes input. Returns -1 if either out or input does not exist. Otherwise returns 0. + pub fn keccak_256(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) -> i32; + /// Hashes input. Returns -1 if either out or input does not exist. Otherwise returns 0. 
+ pub fn keccak_512(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) -> i32; +} + +pub fn keccak<T: AsRef<[u8]>>(s: T) -> H256 { + let mut result = [0u8; 32]; + write_keccak(s, &mut result); + H256(result) +} + +pub fn write_keccak<T: AsRef<[u8]>>(s: T, dest: &mut [u8]) { + let input = s.as_ref(); + unsafe { + // we can safely ignore keccak_256 output, cause we know that both input + // and dest are properly allocated + keccak_256(dest.as_mut_ptr(), dest.len(), input.as_ptr(), input.len()); + } +} + +pub fn keccak_buffer(r: &mut io::BufRead) -> Result<H256, io::Error> { + let mut output = [0u8; 32]; + let mut input = [0u8; 1024]; + let mut keccak = Keccak::new_keccak256(); + + // read file + loop { + let some = r.read(&mut input)?; + if some == 0 { + break; + } + keccak.update(&input[0..some]); + } + + keccak.finalize(&mut output); + Ok(output.into()) +} + +#[cfg(test)] +mod tests { + extern crate tempdir; + + use std::fs; + use std::io::{Write, BufReader}; + use self::tempdir::TempDir; + use super::{keccak, keccak_buffer, KECCAK_EMPTY}; + + #[test] + fn keccak_empty() { + assert_eq!(keccak([0u8; 0]), KECCAK_EMPTY); + } + #[test] + fn keccak_as() { + assert_eq!(keccak([0x41u8; 32]), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8")); + } + + #[test] + fn should_keccak_a_file() { + // given + let tempdir = TempDir::new("keccak").unwrap(); + let mut path = tempdir.path().to_owned(); + path.push("should_keccak_a_file"); + // Prepare file + { + let mut file = fs::File::create(&path).unwrap(); + file.write_all(b"something").unwrap(); + } + + let mut file = BufReader::new(fs::File::open(&path).unwrap()); + // when + let hash = keccak_buffer(&mut file).unwrap(); + + // then + assert_eq!(format!("{:?}", hash), "68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87"); + } +} diff --git a/util/sha3/src/tinykeccak.c b/util/hash/src/tinykeccak.c similarity index 92% rename from util/sha3/src/tinykeccak.c rename to util/hash/src/tinykeccak.c index 1f93ce2db..bfe172e5f 100644 --- a/util/sha3/src/tinykeccak.c +++ b/util/hash/src/tinykeccak.c @@ -13,15 +13,15 @@ #define decshake(bits) \ int shake##bits(uint8_t*, size_t, const uint8_t*, size_t); -#define decsha3(bits) \ - int sha3_##bits(uint8_t*, size_t, const uint8_t*, size_t); +#define deckeccak(bits) \ + int keccak_##bits(uint8_t*, size_t, const uint8_t*, size_t); decshake(128) decshake(256) -decsha3(224) -decsha3(256) -decsha3(384) -decsha3(512) +deckeccak(224) +deckeccak(256) +deckeccak(384) +deckeccak(512) /******** The Keccak-f[1600] permutation ********/ @@ -154,8 +154,8 @@ static inline int hash(uint8_t* out, size_t outlen, const uint8_t* in, size_t inlen) { \ return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x1f); \ } -#define defsha3(bits) \ - int sha3_##bits(uint8_t* out, size_t outlen, \ +#define defkeccak(bits) \ + int keccak_##bits(uint8_t* out, size_t outlen, \ const uint8_t* in, size_t inlen) { \ if (outlen > (bits/8)) { \ return -1; \ @@ -168,10 +168,10 @@ defshake(128) defshake(256) /*** FIPS202 SHA3 FOFs ***/ -defsha3(224) -defsha3(256) -defsha3(384) -defsha3(512) +defkeccak(224) +defkeccak(256) +defkeccak(384) +defkeccak(512) diff --git a/util/network/Cargo.toml b/util/network/Cargo.toml index 2d571d632..f8d1e673b 100644 --- a/util/network/Cargo.toml +++ b/util/network/Cargo.toml @@ -24,6 +24,7 @@ rustc-hex = "1.0" rustc-serialize = "0.3" ethcore-io = { path = "../io" } ethcore-util = { path = ".."
} +ethcore-bigint = { path = "../bigint" } ethcore-devtools = { path = "../../devtools" } ethkey = { path = "../../ethkey" } ethcrypto = { path = "../../ethcrypto" } @@ -31,6 +32,7 @@ rlp = { path = "../rlp" } path = { path = "../path" } ethcore-logger = { path ="../../logger" } ipnetwork = "0.12.6" +hash = { path = "../hash" } [features] default = [] diff --git a/util/network/src/connection.rs b/util/network/src/connection.rs index a9cb05c00..f30d46838 100644 --- a/util/network/src/connection.rs +++ b/util/network/src/connection.rs @@ -18,11 +18,11 @@ use std::sync::Arc; use std::collections::VecDeque; use std::net::SocketAddr; use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; +use hash::{keccak, write_keccak}; use mio::{Token, Ready, PollOpt}; use mio::deprecated::{Handler, EventLoop, TryRead, TryWrite}; use mio::tcp::*; -use util::hash::*; -use util::sha3::*; +use bigint::hash::*; use util::bytes::*; use rlp::*; use std::io::{self, Cursor, Read, Write}; @@ -312,16 +312,16 @@ impl EncryptedConnection { } let mut key_material = H512::new(); shared.copy_to(&mut key_material[0..32]); - nonce_material.sha3_into(&mut key_material[32..64]); - key_material.sha3().copy_to(&mut key_material[32..64]); - key_material.sha3().copy_to(&mut key_material[32..64]); + write_keccak(&nonce_material, &mut key_material[32..64]); + keccak(&key_material).copy_to(&mut key_material[32..64]); + keccak(&key_material).copy_to(&mut key_material[32..64]); let iv = vec![0u8; 16]; let encoder = CtrMode::new(AesSafe256Encryptor::new(&key_material[32..64]), iv); let iv = vec![0u8; 16]; let decoder = CtrMode::new(AesSafe256Encryptor::new(&key_material[32..64]), iv); - key_material.sha3().copy_to(&mut key_material[32..64]); + keccak(&key_material).copy_to(&mut key_material[32..64]); let mac_encoder = EcbEncryptor::new(AesSafe256Encryptor::new(&key_material[32..64]), NoPadding); let mut egress_mac = Keccak::new_keccak256(); @@ -481,7 +481,7 @@ impl EncryptedConnection { #[test] pub fn test_encryption() { - use util::hash::*; + use bigint::hash::*; use std::str::FromStr; let key = H256::from_str("2212767d793a7a3d66f869ae324dd11bd17044b82c9f463b8a541a4d089efec5").unwrap(); let before = H128::from_str("12532abaec065082a3cf1da7d0136f15").unwrap(); diff --git a/util/network/src/discovery.rs b/util/network/src/discovery.rs index 3dcd8548d..205f101bf 100644 --- a/util/network/src/discovery.rs +++ b/util/network/src/discovery.rs @@ -22,9 +22,9 @@ use std::default::Default; use mio::*; use mio::deprecated::{Handler, EventLoop}; use mio::udp::*; -use util::sha3::*; +use hash::keccak; use time; -use util::hash::*; +use bigint::hash::*; use rlp::*; use node_table::*; use error::NetworkError; @@ -112,7 +112,7 @@ impl Discovery { let socket = UdpSocket::bind(&listen).expect("Error binding UDP socket"); Discovery { id: key.public().clone(), - id_hash: key.public().sha3(), + id_hash: keccak(key.public()), secret: key.secret().clone(), public_endpoint: public, token: token, @@ -154,7 +154,7 @@ impl Discovery { fn update_node(&mut self, e: NodeEntry) { trace!(target: "discovery", "Inserting {:?}", &e); - let id_hash = e.id.sha3(); + let id_hash = keccak(e.id); let ping = { let mut bucket = &mut self.node_buckets[Discovery::distance(&self.id_hash, &id_hash) as usize]; let updated = if let Some(node) = bucket.nodes.iter_mut().find(|n| n.address.id == e.id) { @@ -180,7 +180,7 @@ impl Discovery { } fn clear_ping(&mut self, id: &NodeId) { - let mut bucket = &mut self.node_buckets[Discovery::distance(&self.id_hash, &id.sha3()) as 
usize]; + let mut bucket = &mut self.node_buckets[Discovery::distance(&self.id_hash, &keccak(id)) as usize]; if let Some(node) = bucket.nodes.iter_mut().find(|n| &n.address.id == id) { node.timeout = None; } @@ -264,7 +264,7 @@ impl Discovery { rlp.append(×tamp); let bytes = rlp.drain(); - let hash = bytes.as_ref().sha3(); + let hash = keccak(bytes.as_ref()); let signature = match sign(&self.secret, &hash) { Ok(s) => s, Err(_) => { @@ -276,7 +276,7 @@ impl Discovery { packet.extend(hash.iter()); packet.extend(signature.iter()); packet.extend(bytes.iter()); - let signed_hash = (&packet[32..]).sha3(); + let signed_hash = keccak(&packet[32..]); packet[0..32].clone_from_slice(&signed_hash); self.send_to(packet, address.clone()); } @@ -285,7 +285,7 @@ impl Discovery { fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec { let mut found: BTreeMap> = BTreeMap::new(); let mut count = 0; - let target_hash = target.sha3(); + let target_hash = keccak(target); // Sort nodes by distance to target for bucket in buckets { @@ -368,14 +368,14 @@ impl Discovery { return Err(NetworkError::BadProtocol); } - let hash_signed = (&packet[32..]).sha3(); + let hash_signed = keccak(&packet[32..]); if hash_signed[..] != packet[0..32] { return Err(NetworkError::BadProtocol); } let signed = &packet[(32 + 65)..]; let signature = H520::from_slice(&packet[32..(32 + 65)]); - let node_id = recover(&signature.into(), &signed.sha3())?; + let node_id = recover(&signature.into(), &keccak(signed))?; let packet_id = signed[0]; let rlp = UntrustedRlp::new(&signed[1..]); @@ -419,7 +419,7 @@ impl Discovery { self.update_node(entry.clone()); added_map.insert(node.clone(), entry); } - let hash = rlp.as_raw().sha3(); + let hash = keccak(rlp.as_raw()); let mut response = RlpStream::new_list(2); dest.to_rlp_list(&mut response); response.append(&hash); @@ -636,7 +636,7 @@ mod tests { buckets[0].nodes.push_back(BucketEntry { address: NodeEntry { id: NodeId::new(), endpoint: ep.clone() }, timeout: None, - id_hash: NodeId::new().sha3(), + id_hash: keccak(NodeId::new()), }); } let nearest = Discovery::nearest_node_entries(&NodeId::new(), &buckets); diff --git a/util/network/src/error.rs b/util/network/src/error.rs index 11cf8fdf0..54773d573 100644 --- a/util/network/src/error.rs +++ b/util/network/src/error.rs @@ -16,7 +16,6 @@ use io::IoError; use rlp::*; -use util::UtilError; use std::fmt; use ethkey::Error as KeyError; use crypto::Error as CryptoError; @@ -96,8 +95,8 @@ pub enum NetworkError { PeerNotFound, /// Peer is diconnected. Disconnect(DisconnectReason), - /// Util error. - Util(UtilError), + /// Invalid NodeId + InvalidNodeId, /// Socket IO error. Io(IoError), /// Error concerning the network address parsing subsystem. 
@@ -125,7 +124,7 @@ impl fmt::Display for NetworkError { AddressResolve(Some(ref err)) => format!("{}", err), AddressResolve(_) => "Failed to resolve network address.".into(), StdIo(ref err) => format!("{}", err), - Util(ref err) => format!("{}", err), + InvalidNodeId => "Invalid node id".into(), OversizedPacket => "Packet is too large".into(), }; @@ -151,12 +150,6 @@ impl From for NetworkError { } } -impl From for NetworkError { - fn from(err: UtilError) -> NetworkError { - NetworkError::Util(err) - } -} - impl From for NetworkError { fn from(_err: KeyError) -> Self { NetworkError::Auth diff --git a/util/network/src/handshake.rs b/util/network/src/handshake.rs index f5c4e118a..40ad57d6f 100644 --- a/util/network/src/handshake.rs +++ b/util/network/src/handshake.rs @@ -16,9 +16,9 @@ use std::sync::Arc; use rand::random; +use hash::write_keccak; use mio::tcp::*; -use util::hash::*; -use util::sha3::Hashable; +use bigint::hash::*; use util::bytes::Bytes; use rlp::*; use connection::{Connection}; @@ -273,7 +273,7 @@ impl Handshake { // E(remote-pubk, S(ecdhe-random, ecdh-shared-secret^nonce) || H(ecdhe-random-pubk) || pubk || nonce || 0x0) let shared = *ecdh::agree(secret, &self.id)?; sig.copy_from_slice(&*sign(self.ecdhe.secret(), &(&shared ^ &self.nonce))?); - self.ecdhe.public().sha3_into(hepubk); + write_keccak(self.ecdhe.public(), hepubk); pubk.copy_from_slice(public); nonce.copy_from_slice(&self.nonce); } @@ -333,7 +333,7 @@ mod test { use std::sync::Arc; use rustc_hex::FromHex; use super::*; - use util::hash::H256; + use bigint::hash::H256; use io::*; use mio::tcp::TcpStream; use stats::NetworkStats; diff --git a/util/network/src/host.rs b/util/network/src/host.rs index d74b2fa6e..13e5f74a3 100644 --- a/util/network/src/host.rs +++ b/util/network/src/host.rs @@ -25,11 +25,11 @@ use std::path::{Path, PathBuf}; use std::io::{Read, Write, ErrorKind}; use std::fs; use ethkey::{KeyPair, Secret, Random, Generator}; +use hash::keccak; use mio::*; use mio::deprecated::{EventLoop}; use mio::tcp::*; -use util::hash::*; -use util::Hashable; +use bigint::hash::*; use util::version; use rlp::*; use session::{Session, SessionInfo, SessionData}; @@ -354,8 +354,8 @@ impl HostInfo { /// Increments and returns connection nonce. 
pub fn next_nonce(&mut self) -> H256 { - self.nonce = self.nonce.sha3(); - self.nonce.clone() + self.nonce = keccak(&self.nonce); + self.nonce } } @@ -694,7 +694,7 @@ impl Host { let max_handshakes_per_round = max_handshakes / 2; let mut started: usize = 0; - for id in nodes.filter(|id| + for id in nodes.filter(|id| !self.have_session(id) && !self.connecting_to(id) && *id != self_id && diff --git a/util/network/src/lib.rs b/util/network/src/lib.rs index 5695b8196..beece7ad9 100644 --- a/util/network/src/lib.rs +++ b/util/network/src/lib.rs @@ -59,6 +59,7 @@ extern crate ethcore_io as io; extern crate ethcore_util as util; +extern crate ethcore_bigint as bigint; extern crate parking_lot; extern crate mio; extern crate tiny_keccak; @@ -78,6 +79,7 @@ extern crate bytes; extern crate path; extern crate ethcore_logger; extern crate ipnetwork; +extern crate hash; #[macro_use] extern crate log; @@ -156,7 +158,7 @@ pub struct IpFilter { pub predefined: AllowIP, pub custom_allow: Vec, pub custom_block: Vec, -} +} impl Default for IpFilter { fn default() -> Self { diff --git a/util/network/src/node_table.rs b/util/network/src/node_table.rs index f9d4e9589..1225c750c 100644 --- a/util/network/src/node_table.rs +++ b/util/network/src/node_table.rs @@ -25,8 +25,7 @@ use std::path::{PathBuf}; use std::fmt; use std::fs; use std::io::{Read, Write}; -use util::hash::*; -use util::UtilError; +use bigint::hash::*; use rlp::*; use time::Tm; use error::NetworkError; @@ -58,7 +57,7 @@ impl NodeEndpoint { pub fn is_allowed(&self, filter: &IpFilter) -> bool { (self.is_allowed_by_predefined(&filter.predefined) || filter.custom_allow.iter().any(|ipnet| { self.address.ip().is_within(ipnet) - })) + })) && !filter.custom_block.iter().any(|ipnet| { self.address.ip().is_within(ipnet) }) @@ -175,7 +174,7 @@ impl FromStr for Node { type Err = NetworkError; fn from_str(s: &str) -> Result { let (id, endpoint) = if s.len() > 136 && &s[0..8] == "enode://" && &s[136..137] == "@" { - (s[8..136].parse().map_err(UtilError::from)?, NodeEndpoint::from_str(&s[137..])?) + (s[8..136].parse().map_err(|_| NetworkError::InvalidNodeId)?, NodeEndpoint::from_str(&s[137..])?) } else { (NodeId::new(), NodeEndpoint::from_str(s)?) @@ -373,7 +372,7 @@ pub fn is_valid_node_url(url: &str) -> bool { mod tests { use super::*; use std::net::{SocketAddr, SocketAddrV4, Ipv4Addr}; - use util::H512; + use bigint::hash::H512; use std::str::FromStr; use devtools::*; use ipnetwork::IpNetwork; diff --git a/util/network/src/session.rs b/util/network/src/session.rs index 8affb4cf7..dbdf065eb 100644 --- a/util/network/src/session.rs +++ b/util/network/src/session.rs @@ -23,7 +23,7 @@ use std::collections::HashMap; use mio::*; use mio::deprecated::{Handler, EventLoop}; use mio::tcp::*; -use util::hash::*; +use bigint::hash::*; use rlp::*; use connection::{EncryptedConnection, Packet, Connection}; use handshake::Handshake; diff --git a/util/network/src/tests.rs b/util/network/src/tests.rs index d743318ab..f6c8ed1e8 100644 --- a/util/network/src/tests.rs +++ b/util/network/src/tests.rs @@ -19,7 +19,8 @@ use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; use std::sync::Arc; use std::thread; use std::time::*; -use util::{Bytes, Mutex}; +use parking_lot::Mutex; +use util::Bytes; use io::TimerToken; use ethkey::{Random, Generator}; diff --git a/util/rlp/src/common.rs b/util/rlp/src/common.rs index 07c75cf98..1f4e2c517 100644 --- a/util/rlp/src/common.rs +++ b/util/rlp/src/common.rs @@ -21,16 +21,16 @@ lazy_static! 
{ } static EMPTY_RLPS: &'static [&'static [u8]] = &[ - // RLP of SHA3_NULL_RLP + // RLP of KECCAK_NULL_RLP &[160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33], - // RLP of SHA3_EMPTY + // RLP of KECCAK_EMPTY &[160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112] ]; static COMMON_RLPS: &'static [&'static [u8]] = &[ - // RLP of SHA3_NULL_RLP + // RLP of KECCAK_NULL_RLP &[160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33], - // RLP of SHA3_EMPTY + // RLP of KECCAK_EMPTY &[160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112], // Other RLPs found in blocks DB using the test below. &[160, 29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71], diff --git a/util/semantic_version/Cargo.toml b/util/semantic_version/Cargo.toml new file mode 100644 index 000000000..5cd888d0a --- /dev/null +++ b/util/semantic_version/Cargo.toml @@ -0,0 +1,6 @@ +[package] +name = "semantic_version" +version = "0.1.0" +authors = ["Parity Technologies "] + +[dependencies] diff --git a/util/src/semantic_version.rs b/util/semantic_version/src/lib.rs similarity index 95% rename from util/src/semantic_version.rs rename to util/semantic_version/src/lib.rs index fe4b8727f..0a8eeb499 100644 --- a/util/src/semantic_version.rs +++ b/util/semantic_version/src/lib.rs @@ -20,8 +20,8 @@ /// /// # Example /// ``` -/// extern crate ethcore_util as util; -/// use util::semantic_version::*; +/// extern crate semantic_version; +/// use semantic_version::*; /// /// fn main() { /// assert_eq!(SemanticVersion::new(1, 2, 3).as_u32(), 0x010203); diff --git a/util/src/error.rs b/util/src/error.rs index b0e887434..59496f86f 100644 --- a/util/src/error.rs +++ b/util/src/error.rs @@ -16,9 +16,12 @@ //! General error types for use in ethcore. +#![allow(missing_docs)] +#![allow(unknown_lints)] + +use std::{self, fmt}; use rustc_hex::FromHexError; use rlp::DecoderError; -use std::fmt; use bigint::hash::H256; #[derive(Debug)] @@ -41,124 +44,23 @@ impl fmt::Display for BaseDataError { } } -#[derive(Debug)] -/// General error type which should be capable of representing all errors in ethcore. -pub enum UtilError { - /// Error concerning the Rust standard library's IO subsystem. - StdIo(::std::io::Error), - /// Error concerning the hex conversion logic. - FromHex(FromHexError), - /// Error concerning the database abstraction logic. - BaseData(BaseDataError), - /// Error concerning the RLP decoder. - Decoder(DecoderError), - /// Miscellaneous error described by a string. - SimpleString(String), - /// Error from a bad input size being given for the needed output. - BadSize, - /// Error from snappy. 
- Snappy(::snappy::InvalidInput), -} - -impl fmt::Display for UtilError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - UtilError::StdIo(ref err) => f.write_fmt(format_args!("{}", err)), - UtilError::FromHex(ref err) => f.write_fmt(format_args!("{}", err)), - UtilError::BaseData(ref err) => f.write_fmt(format_args!("{}", err)), - UtilError::Decoder(ref err) => f.write_fmt(format_args!("{}", err)), - UtilError::SimpleString(ref msg) => f.write_str(msg), - UtilError::BadSize => f.write_str("Bad input size."), - UtilError::Snappy(ref err) => f.write_fmt(format_args!("{}", err)), - } +impl std::error::Error for BaseDataError { + fn description(&self) -> &str { + "Error in database subsystem" } } -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -/// Error indicating an expected value was not found. -pub struct Mismatch { - /// Value expected. - pub expected: T, - /// Value found. - pub found: T, -} +error_chain! { + types { + UtilError, ErrorKind, ResultExt, Result; + } -impl fmt::Display for Mismatch { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_fmt(format_args!("Expected {}, found {}", self.expected, self.found)) + foreign_links { + Io(::std::io::Error); + FromHex(FromHexError); + Decoder(DecoderError); + Snappy(::snappy::InvalidInput); + BaseData(BaseDataError); } } -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -/// Error indicating value found is outside of a valid range. -pub struct OutOfBounds { - /// Minimum allowed value. - pub min: Option, - /// Maximum allowed value. - pub max: Option, - /// Value found. - pub found: T, -} - -impl fmt::Display for OutOfBounds { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let msg = match (self.min.as_ref(), self.max.as_ref()) { - (Some(min), Some(max)) => format!("Min={}, Max={}", min, max), - (Some(min), _) => format!("Min={}", min), - (_, Some(max)) => format!("Max={}", max), - (None, None) => "".into(), - }; - - f.write_fmt(format_args!("Value {} out of bounds. {}", self.found, msg)) - } -} - -impl From for UtilError { - fn from(err: FromHexError) -> UtilError { - UtilError::FromHex(err) - } -} - -impl From for UtilError { - fn from(err: BaseDataError) -> UtilError { - UtilError::BaseData(err) - } -} - -impl From<::std::io::Error> for UtilError { - fn from(err: ::std::io::Error) -> UtilError { - UtilError::StdIo(err) - } -} - -impl From<::rlp::DecoderError> for UtilError { - fn from(err: ::rlp::DecoderError) -> UtilError { - UtilError::Decoder(err) - } -} - -impl From for UtilError { - fn from(err: String) -> UtilError { - UtilError::SimpleString(err) - } -} - -impl From<::snappy::InvalidInput> for UtilError { - fn from(err: ::snappy::InvalidInput) -> UtilError { - UtilError::Snappy(err) - } -} - -// TODO: uncomment below once https://github.com/rust-lang/rust/issues/27336 sorted. -/*#![feature(concat_idents)] -macro_rules! assimilate { - ($name:ident) => ( - impl From for Error { - fn from(err: concat_idents!($name, Error)) -> Error { - Error:: $name (err) - } - } - ) -} -assimilate!(FromHex); -assimilate!(BaseData);*/ diff --git a/util/src/hashdb.rs b/util/src/hashdb.rs index ca4120702..3c149a1a3 100644 --- a/util/src/hashdb.rs +++ b/util/src/hashdb.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . //! Database of byte-slices keyed to their Keccak hash. 
-use hash::*; +use bigint::hash::*; use std::collections::HashMap; use elastic_array::ElasticArray128; @@ -48,14 +48,15 @@ pub trait HashDB: AsHashDB + Send + Sync { /// /// # Examples /// ```rust + /// extern crate hash; /// extern crate ethcore_util; /// use ethcore_util::hashdb::*; /// use ethcore_util::memorydb::*; - /// use ethcore_util::sha3::*; + /// use hash::keccak; /// fn main() { /// let mut m = MemoryDB::new(); /// let hello_bytes = "Hello world!".as_bytes(); - /// assert!(!m.contains(&hello_bytes.sha3())); + /// assert!(!m.contains(&keccak(hello_bytes))); /// let key = m.insert(hello_bytes); /// assert!(m.contains(&key)); /// m.remove(&key); @@ -71,9 +72,10 @@ pub trait HashDB: AsHashDB + Send + Sync { /// # Examples /// ```rust /// extern crate ethcore_util; + /// extern crate ethcore_bigint; /// use ethcore_util::hashdb::*; /// use ethcore_util::memorydb::*; - /// use ethcore_util::hash::*; + /// use ethcore_bigint::hash::*; /// fn main() { /// let mut m = MemoryDB::new(); /// let key = m.insert("Hello world!".as_bytes()); @@ -91,13 +93,14 @@ pub trait HashDB: AsHashDB + Send + Sync { /// # Examples /// ```rust /// extern crate ethcore_util; + /// extern crate hash; /// use ethcore_util::hashdb::*; /// use ethcore_util::memorydb::*; - /// use ethcore_util::sha3::*; + /// use hash::keccak; /// fn main() { /// let mut m = MemoryDB::new(); /// let d = "Hello world!".as_bytes(); - /// let key = &d.sha3(); + /// let key = &keccak(d); /// m.remove(key); // OK - we now owe an insertion. /// assert!(!m.contains(key)); /// m.remove(key); // OK - we now owe two insertions. diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 8c87ef623..e77c908d3 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -25,7 +25,9 @@ use memorydb::*; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use super::traits::JournalDB; use kvdb::{KeyValueDB, DBTransaction}; -use {Bytes, H256, BaseDataError, UtilError}; +use bigint::hash::H256; +use error::{BaseDataError, UtilError}; +use {Bytes}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. 
@@ -205,11 +207,12 @@ mod tests { #![cfg_attr(feature="dev", allow(similar_names))] use std::path::Path; + use keccak::keccak; use hashdb::{HashDB, DBValue}; use super::*; use journaldb::traits::JournalDB; use kvdb::Database; - use {Hashable, H32}; + use bigint::hash::H32; #[test] fn insert_same_in_fork() { @@ -217,18 +220,18 @@ mod tests { let mut jdb = ArchiveDB::new_temp(); let x = jdb.insert(b"X"); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); - jdb.commit_batch(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); - jdb.commit_batch(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); + jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); jdb.remove(&x); - jdb.commit_batch(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); let x = jdb.insert(b"X"); - jdb.commit_batch(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); - jdb.commit_batch(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); - jdb.commit_batch(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); + jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); assert!(jdb.contains(&x)); } @@ -238,16 +241,16 @@ mod tests { // history is 3 let mut jdb = ArchiveDB::new_temp(); let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h)); jdb.remove(&h); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h)); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h)); - jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h)); - jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&h)); } @@ -256,13 +259,13 @@ mod tests { fn multiple_owed_removal_not_allowed() { let mut jdb = ArchiveDB::new_temp(); let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h)); jdb.remove(&h); jdb.remove(&h); // commit_batch would call journal_under(), // and we don't allow multiple owned removals. 
- jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); } #[test] @@ -272,29 +275,29 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); jdb.remove(&bar); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); let foo = jdb.insert(b"foo"); jdb.remove(&baz); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&baz)); jdb.remove(&foo); - jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo)); - jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); } #[test] @@ -304,22 +307,22 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); - jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo)); } @@ -329,16 +332,16 @@ mod tests { let mut jdb = ArchiveDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); jdb.insert(b"foo"); assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo)); - jdb.commit_batch(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo)); } @@ -346,16 +349,16 @@ mod tests { fn fork_same_key() { // history is 1 let mut jdb = ArchiveDB::new_temp(); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &b"2a".sha3(), Some((1, 
b"1a".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.contains(&foo)); } @@ -375,21 +378,21 @@ mod tests { // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), DBValue::from_slice(b"bar")); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); foo }; { let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); } { let mut jdb = new_db(&dir); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); } } @@ -402,24 +405,24 @@ mod tests { let mut jdb = new_db(&dir); // history is 1 let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); // foo is ancient history. jdb.insert(b"foo"); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); foo }; { let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - jdb.commit_batch(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); } } @@ -432,19 +435,19 @@ mod tests { // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); (foo, bar, baz) }; { let mut jdb = new_db(&dir); - jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo)); } } @@ -456,7 +459,7 @@ mod tests { let key = { let mut jdb = new_db(temp.as_path().as_path()); let key = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); key }; diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 0d0681a03..2502fe219 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -28,7 +28,9 @@ use memorydb::*; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use super::traits::JournalDB; use kvdb::{KeyValueDB, DBTransaction}; -use {H256, BaseDataError, UtilError, Bytes}; +use bigint::hash::H256; +use error::{BaseDataError, UtilError}; +use {Bytes}; #[derive(Clone, PartialEq, Eq)] struct RefInfo { @@ -158,16 +160,14 @@ impl EarlyMergeDB { backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. 
Some issue with your hard disk?").is_some() } - fn insert_keys(inserts: &[(H256, DBValue)], backing: &KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction, trace: bool) { + fn insert_keys(inserts: &[(H256, DBValue)], backing: &KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction) { for &(ref h, ref d) in inserts { match refs.entry(*h) { Entry::Occupied(mut entry) => { let info = entry.get_mut(); // already counting. increment. info.queue_refs += 1; - if trace { - trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, info.queue_refs); - } + trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, info.queue_refs); }, Entry::Vacant(entry) => { // this is the first entry for this node in the journal. @@ -175,16 +175,12 @@ impl EarlyMergeDB { if in_archive { // already in the backing DB. start counting, and remember it was already in. Self::set_already_in(batch, col, h); - if trace { - trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h); - } + trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h); } else { // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. //Self::reset_already_in(&h); assert!(!Self::is_already_in(backing, col, h)); - if trace { - trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h); - } + trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h); batch.put(col, h, d); } entry.insert(RefInfo { @@ -217,7 +213,7 @@ impl EarlyMergeDB { trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); } - fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction, col: Option<u32>, from: RemoveFrom, trace: bool) { + fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction, col: Option<u32>, from: RemoveFrom) { // with a remove on {queue_refs: 1, in_archive: true}, we have two options: // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) // - convert to {queue_refs: 0, in_archive: true} (i.e.
remove it from the conceptual queue) @@ -230,16 +226,12 @@ impl EarlyMergeDB { if entry.get().in_archive && from == RemoveFrom::Archive { entry.get_mut().in_archive = false; Self::reset_already_in(batch, col, h); - if trace { - trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Reducing to queue only and recording", h); - } + trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Reducing to queue only and recording", h); continue; } if entry.get().queue_refs > 1 { entry.get_mut().queue_refs -= 1; - if trace { - trace!(target: "jdb.fine", " remove({}): In queue > 1 refs: Decrementing ref count to {}", h, entry.get().queue_refs); - } + trace!(target: "jdb.fine", " remove({}): In queue > 1 refs: Decrementing ref count to {}", h, entry.get().queue_refs); continue; } @@ -250,16 +242,12 @@ impl EarlyMergeDB { (1, true) => { entry.remove(); Self::reset_already_in(batch, col, h); - if trace { - trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Removing from queue and leaving in archive", h); - } + trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Removing from queue and leaving in archive", h); }, (1, false) => { entry.remove(); batch.delete(col, h); - if trace { - trace!(target: "jdb.fine", " remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h); - } + trace!(target: "jdb.fine", " remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h); }, _ => panic!("Invalid value in refs: {:?}", entry.get()), } @@ -268,9 +256,7 @@ impl EarlyMergeDB { // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. //assert!(!Self::is_already_in(db, &h)); batch.delete(col, h); - if trace { - trace!(target: "jdb.fine", " remove({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h); - } + trace!(target: "jdb.fine", " remove({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h); }, } } @@ -400,8 +386,6 @@ impl JournalDB for EarlyMergeDB { } fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> { - let trace = false; - // record new commit's details. let mut refs = match self.refs.as_ref() { Some(refs) => refs.write(), @@ -425,9 +409,7 @@ impl JournalDB for EarlyMergeDB { let drained = self.overlay.drain(); - if trace { - trace!(target: "jdb", "commit: #{} ({})", now, id); - } + trace!(target: "jdb", "commit: #{} ({})", now, id); let removes: Vec<H256> = drained .iter() @@ -455,14 +437,12 @@ impl JournalDB for EarlyMergeDB { r.append(&k); } r.append_list(&removes); - Self::insert_keys(&inserts, &*self.backing, self.column, &mut refs, batch, trace); + Self::insert_keys(&inserts, &*self.backing, self.column, &mut refs, batch); let ins = inserts.iter().map(|&(k, _)| k).collect::<Vec<H256>>(); - if trace { - trace!(target: "jdb.ops", " Deletes: {:?}", removes); - trace!(target: "jdb.ops", " Inserts: {:?}", ins); - } + trace!(target: "jdb.ops", " Deletes: {:?}", removes); + trace!(target: "jdb.ops", " Inserts: {:?}", ins); batch.put(self.column, &last, r.as_raw()); if self.latest_era.map_or(true, |e| now > e) { @@ -476,8 +456,6 @@ impl JournalDB for EarlyMergeDB { #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> { - let trace = false; - let mut refs = self.refs.as_ref().unwrap().write(); // apply old commits' details @@ -499,9 +477,9 @@ impl JournalDB for EarlyMergeDB { // Collect keys to be removed. Canon block - remove the (enacted) deletes.
let deletes: Vec<H256> = rlp.list_at(2); trace!(target: "jdb.ops", " Expunging: {:?}", deletes); - Self::remove_keys(&deletes, &mut refs, batch, self.column, RemoveFrom::Archive, trace); + Self::remove_keys(&deletes, &mut refs, batch, self.column, RemoveFrom::Archive); - trace!(target: "jdb.ops", " Finalising: {:?}", inserts); + trace!(target: "jdb.ops", " Finalising: {:?}", inserts); for k in &inserts { match refs.get(k).cloned() { None => { @@ -527,7 +505,7 @@ impl JournalDB for EarlyMergeDB { } else { // Collect keys to be removed. Non-canon block - remove the (reverted) inserts. trace!(target: "jdb.ops", " Reverting: {:?}", inserts); - Self::remove_keys(&inserts, &mut refs, batch, self.column, RemoveFrom::Queue, trace); + Self::remove_keys(&inserts, &mut refs, batch, self.column, RemoveFrom::Queue); } batch.delete(self.column, &last); @@ -535,7 +513,7 @@ impl JournalDB for EarlyMergeDB { } trace!(target: "jdb", "EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); - trace!(target: "jdb", "OK: {:?}", refs.clone()); + trace!(target: "jdb", "OK: {:?}", &*refs); Ok(0) } @@ -577,12 +555,13 @@ mod tests { #![cfg_attr(feature="dev", allow(similar_names))] use std::path::Path; + use keccak::keccak; use hashdb::{HashDB, DBValue}; use super::*; use super::super::traits::JournalDB; use ethcore_logger::init_log; use kvdb::{DatabaseConfig}; - use {Hashable, H32}; + use bigint::hash::H32; #[test] fn insert_same_in_fork() { init_log(); let mut jdb = EarlyMergeDB::new_temp(); let x = jdb.insert(b"X"); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&x); - jdb.commit_batch(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(b"X"); - jdb.commit_batch(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&x)); @@ -618,17 +597,17 @@ mod tests { fn insert_older_era() { let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0a".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let bar = jdb.insert(b"bar"); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0a")))).unwrap(); assert!(jdb.can_reconstruct_refs());
jdb.remove(&bar); - jdb.commit_batch(0, &b"0b".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -639,20 +618,20 @@ mod tests { // history is 3 let mut jdb = EarlyMergeDB::new_temp(); let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); jdb.remove(&h); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&h)); } @@ -664,7 +643,7 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -672,7 +651,7 @@ mod tests { jdb.remove(&foo); jdb.remove(&bar); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -680,20 +659,20 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.remove(&baz); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(jdb.contains(&baz)); jdb.remove(&foo); - jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(!jdb.contains(&baz)); - jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); assert!(!jdb.contains(&bar)); @@ -707,25 +686,25 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, 
&keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); - jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); @@ -738,19 +717,19 @@ mod tests { let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit_batch(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } @@ -759,24 +738,24 @@ mod tests { fn fork_same_key_one() { let mut jdb = EarlyMergeDB::new_temp(); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } @@ -784,24 +763,24 @@ mod tests { #[test] fn fork_same_key_other() { let mut jdb = EarlyMergeDB::new_temp(); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); 
assert!(jdb.contains(&foo)); } @@ -809,33 +788,33 @@ mod tests { #[test] fn fork_ins_del_ins() { let mut jdb = EarlyMergeDB::new_temp(); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3a"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3b"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4a"), Some((2, keccak(b"2a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5a"), Some((3, keccak(b"3a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -856,7 +835,7 @@ mod tests { // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), DBValue::from_slice(b"bar")); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); foo }; @@ -864,7 +843,7 @@ mod tests { { let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -872,7 +851,7 @@ mod tests { let mut jdb = new_db(&dir); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -886,22 +865,22 @@ mod tests { // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(3, &b"3".sha3(), None).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); 
assert!(jdb.can_reconstruct_refs()); } @@ -912,43 +891,43 @@ mod tests { // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(1, &b"1a".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(1, &b"1b".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(2, &b"2a".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(2, &b"2b".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(3, &b"3a".sha3(), None).unwrap(); + jdb.commit_batch(3, &keccak(b"3a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(3, &b"3b".sha3(), None).unwrap(); + jdb.commit_batch(3, &keccak(b"3b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -958,25 +937,25 @@ mod tests { // history is 1 let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
jdb.remove(&foo); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); // BROKEN assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -987,30 +966,30 @@ mod tests { // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &b"3".sha3(), None).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); jdb.remove(&bar); - jdb.commit_batch(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(6, &keccak(b"6"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); jdb.insert(b"bar"); - jdb.commit_batch(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(7, &keccak(b"7"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -1021,26 +1000,26 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let foo = b"foo".sha3(); + let foo = keccak(b"foo"); { let mut jdb = new_db(&dir); // history is 1 jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
jdb.remove(&foo); - jdb.commit_batch(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.insert(b"foo"); - jdb.commit_batch(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); @@ -1049,7 +1028,7 @@ mod tests { let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit_batch(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); @@ -1057,7 +1036,7 @@ mod tests { }; { let mut jdb = new_db(&dir); - jdb.commit_batch(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); @@ -1065,7 +1044,7 @@ mod tests { }; { let mut jdb = new_db(&dir); - jdb.commit_batch(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -1080,22 +1059,22 @@ mod tests { // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { let mut jdb = new_db(&dir); - jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs index 975abd8e8..c450e53d8 100644 --- a/util/src/journaldb/overlayrecentdb.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -27,7 +27,9 @@ use memorydb::*; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use kvdb::{KeyValueDB, DBTransaction}; use super::JournalDB; -use {H256, BaseDataError, UtilError, Bytes, H256FastMap}; +use bigint::hash::{H256, H256FastMap}; +use error::{BaseDataError, UtilError}; +use {Bytes}; /// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay /// and, possibly, latent-removal semantics. 
@@ -461,12 +463,13 @@ mod tests { #![cfg_attr(feature="dev", allow(similar_names))] use std::path::Path; + use keccak::keccak; use super::*; use hashdb::{HashDB, DBValue}; use ethcore_logger::init_log; use journaldb::JournalDB; use kvdb::Database; - use {H32, Hashable}; + use bigint::hash::H32; fn new_db(path: &Path) -> OverlayRecentDB { let backing = Arc::new(Database::open_default(path.to_str().unwrap()).unwrap()); @@ -479,25 +482,25 @@ mod tests { let mut jdb = OverlayRecentDB::new_temp(); let x = jdb.insert(b"X"); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&x); - jdb.commit_batch(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(b"X"); - jdb.commit_batch(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&x)); @@ -508,20 +511,20 @@ mod tests { // history is 3 let mut jdb = OverlayRecentDB::new_temp(); let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); jdb.remove(&h); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h)); - jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&h)); } @@ -533,7 +536,7 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -541,7 +544,7 @@ mod tests { jdb.remove(&foo); jdb.remove(&bar); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + 
jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -549,20 +552,20 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.remove(&baz); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(jdb.contains(&baz)); jdb.remove(&foo); - jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(!jdb.contains(&baz)); - jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); assert!(!jdb.contains(&bar)); @@ -576,25 +579,25 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); - jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); @@ -607,19 +610,19 @@ mod tests { let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit_batch(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } @@ -627,24 +630,24 @@ mod tests { #[test] fn fork_same_key_one() { let mut jdb = OverlayRecentDB::new_temp(); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1b".sha3(), 
Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } @@ -653,24 +656,24 @@ mod tests { fn fork_same_key_other() { let mut jdb = OverlayRecentDB::new_temp(); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); - jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); } @@ -679,33 +682,33 @@ mod tests { fn fork_ins_del_ins() { let mut jdb = OverlayRecentDB::new_temp(); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3a"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3b"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4a"), Some((2, keccak(b"2a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5a"), Some((3, keccak(b"3a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -720,7 +723,7 @@ mod tests { // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), DBValue::from_slice(b"bar")); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); foo }; 
@@ -728,7 +731,7 @@ mod tests { { let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -736,7 +739,7 @@ mod tests { let mut jdb = new_db(&dir); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -749,22 +752,22 @@ mod tests { // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(3, &b"3".sha3(), None).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -775,43 +778,43 @@ mod tests { // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(1, &b"1a".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(1, &b"1b".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(2, &b"2a".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(2, &b"2b".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(3, &b"3a".sha3(), None).unwrap(); + jdb.commit_batch(3, &keccak(b"3a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); - jdb.commit_batch(3, &b"3b".sha3(), None).unwrap(); + jdb.commit_batch(3, &keccak(b"3b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -820,25 +823,25 @@ mod tests { let mut jdb = 
OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.remove(&foo); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); - jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); // BROKEN assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.remove(&foo); - jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -848,30 +851,30 @@ mod tests { let mut jdb = OverlayRecentDB::new_temp(); // history is 4 let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &b"3".sha3(), None).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); jdb.remove(&bar); - jdb.commit_batch(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(6, &keccak(b"6"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); jdb.insert(b"bar"); - jdb.commit_batch(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(7, &keccak(b"7"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -882,26 +885,26 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let foo = b"foo".sha3(); + let foo = keccak(b"foo"); { let mut jdb = new_db(&dir); // history is 1 jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
jdb.remove(&foo); - jdb.commit_batch(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); jdb.insert(b"foo"); - jdb.commit_batch(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); @@ -910,7 +913,7 @@ mod tests { let mut jdb = new_db(&dir); jdb.remove(&foo); - jdb.commit_batch(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); @@ -918,7 +921,7 @@ mod tests { }; { let mut jdb = new_db(&dir); - jdb.commit_batch(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); @@ -926,7 +929,7 @@ mod tests { }; { let mut jdb = new_db(&dir); - jdb.commit_batch(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo)); } @@ -941,22 +944,22 @@ mod tests { // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { let mut jdb = new_db(&dir); - jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); @@ -968,17 +971,17 @@ mod tests { fn insert_older_era() { let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0a".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let bar = jdb.insert(b"bar"); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); - jdb.commit_batch(0, &b"0b".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); @@ -1010,28 +1013,28 @@ mod tests { // single journalled era. let _key = jdb.insert(b"hello!"); let mut batch = jdb.backing().transaction(); - jdb.journal_under(&mut batch, 0, &b"0".sha3()).unwrap(); + jdb.journal_under(&mut batch, 0, &keccak(b"0")).unwrap(); jdb.backing().write_buffered(batch); assert_eq!(jdb.earliest_era(), Some(0)); // second journalled era. 
let mut batch = jdb.backing().transaction(); - jdb.journal_under(&mut batch, 1, &b"1".sha3()).unwrap(); + jdb.journal_under(&mut batch, 1, &keccak(b"1")).unwrap(); jdb.backing().write_buffered(batch); assert_eq!(jdb.earliest_era(), Some(0)); // single journalled era. let mut batch = jdb.backing().transaction(); - jdb.mark_canonical(&mut batch, 0, &b"0".sha3()).unwrap(); + jdb.mark_canonical(&mut batch, 0, &keccak(b"0")).unwrap(); jdb.backing().write_buffered(batch); assert_eq!(jdb.earliest_era(), Some(1)); // no journalled eras. let mut batch = jdb.backing().transaction(); - jdb.mark_canonical(&mut batch, 1, &b"1".sha3()).unwrap(); + jdb.mark_canonical(&mut batch, 1, &keccak(b"1")).unwrap(); jdb.backing().write_buffered(batch); assert_eq!(jdb.earliest_era(), Some(1)); diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index 1b9c9ded8..7b970d640 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -26,7 +26,8 @@ use memorydb::MemoryDB; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use super::traits::JournalDB; use kvdb::{KeyValueDB, DBTransaction}; -use {UtilError, H256, Bytes}; +use bigint::hash::H256; +use {UtilError, Bytes}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. @@ -213,26 +214,26 @@ mod tests { #![cfg_attr(feature="dev", allow(blacklisted_name))] #![cfg_attr(feature="dev", allow(similar_names))] + use keccak::keccak; use hashdb::{HashDB, DBValue}; use super::*; use super::super::traits::JournalDB; - use {Hashable}; #[test] fn long_history() { // history is 3 let mut jdb = RefCountedDB::new_temp(); let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h)); jdb.remove(&h); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h)); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h)); - jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h)); - jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h)); } @@ -242,16 +243,16 @@ mod tests { let mut jdb = RefCountedDB::new_temp(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(b"foo"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h); - jdb.commit_batch(1, &b"1".sha3(), None).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); - jdb.commit_batch(2, &b"2".sha3(), None).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(2)); - jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert_eq!(jdb.latest_era(), Some(3)); - jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert_eq!(jdb.latest_era(), Some(4)); } @@ -262,32 +263,32 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), 
None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); jdb.remove(&bar); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); let foo = jdb.insert(b"foo"); jdb.remove(&baz); - jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(jdb.contains(&baz)); jdb.remove(&foo); - jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(!jdb.contains(&baz)); - jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(!jdb.contains(&foo)); assert!(!jdb.contains(&bar)); assert!(!jdb.contains(&baz)); @@ -300,22 +301,22 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); - jdb.commit_batch(0, &b"0".sha3(), None).unwrap(); + jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); - jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar); - jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); assert!(jdb.contains(&baz)); - jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo)); assert!(!jdb.contains(&baz)); assert!(!jdb.contains(&bar)); diff --git a/util/src/journaldb/traits.rs b/util/src/journaldb/traits.rs index 1f14e9765..b70bbdcd9 100644 --- a/util/src/journaldb/traits.rs +++ b/util/src/journaldb/traits.rs @@ -19,7 +19,8 @@ use std::sync::Arc; use hashdb::*; use kvdb::{self, DBTransaction}; -use {Bytes, H256, UtilError}; +use bigint::hash::H256; +use {Bytes, UtilError}; /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 44321c531..1e2510c0a 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -279,7 +279,7 @@ impl KeyValueDB for InMemory { } fn restore(&self, _new_db: &str) -> Result<(), UtilError> { - Err(UtilError::SimpleString("Attempted to restore in-memory database".into())) + Err("Attempted to restore in-memory database".into()) } } @@ -902,7 +902,7 @@ impl Drop for Database { #[cfg(test)] mod tests { - use hash::H256; + use bigint::hash::H256; use super::*; use devtools::*; use std::str::FromStr; diff --git a/util/src/lib.rs b/util/src/lib.rs index 54d624a65..00399b823 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -88,25 +88,25 @@ //! 
``` extern crate rustc_hex; -extern crate rand; extern crate rocksdb; extern crate env_logger; -extern crate crypto as rcrypto; extern crate secp256k1; extern crate elastic_array; -extern crate time; extern crate ethcore_devtools as devtools; extern crate libc; extern crate target_info; extern crate ethcore_bigint as bigint; extern crate parking_lot; -extern crate ansi_term; extern crate tiny_keccak; extern crate rlp; extern crate regex; extern crate lru_cache; extern crate heapsize; extern crate ethcore_logger; +extern crate hash as keccak; + +#[macro_use] +extern crate error_chain; #[macro_use] extern crate log as rlog; @@ -116,42 +116,27 @@ pub mod common; pub mod error; pub mod bytes; pub mod misc; -pub mod vector; -pub mod sha3; pub mod hashdb; pub mod memorydb; pub mod migration; pub mod overlaydb; pub mod journaldb; pub mod kvdb; -pub mod triehash; pub mod trie; pub mod nibbleslice; pub mod nibblevec; -pub mod semantic_version; pub mod snappy; pub mod cache; -mod timer; pub use misc::*; pub use hashdb::*; pub use memorydb::MemoryDB; pub use overlaydb::*; pub use journaldb::JournalDB; -pub use triehash::*; pub use trie::{Trie, TrieMut, TrieDB, TrieDBMut, TrieFactory, TrieError, SecTrieDB, SecTrieDBMut}; -pub use semantic_version::*; pub use kvdb::*; -pub use timer::*; -pub use error::*; +pub use error::UtilError; pub use bytes::*; -pub use vector::*; -pub use sha3::*; -pub use bigint::prelude::*; -pub use bigint::hash; - -pub use ansi_term::{Colour, Style}; -pub use parking_lot::{Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; /// 160-bit integer representing account address -pub type Address = H160; +pub type Address = bigint::hash::H160; diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 6c9cb63fe..bffb7ea29 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -20,9 +20,9 @@ use std::mem; use std::collections::HashMap; use std::collections::hash_map::Entry; use heapsize::HeapSizeOf; -use hash::{H256FastMap, H256}; +use bigint::hash::{H256FastMap, H256}; use rlp::NULL_RLP; -use sha3::*; +use keccak::{KECCAK_NULL_RLP, keccak}; use hashdb::*; /// Reference-counted memory-based `HashDB` implementation. @@ -117,7 +117,7 @@ impl MemoryDB { /// Even when Some is returned, the data is only guaranteed to be useful /// when the refs > 0. pub fn raw(&self, key: &H256) -> Option<(DBValue, i32)> { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return Some((DBValue::from_slice(&NULL_RLP), 1)); } self.data.get(key).cloned() @@ -131,7 +131,7 @@ impl MemoryDB { /// Remove an element and delete it from storage if reference count reaches zero. /// If the value was purged, return the old value. 
pub fn remove_and_purge(&mut self, key: &H256) -> Option { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return None; } match self.data.entry(key.clone()) { @@ -170,7 +170,7 @@ impl MemoryDB { impl HashDB for MemoryDB { fn get(&self, key: &H256) -> Option { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return Some(DBValue::from_slice(&NULL_RLP)); } @@ -191,7 +191,7 @@ impl HashDB for MemoryDB { } fn contains(&self, key: &H256) -> bool { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return true; } @@ -203,9 +203,9 @@ impl HashDB for MemoryDB { fn insert(&mut self, value: &[u8]) -> H256 { if value == &NULL_RLP { - return SHA3_NULL_RLP.clone(); + return KECCAK_NULL_RLP.clone(); } - let key = value.sha3(); + let key = keccak(value); match self.data.entry(key) { Entry::Occupied(mut entry) => { let &mut (ref mut old_value, ref mut rc) = entry.get_mut(); @@ -241,7 +241,7 @@ impl HashDB for MemoryDB { } fn remove(&mut self, key: &H256) { - if key == &SHA3_NULL_RLP { + if key == &KECCAK_NULL_RLP { return; } @@ -259,12 +259,13 @@ impl HashDB for MemoryDB { #[cfg(test)] mod tests { + use keccak::keccak; use super::*; #[test] fn memorydb_remove_and_purge() { let hello_bytes = b"Hello world!"; - let hello_key = hello_bytes.sha3(); + let hello_key = keccak(hello_bytes); let mut m = MemoryDB::new(); m.remove(&hello_key); diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 80cc499b9..b4c0beb25 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -19,8 +19,8 @@ use std::sync::Arc; use std::collections::HashMap; use std::collections::hash_map::Entry; -use error::*; -use hash::*; +use error::{Result, BaseDataError}; +use bigint::hash::*; use rlp::*; use hashdb::*; use memorydb::*; @@ -56,14 +56,14 @@ impl OverlayDB { /// Commit all operations in a single batch. #[cfg(test)] - pub fn commit(&mut self) -> Result { + pub fn commit(&mut self) -> Result { let mut batch = self.backing.transaction(); let res = self.commit_to_batch(&mut batch)?; self.backing.write(batch).map(|_| res).map_err(|e| e.into()) } /// Commit all operations to given batch. - pub fn commit_to_batch(&mut self, batch: &mut DBTransaction) -> Result { + pub fn commit_to_batch(&mut self, batch: &mut DBTransaction) -> Result { let mut ret = 0u32; let mut deletes = 0usize; for i in self.overlay.drain() { diff --git a/util/src/sha3.rs b/util/src/sha3.rs deleted file mode 100644 index 8adcfbfee..000000000 --- a/util/src/sha3.rs +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Wrapper around tiny-keccak crate as well as common hash constants. -extern crate sha3 as sha3_ext; - -use std::io; -use tiny_keccak::Keccak; -use hash::H256; -use self::sha3_ext::*; - -/// Get the SHA3 (i.e. Keccak) hash of the empty bytes string. 
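
The `MemoryDB` hunks special-case `KECCAK_NULL_RLP` (the hash of the empty RLP string) so it is never actually stored, and `insert` now keys each value by `keccak(value)`. A minimal sketch of the resulting contract, assuming the `MemoryDB`/`HashDB` API shown in these hunks:

```rust
extern crate ethcore_util as util;
extern crate hash;

use util::{MemoryDB, HashDB};
use hash::{keccak, KECCAK_NULL_RLP};

fn main() {
    let mut db = MemoryDB::new();

    // The empty-RLP hash is always reported as present without touching storage.
    assert!(db.contains(&KECCAK_NULL_RLP));

    // insert() stores the value under keccak(value) and returns that key.
    let key = db.insert(b"Hello world!");
    assert_eq!(key, keccak(b"Hello world!"));
    assert!(db.contains(&key));

    // remove() drops the reference count back to zero.
    db.remove(&key);
    assert!(!db.contains(&key));
}
```
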
-pub const SHA3_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] ); - -/// The SHA3 of the RLP encoding of empty data. -pub const SHA3_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] ); - -/// The SHA3 of the RLP encoding of empty list. -pub const SHA3_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] ); - -/// Types implementing this trait are sha3able. -/// -/// ``` -/// extern crate ethcore_util as util; -/// use std::str::FromStr; -/// use util::sha3::*; -/// use util::hash::*; -/// -/// fn main() { -/// assert_eq!([0u8; 0].sha3(), H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap()); -/// } -/// ``` -pub trait Hashable { - /// Calculate SHA3 of this object. - fn sha3(&self) -> H256; - - /// Calculate SHA3 of this object and place result into dest. - fn sha3_into(&self, dest: &mut [u8]) { - self.sha3().copy_to(dest); - } -} - -impl Hashable for T where T: AsRef<[u8]> { - fn sha3(&self) -> H256 { - let mut ret: H256 = H256::zero(); - self.sha3_into(&mut *ret); - ret - } - fn sha3_into(&self, dest: &mut [u8]) { - let input: &[u8] = self.as_ref(); - - unsafe { - sha3_256(dest.as_mut_ptr(), dest.len(), input.as_ptr(), input.len()); - } - } -} - -/// Calculate SHA3 of given stream. -pub fn sha3(r: &mut io::BufRead) -> Result { - let mut output = [0u8; 32]; - let mut input = [0u8; 1024]; - let mut sha3 = Keccak::new_keccak256(); - - // read file - loop { - let some = r.read(&mut input)?; - if some == 0 { - break; - } - sha3.update(&input[0..some]); - } - - sha3.finalize(&mut output); - Ok(output.into()) -} - -#[cfg(test)] -mod tests { - use std::fs; - use std::io::{Write, BufReader}; - use super::*; - - #[test] - fn sha3_empty() { - assert_eq!([0u8; 0].sha3(), SHA3_EMPTY); - } - #[test] - fn sha3_as() { - assert_eq!([0x41u8; 32].sha3(), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8")); - } - - #[test] - fn should_sha3_a_file() { - // given - use devtools::RandomTempPath; - let path = RandomTempPath::new(); - // Prepare file - { - let mut file = fs::File::create(&path).unwrap(); - file.write_all(b"something").unwrap(); - } - - let mut file = BufReader::new(fs::File::open(&path).unwrap()); - // when - let hash = sha3(&mut file).unwrap(); - - // then - assert_eq!(format!("{:?}", hash), "68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87"); - } -} diff --git a/util/src/snappy.rs b/util/src/snappy.rs index fd15bd307..bfb68129a 100644 --- a/util/src/snappy.rs +++ b/util/src/snappy.rs @@ -16,7 +16,7 @@ //! Snappy compression bindings. 
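
The deleted module carried the well-known Keccak-256 constants for empty input and for the RLP encodings of the empty string and empty list; their byte values do not depend on which helper crate computes them. A small check of those values against the new `keccak` function (only `KECCAK_NULL_RLP` is confirmed as a constant name on the `hash` crate side, hence the hex literals):

```rust
extern crate hash;
extern crate ethcore_bigint as bigint;

use std::str::FromStr;
use hash::{keccak, KECCAK_NULL_RLP};
use bigint::hash::H256;

fn main() {
    // keccak256("") -- the value the removed SHA3_EMPTY constant held.
    let empty = H256::from_str(
        "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap();
    let no_bytes: &[u8] = &[];
    assert_eq!(keccak(no_bytes), empty);

    // keccak256(rlp("")) = keccak256(0x80) -- formerly SHA3_NULL_RLP, now KECCAK_NULL_RLP.
    let null_rlp = H256::from_str(
        "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap();
    assert_eq!(keccak([0x80u8]), null_rlp);
    assert_eq!(KECCAK_NULL_RLP, null_rlp);
}
```
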
-use std::fmt; +use std::{self, fmt}; use libc::{c_char, c_int, size_t}; const SNAPPY_OK: c_int = 0; @@ -56,6 +56,12 @@ extern { #[derive(Debug)] pub struct InvalidInput; +impl std::error::Error for InvalidInput { + fn description(&self) -> &str { + "Attempted snappy decompression with invalid input" + } +} + impl fmt::Display for InvalidInput { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Attempted snappy decompression with invalid input") diff --git a/util/src/trie/fatdb.rs b/util/src/trie/fatdb.rs index f10e6d4bf..0793c1ba5 100644 --- a/util/src/trie/fatdb.rs +++ b/util/src/trie/fatdb.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use hash::H256; -use sha3::Hashable; +use bigint::hash::H256; +use keccak::keccak; use hashdb::HashDB; use super::{TrieDB, Trie, TrieDBIterator, TrieItem, TrieIterator, Query}; @@ -55,13 +55,13 @@ impl<'db> Trie for FatDB<'db> { } fn contains(&self, key: &[u8]) -> super::Result { - self.raw.contains(&key.sha3()) + self.raw.contains(&keccak(key)) } fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> super::Result> where 'a: 'key { - self.raw.get_with(&key.sha3(), query) + self.raw.get_with(&keccak(key), query) } } @@ -83,7 +83,7 @@ impl<'db> FatDBIterator<'db> { impl<'db> TrieIterator for FatDBIterator<'db> { fn seek(&mut self, key: &[u8]) -> super::Result<()> { - self.trie_iterator.seek(&key.sha3()) + self.trie_iterator.seek(&keccak(key)) } } @@ -94,7 +94,7 @@ impl<'db> Iterator for FatDBIterator<'db> { self.trie_iterator.next() .map(|res| res.map(|(hash, value)| { - let aux_hash = hash.sha3(); + let aux_hash = keccak(hash); (self.trie.db().get(&aux_hash).expect("Missing fatdb hash").into_vec(), value) }) ) diff --git a/util/src/trie/fatdbmut.rs b/util/src/trie/fatdbmut.rs index 0f5d6f47e..8267585f9 100644 --- a/util/src/trie/fatdbmut.rs +++ b/util/src/trie/fatdbmut.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use hash::H256; -use sha3::Hashable; +use bigint::hash::H256; +use keccak::keccak; use hashdb::{HashDB, DBValue}; use super::{TrieDBMut, TrieMut}; @@ -53,7 +53,7 @@ impl<'db> FatDBMut<'db> { } fn to_aux_key(key: &[u8]) -> H256 { - key.sha3() + keccak(key) } } @@ -67,17 +67,17 @@ impl<'db> TrieMut for FatDBMut<'db> { } fn contains(&self, key: &[u8]) -> super::Result { - self.raw.contains(&key.sha3()) + self.raw.contains(&keccak(key)) } fn get<'a, 'key>(&'a self, key: &'key [u8]) -> super::Result> where 'a: 'key { - self.raw.get(&key.sha3()) + self.raw.get(&keccak(key)) } fn insert(&mut self, key: &[u8], value: &[u8]) -> super::Result> { - let hash = key.sha3(); + let hash = keccak(key); let out = self.raw.insert(&hash, value)?; let db = self.raw.db_mut(); @@ -89,7 +89,7 @@ impl<'db> TrieMut for FatDBMut<'db> { } fn remove(&mut self, key: &[u8]) -> super::Result> { - let hash = key.sha3(); + let hash = keccak(key); let out = self.raw.remove(&hash)?; // don't remove if it already exists. 
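
The snappy hunk adds a `std::error::Error` impl for `InvalidInput`, presumably so the type can flow into the new `error_chain`-based `UtilError` as a boxed error. A self-contained sketch of the same pattern, outside the snappy bindings:

```rust
use std::{error, fmt};
use std::error::Error; // bring `description` into scope for the demo below

/// Error produced when decompression input is not valid snappy data.
#[derive(Debug)]
pub struct InvalidInput;

impl error::Error for InvalidInput {
    fn description(&self) -> &str {
        "Attempted snappy decompression with invalid input"
    }
}

impl fmt::Display for InvalidInput {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Attempted snappy decompression with invalid input")
    }
}

fn main() {
    let err = InvalidInput;
    println!("{}", err);               // via Display
    println!("{}", err.description()); // via std::error::Error
}
```
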
@@ -114,5 +114,5 @@ fn fatdb_to_trie() { t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); } let t = TrieDB::new(&memdb, &root).unwrap(); - assert_eq!(t.get(&(&[0x01u8, 0x23]).sha3()).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); + assert_eq!(t.get(&keccak(&[0x01u8, 0x23])).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); } diff --git a/util/src/trie/lookup.rs b/util/src/trie/lookup.rs index d24a82e16..168cd00a3 100644 --- a/util/src/trie/lookup.rs +++ b/util/src/trie/lookup.rs @@ -19,7 +19,7 @@ use hashdb::HashDB; use nibbleslice::NibbleSlice; use rlp::Rlp; -use ::{H256}; +use bigint::hash::H256; use super::{TrieError, Query}; use super::node::Node; diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs index cae980505..457cc13e8 100644 --- a/util/src/trie/mod.rs +++ b/util/src/trie/mod.rs @@ -17,7 +17,8 @@ //! Trie interface and implementation. use std::fmt; -use hash::H256; +use bigint::hash::H256; +use keccak::KECCAK_NULL_RLP; use hashdb::{HashDB, DBValue}; /// Export the standardmap module. @@ -123,7 +124,7 @@ pub trait Trie { fn root(&self) -> &H256; /// Is the trie empty? - fn is_empty(&self) -> bool { *self.root() == ::sha3::SHA3_NULL_RLP } + fn is_empty(&self) -> bool { *self.root() == KECCAK_NULL_RLP } /// Does the trie contain a given key? fn contains(&self, key: &[u8]) -> Result { diff --git a/util/src/trie/recorder.rs b/util/src/trie/recorder.rs index 8d79b4315..f2b7fd91f 100644 --- a/util/src/trie/recorder.rs +++ b/util/src/trie/recorder.rs @@ -16,8 +16,9 @@ //! Trie query recorder. -use sha3::Hashable; -use {Bytes, H256}; +use keccak::keccak; +use bigint::hash::H256; +use Bytes; /// A record of a visited node. #[derive(PartialEq, Eq, Debug, Clone)] @@ -62,7 +63,7 @@ impl Recorder { /// Record a visited node, given its hash, data, and depth. pub fn record(&mut self, hash: &H256, data: &[u8], depth: u32) { - debug_assert_eq!(data.sha3(), *hash); + debug_assert_eq!(keccak(data), *hash); if depth >= self.min_depth { self.nodes.push(Record { @@ -82,8 +83,7 @@ impl Recorder { #[cfg(test)] mod tests { use super::*; - use sha3::Hashable; - use ::H256; + use bigint::hash::H256; #[test] fn basic_recorder() { @@ -92,7 +92,7 @@ mod tests { let node1 = vec![1, 2, 3, 4]; let node2 = vec![4, 5, 6, 7, 8, 9, 10]; - let (hash1, hash2) = (node1.sha3(), node2.sha3()); + let (hash1, hash2) = (keccak(&node1), keccak(&node2)); basic.record(&hash1, &node1, 0); basic.record(&hash2, &node2, 456); @@ -118,8 +118,8 @@ mod tests { let node1 = vec![1, 2, 3, 4]; let node2 = vec![4, 5, 6, 7, 8, 9, 10]; - let hash1 = node1.sha3(); - let hash2 = node2.sha3(); + let hash1 = keccak(&node1); + let hash2 = keccak(&node2); basic.record(&hash1, &node1, 0); basic.record(&hash2, &node2, 456); diff --git a/util/src/trie/sectriedb.rs b/util/src/trie/sectriedb.rs index b2f344794..ce617d9d0 100644 --- a/util/src/trie/sectriedb.rs +++ b/util/src/trie/sectriedb.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
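
`FatDBMut` keeps hashing user keys with `keccak` before touching the raw trie, while recording the preimage under an aux key; the updated `fatdb_to_trie` test above shows the underlying `TrieDB` is keyed by `keccak(key)`. A hedged usage sketch built on the APIs visible in these hunks (the exact `util::trie` re-export paths are assumptions):

```rust
extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint;
extern crate hash;

use util::{MemoryDB, DBValue};
use util::trie::{FatDBMut, TrieDB, TrieMut, Trie};
use bigint::hash::H256;
use hash::keccak;

fn main() {
    let mut memdb = MemoryDB::new();
    let mut root = H256::default();
    {
        let mut t = FatDBMut::new(&mut memdb, &mut root);
        t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap();
        // Lookups through the fat trie use the plain key...
        assert!(t.contains(&[0x01u8, 0x23]).unwrap());
    }
    // ...while the raw trie underneath is keyed by keccak(key).
    let t = TrieDB::new(&memdb, &root).unwrap();
    assert_eq!(
        t.get(&keccak(&[0x01u8, 0x23])).unwrap().unwrap(),
        DBValue::from_slice(&[0x01u8, 0x23])
    );
}
```
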
-use hash::H256; -use sha3::Hashable; +use bigint::hash::H256; +use keccak::keccak; use hashdb::HashDB; use super::triedb::TrieDB; use super::{Trie, TrieItem, TrieIterator, Query}; @@ -56,13 +56,13 @@ impl<'db> Trie for SecTrieDB<'db> { fn root(&self) -> &H256 { self.raw.root() } fn contains(&self, key: &[u8]) -> super::Result { - self.raw.contains(&key.sha3()) + self.raw.contains(&keccak(key)) } fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> super::Result> where 'a: 'key { - self.raw.get_with(&key.sha3(), query) + self.raw.get_with(&keccak(key), query) } } @@ -77,7 +77,7 @@ fn trie_to_sectrie() { let mut root = H256::default(); { let mut t = TrieDBMut::new(&mut memdb, &mut root); - t.insert(&(&[0x01u8, 0x23]).sha3(), &[0x01u8, 0x23]).unwrap(); + t.insert(&keccak(&[0x01u8, 0x23]), &[0x01u8, 0x23]).unwrap(); } let t = SecTrieDB::new(&memdb, &root).unwrap(); assert_eq!(t.get(&[0x01u8, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); diff --git a/util/src/trie/sectriedbmut.rs b/util/src/trie/sectriedbmut.rs index 5cfdc4b9f..b3790f27d 100644 --- a/util/src/trie/sectriedbmut.rs +++ b/util/src/trie/sectriedbmut.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use hash::H256; -use sha3::Hashable; +use bigint::hash::H256; +use keccak::keccak; use hashdb::{HashDB, DBValue}; use super::triedbmut::TrieDBMut; use super::TrieMut; @@ -59,21 +59,21 @@ impl<'db> TrieMut for SecTrieDBMut<'db> { } fn contains(&self, key: &[u8]) -> super::Result { - self.raw.contains(&key.sha3()) + self.raw.contains(&keccak(key)) } fn get<'a, 'key>(&'a self, key: &'key [u8]) -> super::Result> where 'a: 'key { - self.raw.get(&key.sha3()) + self.raw.get(&keccak(key)) } fn insert(&mut self, key: &[u8], value: &[u8]) -> super::Result> { - self.raw.insert(&key.sha3(), value) + self.raw.insert(&keccak(key), value) } fn remove(&mut self, key: &[u8]) -> super::Result> { - self.raw.remove(&key.sha3()) + self.raw.remove(&keccak(key)) } } @@ -90,5 +90,5 @@ fn sectrie_to_trie() { t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); } let t = TrieDB::new(&memdb, &root).unwrap(); - assert_eq!(t.get(&(&[0x01u8, 0x23]).sha3()).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); + assert_eq!(t.get(&keccak(&[0x01u8, 0x23])).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); } diff --git a/util/src/trie/standardmap.rs b/util/src/trie/standardmap.rs index f839f4f2e..866dc6e0e 100644 --- a/util/src/trie/standardmap.rs +++ b/util/src/trie/standardmap.rs @@ -15,11 +15,9 @@ // along with Parity. If not, see . //! Key-value datastore with a modified Merkle tree. -extern crate rand; - +use keccak::keccak; use bytes::*; -use sha3::*; -use hash::*; +use bigint::hash::*; use rlp::encode; /// Alphabet to use when creating words for insertion into tries. @@ -63,14 +61,14 @@ impl StandardMap { /// `seed` is mutated pseudoramdonly and used. fn random_bytes(min_count: usize, journal_count: usize, seed: &mut H256) -> Vec { assert!(min_count + journal_count <= 32); - *seed = seed.sha3(); + *seed = keccak(&seed); let r = min_count + (seed[31] as usize % (journal_count + 1)); seed[0..r].to_vec() } /// Get a random value. Equal chance of being 1 byte as of 32. `seed` is mutated pseudoramdonly and used. fn random_value(seed: &mut H256) -> Bytes { - *seed = seed.sha3(); + *seed = keccak(&seed); match seed[0] % 2 { 1 => vec![seed[31];1], _ => seed.to_vec(), @@ -81,7 +79,7 @@ impl StandardMap { /// Each byte is an item from `alphabet`. 
`seed` is mutated pseudoramdonly and used. fn random_word(alphabet: &[u8], min_count: usize, journal_count: usize, seed: &mut H256) -> Vec { assert!(min_count + journal_count <= 32); - *seed = seed.sha3(); + *seed = keccak(&seed); let r = min_count + (seed[31] as usize % (journal_count + 1)); let mut ret: Vec = Vec::with_capacity(r); for i in 0..r { diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index 3e62550ba..208d257bb 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -21,7 +21,8 @@ use rlp::*; use super::node::{Node, OwnedNode}; use super::lookup::Lookup; use super::{Trie, TrieItem, TrieError, TrieIterator, Query}; -use {ToPretty, Bytes, H256}; +use bigint::hash::H256; +use {ToPretty, Bytes}; /// A `Trie` implementation using a generic `HashDB` backing database. /// @@ -31,11 +32,12 @@ use {ToPretty, Bytes, H256}; /// # Example /// ``` /// extern crate ethcore_util as util; +/// extern crate ethcore_bigint as bigint; /// /// use util::trie::*; /// use util::hashdb::*; /// use util::memorydb::*; -/// use util::hash::*; +/// use bigint::hash::*; /// /// fn main() { /// let mut memdb = MemoryDB::new(); @@ -130,7 +132,7 @@ impl<'db> TrieDB<'db> { /// This could be a simple identity operation in the case that the node is sufficiently small, but /// may require a database lookup. fn get_raw_or_lookup(&'db self, node: &'db [u8]) -> super::Result { - // check if its sha3 + len + // check if its keccak + len let r = Rlp::new(node); match r.is_data() && r.size() == 32 { true => { diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index 89eb574dc..594bb1380 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -21,18 +21,18 @@ use super::lookup::Lookup; use super::node::Node as RlpNode; use super::node::NodeKey; -use ::{HashDB, H256}; +use ::HashDB; use ::bytes::ToPretty; use ::nibbleslice::NibbleSlice; use ::rlp::{Rlp, RlpStream}; -use ::sha3::SHA3_NULL_RLP; use hashdb::DBValue; -use elastic_array::ElasticArray1024; - use std::collections::{HashSet, VecDeque}; use std::mem; use std::ops::Index; +use bigint::hash::H256; +use elastic_array::ElasticArray1024; +use keccak::{KECCAK_NULL_RLP}; // For lookups into the Node storage buffer. // This is deliberately non-copyable. @@ -262,18 +262,21 @@ impl<'a> Index<&'a StorageHandle> for NodeStorage { /// # Example /// ``` /// extern crate ethcore_util as util; +/// extern crate ethcore_bigint as bigint; +/// extern crate hash; /// +/// use hash::KECCAK_NULL_RLP; /// use util::trie::*; /// use util::hashdb::*; /// use util::memorydb::*; -/// use util::hash::*; +/// use bigint::hash::*; /// /// fn main() { /// let mut memdb = MemoryDB::new(); /// let mut root = H256::new(); /// let mut t = TrieDBMut::new(&mut memdb, &mut root); /// assert!(t.is_empty()); -/// assert_eq!(*t.root(), ::util::sha3::SHA3_NULL_RLP); +/// assert_eq!(*t.root(), KECCAK_NULL_RLP); /// t.insert(b"foo", b"bar").unwrap(); /// assert!(t.contains(b"foo").unwrap()); /// assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar")); @@ -295,8 +298,8 @@ pub struct TrieDBMut<'a> { impl<'a> TrieDBMut<'a> { /// Create a new trie with backing database `db` and empty `root`. 
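
`StandardMap` drives its pseudo-random generation by repeatedly re-hashing a 32-byte seed (`*seed = keccak(&seed)`) and reading bytes out of the result. A minimal sketch of that seeding pattern, assuming only the `keccak`/`H256` types used in the hunks; the length bounds here are illustrative, not the ones in `random_bytes`:

```rust
extern crate hash;
extern crate ethcore_bigint as bigint;

use hash::keccak;
use bigint::hash::H256;

/// Advance the seed one step and derive a short byte string from it,
/// mirroring the shape of StandardMap::random_bytes.
fn next_bytes(seed: &mut H256) -> Vec<u8> {
    *seed = keccak(&seed);
    let len = 1 + (seed[31] as usize % 8); // 1..=8 bytes, illustrative bounds only
    seed[0..len].to_vec()
}

fn main() {
    let mut seed = H256::new();
    let a = next_bytes(&mut seed);
    let b = next_bytes(&mut seed);
    // Deterministic for a fixed starting seed, but statistically "random".
    println!("{:?} {:?}", a, b);
}
```
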
pub fn new(db: &'a mut HashDB, root: &'a mut H256) -> Self { - *root = SHA3_NULL_RLP; - let root_handle = NodeHandle::Hash(SHA3_NULL_RLP); + *root = KECCAK_NULL_RLP; + let root_handle = NodeHandle::Hash(KECCAK_NULL_RLP); TrieDBMut { storage: NodeStorage::empty(), @@ -871,7 +874,7 @@ impl<'a> TrieMut for TrieDBMut<'a> { fn is_empty(&self) -> bool { match self.root_handle { - NodeHandle::Hash(h) => h == SHA3_NULL_RLP, + NodeHandle::Hash(h) => h == KECCAK_NULL_RLP, NodeHandle::InMemory(ref h) => match self.storage[h] { Node::Empty => true, _ => false, @@ -919,8 +922,8 @@ impl<'a> TrieMut for TrieDBMut<'a> { } None => { trace!(target: "trie", "remove: obliterated trie"); - self.root_handle = NodeHandle::Hash(SHA3_NULL_RLP); - *self.root = SHA3_NULL_RLP; + self.root_handle = NodeHandle::Hash(KECCAK_NULL_RLP); + *self.root = KECCAK_NULL_RLP; } } @@ -936,12 +939,13 @@ impl<'a> Drop for TrieDBMut<'a> { #[cfg(test)] mod tests { - use triehash::trie_root; + extern crate triehash; + use self::triehash::trie_root; use hashdb::*; use memorydb::*; use super::*; use bytes::ToPretty; - use sha3::SHA3_NULL_RLP; + use keccak::KECCAK_NULL_RLP; use super::super::TrieMut; use super::super::standardmap::*; @@ -996,7 +1000,7 @@ mod tests { assert_eq!(*memtrie.root(), real); unpopulate_trie(&mut memtrie, &x); memtrie.commit(); - if *memtrie.root() != SHA3_NULL_RLP { + if *memtrie.root() != KECCAK_NULL_RLP { println!("- TRIE MISMATCH"); println!(""); println!("{:?} vs {:?}", memtrie.root(), real); @@ -1004,7 +1008,7 @@ mod tests { println!("{:?} -> {:?}", i.0.pretty(), i.1.pretty()); } } - assert_eq!(*memtrie.root(), SHA3_NULL_RLP); + assert_eq!(*memtrie.root(), KECCAK_NULL_RLP); } } @@ -1013,7 +1017,7 @@ mod tests { let mut memdb = MemoryDB::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); - assert_eq!(*t.root(), SHA3_NULL_RLP); + assert_eq!(*t.root(), KECCAK_NULL_RLP); } #[test] @@ -1268,7 +1272,7 @@ mod tests { } assert!(t.is_empty()); - assert_eq!(*t.root(), SHA3_NULL_RLP); + assert_eq!(*t.root(), KECCAK_NULL_RLP); } #[test] diff --git a/util/src/vector.rs b/util/src/vector.rs deleted file mode 100644 index 951d396c7..000000000 --- a/util/src/vector.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Vector extensions. 
- -/// Returns len of prefix shared with elem -/// -/// ```rust -/// extern crate ethcore_util as util; -/// use util::vector::SharedPrefix; -/// -/// fn main () { -/// let a = vec![1,2,3,3,5]; -/// let b = vec![1,2,3]; -/// assert_eq!(a.shared_prefix_len(&b), 3); -/// } -/// ``` -pub trait SharedPrefix { - /// Get common prefix length - fn shared_prefix_len(&self, elem: &[T]) -> usize; -} - -impl SharedPrefix for [T] where T: Eq { - fn shared_prefix_len(&self, elem: &[T]) -> usize { - use std::cmp; - let len = cmp::min(self.len(), elem.len()); - (0..len).take_while(|&i| self[i] == elem[i]).count() - } -} - -#[cfg(test)] -mod test { - use vector::SharedPrefix; - - #[test] - fn test_shared_prefix() { - let a = vec![1,2,3,4,5,6]; - let b = vec![4,2,3,4,5,6]; - assert_eq!(a.shared_prefix_len(&b), 0); - } - - #[test] - fn test_shared_prefix2() { - let a = vec![1,2,3,3,5]; - let b = vec![1,2,3]; - assert_eq!(a.shared_prefix_len(&b), 3); - } - - #[test] - fn test_shared_prefix3() { - let a = vec![1,2,3,4,5,6]; - let b = vec![1,2,3,4,5,6]; - assert_eq!(a.shared_prefix_len(&b), 6); - } -} diff --git a/util/triehash/Cargo.toml b/util/triehash/Cargo.toml new file mode 100644 index 000000000..cc18a6ea8 --- /dev/null +++ b/util/triehash/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "triehash" +version = "0.1.0" +authors = ["Parity Technologies "] + +[dependencies] +rlp = { path = "../rlp" } +ethcore-bigint = { path = "../bigint" } +hash = { path = "../hash" } diff --git a/util/src/triehash.rs b/util/triehash/src/lib.rs similarity index 78% rename from util/src/triehash.rs rename to util/triehash/src/lib.rs index 62d3dbf97..d166aa5d0 100644 --- a/util/src/triehash.rs +++ b/util/triehash/src/lib.rs @@ -18,26 +18,31 @@ //! //! This module should be used to generate trie root hash. 
+extern crate ethcore_bigint; +extern crate hash; +extern crate rlp; + use std::collections::BTreeMap; use std::cmp; -use hash::*; -use sha3::*; -use rlp; +use ethcore_bigint::hash::H256; +use hash::keccak; use rlp::RlpStream; -use vector::SharedPrefix; + +fn shared_prefix_len(first: &[T], second: &[T]) -> usize { + let len = cmp::min(first.len(), second.len()); + (0..len).take_while(|&i| first[i] == second[i]).count() +} /// Generates a trie root hash for a vector of values /// /// ```rust -/// extern crate ethcore_util as util; -/// use std::str::FromStr; -/// use util::triehash::*; -/// use util::hash::*; +/// extern crate triehash; +/// use triehash::ordered_trie_root; /// /// fn main() { /// let v = vec![From::from("doe"), From::from("reindeer")]; /// let root = "e766d5d51b89dc39d981b41bda63248d7abce4f0225eefd023792a540bcffee3"; -/// assert_eq!(ordered_trie_root(v), H256::from_str(root).unwrap()); +/// assert_eq!(ordered_trie_root(v), root.parse().unwrap()); /// } /// ``` pub fn ordered_trie_root(input: I) -> H256 @@ -61,10 +66,8 @@ pub fn ordered_trie_root(input: I) -> H256 /// Generates a trie root hash for a vector of key-values /// /// ```rust -/// extern crate ethcore_util as util; -/// use std::str::FromStr; -/// use util::triehash::*; -/// use util::hash::*; +/// extern crate triehash; +/// use triehash::trie_root; /// /// fn main() { /// let v = vec![ @@ -74,7 +77,7 @@ pub fn ordered_trie_root(input: I) -> H256 /// ]; /// /// let root = "8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3"; -/// assert_eq!(trie_root(v), H256::from_str(root).unwrap()); +/// assert_eq!(trie_root(v), root.parse().unwrap()); /// } /// ``` pub fn trie_root(input: I) -> H256 @@ -95,10 +98,8 @@ pub fn trie_root(input: I) -> H256 /// Generates a key-hashed (secure) trie root hash for a vector of key-values. /// /// ```rust -/// extern crate ethcore_util as util; -/// use std::str::FromStr; -/// use util::triehash::*; -/// use util::hash::*; +/// extern crate triehash; +/// use triehash::sec_trie_root; /// /// fn main() { /// let v = vec![ @@ -108,14 +109,14 @@ pub fn trie_root(input: I) -> H256 /// ]; /// /// let root = "d4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585"; -/// assert_eq!(sec_trie_root(v), H256::from_str(root).unwrap()); +/// assert_eq!(sec_trie_root(v), root.parse().unwrap()); /// } /// ``` pub fn sec_trie_root(input: Vec<(Vec, Vec)>) -> H256 { let gen_input = input // first put elements into btree to sort them and to remove duplicates .into_iter() - .map(|(k, v)| (k.sha3(), v)) + .map(|(k, v)| (keccak(k), v)) .collect::>() // then move them to a vector .into_iter() @@ -128,7 +129,7 @@ pub fn sec_trie_root(input: Vec<(Vec, Vec)>) -> H256 { fn gen_trie_root(input: Vec<(Vec, Vec)>) -> H256 { let mut stream = RlpStream::new(); hash256rlp(&input, 0, &mut stream); - stream.out().sha3() + keccak(stream.out()) } /// Hex-prefix Notation. First nibble has flags: oddness = 2^0 & termination = 2^1. 
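
As the hunk shows, `sec_trie_root` is simply `trie_root` computed after replacing every key with its `keccak` hash. A hedged sketch making that equivalence explicit, assuming `trie_root` accepts any collection of `(Vec<u8>, Vec<u8>)` pairs as its doc example suggests, and that `H256` remains a newtype over `[u8; 32]`:

```rust
extern crate triehash;
extern crate hash;

use triehash::{trie_root, sec_trie_root};
use hash::keccak;

fn main() {
    let kvs = vec![
        (b"doe".to_vec(), b"reindeer".to_vec()),
        (b"dog".to_vec(), b"puppy".to_vec()),
        (b"dogglesworth".to_vec(), b"cat".to_vec()),
    ];

    // Hash every key by hand (H256 is assumed to be a newtype over [u8; 32])...
    let hashed: Vec<(Vec<u8>, Vec<u8>)> = kvs.iter()
        .map(|&(ref k, ref v)| (keccak(k).0.to_vec(), v.clone()))
        .collect();

    // ...and the plain trie root over the hashed keys should match
    // what sec_trie_root computes internally.
    assert_eq!(trie_root(hashed), sec_trie_root(kvs));
}
```
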
@@ -216,7 +217,7 @@ fn hash256rlp(input: &[(Vec, Vec)], pre_len: usize, stream: &mut RlpStre .skip(1) // get minimum number of shared nibbles between first and each successive .fold(key.len(), | acc, &(ref k, _) | { - cmp::min(key.shared_prefix_len(k), acc) + cmp::min(shared_prefix_len(key, k), acc) }); // if shared prefix is higher than current prefix append its @@ -271,7 +272,7 @@ fn hash256aux(input: &[(Vec, Vec)], pre_len: usize, stream: &mut RlpStre let out = s.out(); match out.len() { 0...31 => stream.append_raw(&out, 1), - _ => stream.append(&out.sha3()) + _ => stream.append(&keccak(out)) }; } @@ -288,50 +289,49 @@ fn test_nibbles() { assert_eq!(as_nibbles(&v), e); } -#[test] -fn test_hex_prefix_encode() { - let v = vec![0, 0, 1, 2, 3, 4, 5]; - let e = vec![0x10, 0x01, 0x23, 0x45]; - let h = hex_prefix_encode(&v, false); - assert_eq!(h, e); - - let v = vec![0, 1, 2, 3, 4, 5]; - let e = vec![0x00, 0x01, 0x23, 0x45]; - let h = hex_prefix_encode(&v, false); - assert_eq!(h, e); - - let v = vec![0, 1, 2, 3, 4, 5]; - let e = vec![0x20, 0x01, 0x23, 0x45]; - let h = hex_prefix_encode(&v, true); - assert_eq!(h, e); - - let v = vec![1, 2, 3, 4, 5]; - let e = vec![0x31, 0x23, 0x45]; - let h = hex_prefix_encode(&v, true); - assert_eq!(h, e); - - let v = vec![1, 2, 3, 4]; - let e = vec![0x00, 0x12, 0x34]; - let h = hex_prefix_encode(&v, false); - assert_eq!(h, e); - - let v = vec![4, 1]; - let e = vec![0x20, 0x41]; - let h = hex_prefix_encode(&v, true); - assert_eq!(h, e); -} #[cfg(test)] mod tests { - use std::str::FromStr; - use hash::H256; - use super::trie_root; + use super::{trie_root, shared_prefix_len, hex_prefix_encode}; + + #[test] + fn test_hex_prefix_encode() { + let v = vec![0, 0, 1, 2, 3, 4, 5]; + let e = vec![0x10, 0x01, 0x23, 0x45]; + let h = hex_prefix_encode(&v, false); + assert_eq!(h, e); + + let v = vec![0, 1, 2, 3, 4, 5]; + let e = vec![0x00, 0x01, 0x23, 0x45]; + let h = hex_prefix_encode(&v, false); + assert_eq!(h, e); + + let v = vec![0, 1, 2, 3, 4, 5]; + let e = vec![0x20, 0x01, 0x23, 0x45]; + let h = hex_prefix_encode(&v, true); + assert_eq!(h, e); + + let v = vec![1, 2, 3, 4, 5]; + let e = vec![0x31, 0x23, 0x45]; + let h = hex_prefix_encode(&v, true); + assert_eq!(h, e); + + let v = vec![1, 2, 3, 4]; + let e = vec![0x00, 0x12, 0x34]; + let h = hex_prefix_encode(&v, false); + assert_eq!(h, e); + + let v = vec![4, 1]; + let e = vec![0x20, 0x41]; + let h = hex_prefix_encode(&v, true); + assert_eq!(h, e); + } #[test] fn simple_test() { assert_eq!(trie_root(vec![ (b"A".to_vec(), b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".to_vec()) - ]), H256::from_str("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab").unwrap()); + ]), "d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab".parse().unwrap()); } #[test] @@ -348,4 +348,24 @@ mod tests { ])); } + #[test] + fn test_shared_prefix() { + let a = vec![1,2,3,4,5,6]; + let b = vec![4,2,3,4,5,6]; + assert_eq!(shared_prefix_len(&a, &b), 0); + } + + #[test] + fn test_shared_prefix2() { + let a = vec![1,2,3,3,5]; + let b = vec![1,2,3]; + assert_eq!(shared_prefix_len(&a, &b), 3); + } + + #[test] + fn test_shared_prefix3() { + let a = vec![1,2,3,4,5,6]; + let b = vec![1,2,3,4,5,6]; + assert_eq!(shared_prefix_len(&a, &b), 6); + } } diff --git a/util/unexpected/Cargo.toml b/util/unexpected/Cargo.toml new file mode 100644 index 000000000..35ff1a535 --- /dev/null +++ b/util/unexpected/Cargo.toml @@ -0,0 +1,6 @@ +[package] +name = "unexpected" +version = "0.1.0" +authors = ["Parity Technologies "] + 
+[dependencies]
diff --git a/util/unexpected/src/lib.rs b/util/unexpected/src/lib.rs
new file mode 100644
index 000000000..e34b2326c
--- /dev/null
+++ b/util/unexpected/src/lib.rs
@@ -0,0 +1,58 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Error utils
+
+use std::fmt;
+
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+/// Error indicating an expected value was not found.
+pub struct Mismatch<T> {
+	/// Value expected.
+	pub expected: T,
+	/// Value found.
+	pub found: T,
+}
+
+impl<T: fmt::Display> fmt::Display for Mismatch<T> {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		f.write_fmt(format_args!("Expected {}, found {}", self.expected, self.found))
+	}
+}
+
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+/// Error indicating value found is outside of a valid range.
+pub struct OutOfBounds<T> {
+	/// Minimum allowed value.
+	pub min: Option<T>,
+	/// Maximum allowed value.
+	pub max: Option<T>,
+	/// Value found.
+	pub found: T,
+}
+
+impl<T: fmt::Display> fmt::Display for OutOfBounds<T> {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		let msg = match (self.min.as_ref(), self.max.as_ref()) {
+			(Some(min), Some(max)) => format!("Min={}, Max={}", min, max),
+			(Some(min), _) => format!("Min={}", min),
+			(_, Some(max)) => format!("Max={}", max),
+			(None, None) => "".into(),
+		};
+
+		f.write_fmt(format_args!("Value {} out of bounds. {}", self.found, msg))
+	}
+}
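
The new `unexpected` crate gives validation code shared `Mismatch` and `OutOfBounds` wrappers with human-readable `Display` output. A short usage sketch based on the definitions above; the field values are made up for illustration:

```rust
extern crate unexpected;

use unexpected::{Mismatch, OutOfBounds};

fn main() {
    // e.g. a gas check somewhere in block verification (values invented here).
    let gas = Mismatch { expected: 21_000u64, found: 20_000u64 };
    assert_eq!(format!("{}", gas), "Expected 21000, found 20000");

    let number = OutOfBounds { min: Some(1u64), max: None, found: 0u64 };
    assert_eq!(format!("{}", number), "Value 0 out of bounds. Min=1");
}
```
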