Merge remote-tracking branch 'parity/master' into bft

Conflicts:
	ethcore/src/client/client.rs
	sync/src/api.rs
keorn 2016-09-30 12:44:52 +01:00
commit 9ca938f740
99 changed files with 2515 additions and 897 deletions


@@ -266,6 +266,7 @@ test-linux:
   before_script:
     - git submodule update --init --recursive
   script:
+    - export RUST_BACKTRACE=1
     - ./test.sh --verbose
   tags:
     - rust-test

Cargo.lock (generated)

@@ -288,6 +288,7 @@ dependencies = [
  "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lru-cache 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -317,7 +318,7 @@ dependencies = [
  "ethcore-devtools 1.4.0",
  "ethcore-rpc 1.4.0",
  "ethcore-util 1.4.0",
- "https-fetch 0.1.0",
+ "fetch 0.1.0",
  "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
  "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
@@ -466,6 +467,7 @@ dependencies = [
  "ethkey 0.2.0",
  "ethstore 0.1.0",
  "ethsync 1.4.0",
+ "fetch 0.1.0",
  "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)",
  "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
@@ -639,6 +641,16 @@ dependencies = [
  "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "fetch"
+version = "0.1.0"
+dependencies = [
+ "https-fetch 0.1.0",
+ "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
+ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "flate2"
 version = "0.2.14"
@@ -690,7 +702,7 @@ version = "0.1.0"
 dependencies = [
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)",
- "rustls 0.1.1 (git+https://github.com/ctz/rustls)",
+ "rustls 0.1.2 (git+https://github.com/ctz/rustls)",
 ]
 
 [[package]]
@@ -852,6 +864,11 @@ name = "libc"
 version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "linked-hash-map"
+version = "0.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "linked-hash-map"
 version = "0.3.0"
@@ -862,6 +879,14 @@ name = "log"
 version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "lru-cache"
+version = "0.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "linked-hash-map 0.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "matches"
 version = "0.1.2"
@@ -1345,7 +1370,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "ring"
-version = "0.3.1"
+version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1429,15 +1454,15 @@ dependencies = [
 
 [[package]]
 name = "rustls"
-version = "0.1.1"
-source = "git+https://github.com/ctz/rustls#a9c5a79f49337e22ac05bb1ea114240bdbe0fdd2"
+version = "0.1.2"
+source = "git+https://github.com/ctz/rustls#3d2db624997004b7b18ba4463d6081f37598b2f5"
 dependencies = [
  "base64 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "ring 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ring 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "untrusted 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "webpki 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "webpki 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -1788,10 +1813,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "webpki"
-version = "0.2.2"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "ring 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ring 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "untrusted 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1903,14 +1928,16 @@ dependencies = [
 "checksum itoa 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ae3088ea4baeceb0284ee9eea42f591226e6beaecf65373e41b38d95a1b8e7a1"
 "checksum json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)" = "<none>"
 "checksum json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)" = "<none>"
-"checksum jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e913b3c809aab9378889da8b990b4a46b98bd4794c8117946a1cf63c5f87bcde"
+"checksum jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3c5094610b07f28f3edaf3947b732dadb31dbba4941d4d0c1c7a8350208f4414"
 "checksum jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)" = "<none>"
 "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
 "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a"
 "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f"
 "checksum libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "23e3757828fa702a20072c37ff47938e9dd331b92fac6e223d26d4b7a55f7ee2"
+"checksum linked-hash-map 0.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "83f7ff3baae999fdf921cccf54b61842bb3b26868d50d02dff48052ebec8dd79"
 "checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd"
 "checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054"
+"checksum lru-cache 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "42d50dcb5d9f145df83b1043207e1ac0c37c9c779c4e128ca4655abc3f3cbf8c"
 "checksum matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "15305656809ce5a4805b1ff2946892810992197ce1270ff79baded852187942e"
 "checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
 "checksum mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a74cc2587bf97c49f3f5bab62860d6abf3902ca73b66b51d9b049fbdcd727bd2"
@@ -1964,7 +1991,7 @@ dependencies = [
 "checksum rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "655df67c314c30fa3055a365eae276eb88aa4f3413a352a1ab32c1320eda41ea"
 "checksum regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)" = "b4329b8928a284580a1c63ec9d846b12f6d3472317243ff7077aff11f23f2b29"
 "checksum regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "841591b1e05609a643e3b4d0045fce04f701daba7151ddcd3ad47b080693d5a9"
-"checksum ring 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d059a6a96d3be79042e3f70eb97945912839265f9d8ab45b921abaf266c70dbb"
+"checksum ring 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0d2f6547bf9640f1d3cc4e771f82374ec8fd237c17eeb3ff5cd5ccbe22377a09"
 "checksum rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)" = "<none>"
 "checksum rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)" = "<none>"
 "checksum rotor 0.6.3 (git+https://github.com/ethcore/rotor)" = "<none>"
@@ -1972,7 +1999,7 @@ dependencies = [
 "checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
 "checksum rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "6159e4e6e559c81bd706afe9c8fd68f547d3e851ce12e76b1de7914bab61691b"
 "checksum rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "c5f5376ea5e30ce23c03eb77cbe4962b988deead10910c372b226388b594c084"
-"checksum rustls 0.1.1 (git+https://github.com/ctz/rustls)" = "<none>"
+"checksum rustls 0.1.2 (git+https://github.com/ctz/rustls)" = "<none>"
 "checksum semver 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)" = "d4f410fedcf71af0345d7607d246e7ad15faaadd49d240ee3b24e5dc21a820ac"
 "checksum semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2d5b7638a1f03815d94e88cb3b3c08e87f0db4d683ef499d1836aaf70a45623f"
 "checksum serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b1dfda9ebb31d29fa8b94d7eb3031a86a8dcec065f0fe268a30f98867bf45775"
@@ -2017,7 +2044,7 @@ dependencies = [
 "checksum vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0795a11576d29ae80525a3fda315bf7b534f8feb9d34101e5fe63fb95bb2fd24"
 "checksum vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "56b639f935488eb40f06d17c3e3bcc3054f6f75d264e187b1107c8d1cba8d31c"
 "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
-"checksum webpki 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5dc10a815fabbb0c3145c1153240528f3a8703a47e26e8dbb4a5d4f6386200ad"
+"checksum webpki 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "813503a5985585e0812d430cd1328ee322f47f66629c8ed4ecab939cf9e92f91"
 "checksum winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4dfaaa8fbdaa618fa6914b59b2769d690dd7521920a18d84b42d254678dd5fd4"
 "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
 "checksum ws 0.5.2 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)" = "<none>"


@@ -26,7 +26,7 @@ linked-hash-map = "0.3"
 ethcore-devtools = { path = "../devtools" }
 ethcore-rpc = { path = "../rpc" }
 ethcore-util = { path = "../util" }
-https-fetch = { path = "../util/https-fetch" }
+fetch = { path = "../util/fetch" }
 parity-dapps = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" }
 # List of apps
 parity-dapps-status = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" }


@@ -1,113 +0,0 @@
-// Copyright 2015, 2016 Ethcore (UK) Ltd.
-// This file is part of Parity.
-
-// Parity is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-
-//! Hyper Client Handlers
-pub mod fetch_file;
-
-use std::env;
-use std::sync::{mpsc, Arc};
-use std::sync::atomic::AtomicBool;
-use std::path::PathBuf;
-
-use hyper;
-use https_fetch as https;
-
-use random_filename;
-
-use self::fetch_file::{Fetch, Error as HttpFetchError};
-
-pub type FetchResult = Result<PathBuf, FetchError>;
-
-#[derive(Debug)]
-pub enum FetchError {
-	InvalidUrl,
-	Http(HttpFetchError),
-	Https(https::FetchError),
-	Other(String),
-}
-
-impl From<HttpFetchError> for FetchError {
-	fn from(e: HttpFetchError) -> Self {
-		FetchError::Http(e)
-	}
-}
-
-pub struct Client {
-	http_client: hyper::Client<Fetch>,
-	https_client: https::Client,
-}
-
-impl Client {
-	pub fn new() -> Self {
-		Client {
-			http_client: hyper::Client::new().expect("Unable to initialize http client."),
-			https_client: https::Client::new().expect("Unable to initialize https client."),
-		}
-	}
-
-	pub fn close(self) {
-		self.http_client.close();
-		self.https_client.close();
-	}
-
-	pub fn request(&mut self, url: &str, abort: Arc<AtomicBool>, on_done: Box<Fn() + Send>) -> Result<mpsc::Receiver<FetchResult>, FetchError> {
-		let is_https = url.starts_with("https://");
-		let url = try!(url.parse().map_err(|_| FetchError::InvalidUrl));
-		trace!(target: "dapps", "Fetching from: {:?}", url);
-
-		if is_https {
-			let url = try!(Self::convert_url(url));
-
-			let (tx, rx) = mpsc::channel();
-			let temp_path = Self::temp_path();
-			let res = self.https_client.fetch_to_file(url, temp_path.clone(), abort, move |result| {
-				let res = tx.send(
-					result.map(|_| temp_path).map_err(FetchError::Https)
-				);
-				if let Err(_) = res {
-					warn!("Fetch finished, but no one was listening");
-				}
-				on_done();
-			});
-
-			match res {
-				Ok(_) => Ok(rx),
-				Err(e) => Err(FetchError::Other(format!("{:?}", e))),
-			}
-		} else {
-			let (tx, rx) = mpsc::channel();
-			let res = self.http_client.request(url, Fetch::new(tx, abort, on_done));
-
-			match res {
-				Ok(_) => Ok(rx),
-				Err(e) => Err(FetchError::Other(format!("{:?}", e))),
-			}
-		}
-	}
-
-	fn convert_url(url: hyper::Url) -> Result<https::Url, FetchError> {
-		let host = format!("{}", try!(url.host().ok_or(FetchError::InvalidUrl)));
-		let port = try!(url.port_or_known_default().ok_or(FetchError::InvalidUrl));
-		https::Url::new(&host, port, url.path()).map_err(|_| FetchError::InvalidUrl)
-	}
-
-	fn temp_path() -> PathBuf {
-		let mut dir = env::temp_dir();
-		dir.push(random_filename());
-		dir
-	}
-}


@@ -22,13 +22,13 @@ use std::sync::{mpsc, Arc};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::time::{Instant, Duration};
 
 use util::Mutex;
+use fetch::{Client, Fetch, FetchResult};
 
 use hyper::{server, Decoder, Encoder, Next, Method, Control};
 use hyper::net::HttpStream;
 use hyper::status::StatusCode;
 
 use handlers::{ContentHandler, Redirection};
-use handlers::client::{Client, FetchResult};
 use apps::redirection_address;
 use page::LocalPageEndpoint;
@@ -159,7 +159,7 @@ impl<H: ContentValidator> ContentFetcherHandler<H> {
 		handler: H) -> (Self, Arc<FetchControl>) {
 		let fetch_control = Arc::new(FetchControl::default());
-		let client = Client::new();
+		let client = Client::default();
 		let handler = ContentFetcherHandler {
 			fetch_control: fetch_control.clone(),
 			control: Some(control),


@@ -21,7 +21,6 @@ mod echo;
 mod content;
 mod redirect;
 mod fetch;
-pub mod client;
 
 pub use self::auth::AuthRequiredHandler;
 pub use self::echo::EchoHandler;


@@ -58,10 +58,10 @@ extern crate jsonrpc_http_server;
 extern crate mime_guess;
 extern crate rustc_serialize;
 extern crate parity_dapps;
-extern crate https_fetch;
 extern crate ethcore_rpc;
 extern crate ethcore_util as util;
 extern crate linked_hash_map;
+extern crate fetch;
 
 #[cfg(test)]
 extern crate ethcore_devtools as devtools;


@@ -202,6 +202,9 @@ impl SeedHashCompute {
 	}
 }
 
+pub fn slow_get_seedhash(block_number: u64) -> H256 {
+	SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, block_number / ETHASH_EPOCH_LENGTH)
+}
 
 #[inline]
 fn fnv_hash(x: u32, y: u32) -> u32 {
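
A note on the `slow_get_seedhash` function added above: the ethash seed hash is keccak-256 iterated once per elapsed epoch over a 32-byte zero seed, which is what `resume_compute_seedhash([0u8; 32], 0, epochs)` computes when resumed from scratch. A minimal standalone sketch of the same computation (the `keccak_256` parameter is an assumption standing in for whatever hash implementation is linked in; the real code reuses `SeedHashCompute`'s cached, resumable variant):

```rust
const ETHASH_EPOCH_LENGTH: u64 = 30000;

/// Recompute the ethash seed hash from scratch: start from 32 zero bytes
/// and apply keccak-256 once per elapsed epoch.
fn slow_get_seedhash_sketch(block_number: u64, keccak_256: fn(&[u8]) -> [u8; 32]) -> [u8; 32] {
	let epochs = block_number / ETHASH_EPOCH_LENGTH;
	let mut seed = [0u8; 32];
	for _ in 0..epochs {
		seed = keccak_256(&seed);
	}
	seed
}
```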


@@ -26,7 +26,7 @@ mod compute;
 
 use std::mem;
 use compute::Light;
-pub use compute::{ETHASH_EPOCH_LENGTH, H256, ProofOfWork, SeedHashCompute, quick_get_difficulty};
+pub use compute::{ETHASH_EPOCH_LENGTH, H256, ProofOfWork, SeedHashCompute, quick_get_difficulty, slow_get_seedhash};
 use std::sync::Arc;
 use parking_lot::Mutex;


@@ -37,6 +37,7 @@ ethkey = { path = "../ethkey" }
 ethcore-ipc-nano = { path = "../ipc/nano" }
 rlp = { path = "../util/rlp" }
 rand = "0.3"
+lru-cache = "0.0.7"
 
 [dependencies.hyper]
 git = "https://github.com/ethcore/hyper"


@@ -0,0 +1,69 @@
+{
+	"name": "Expanse",
+	"forkName": "expanse",
+	"engine": {
+		"Ethash": {
+			"params": {
+				"gasLimitBoundDivisor": "0x0400",
+				"minimumDifficulty": "0x020000",
+				"difficultyBoundDivisor": "0x0800",
+				"difficultyIncrementDivisor": "60",
+				"durationLimit": "0x3C",
+				"blockReward": "0x6f05b59d3b200000",
+				"registrar" : "0x6c221ca53705f3497ec90ca7b84c59ae7382fc21",
+				"frontierCompatibilityModeLimit": "0x30d40",
+				"difficultyHardforkTransition": "0x59d9",
+				"difficultyHardforkBoundDivisor": "0x0200",
+				"bombDefuseTransition": "0x30d40"
+			}
+		}
+	},
+	"params": {
+		"accountStartNonce": "0x00",
+		"maximumExtraDataSize": "0x20",
+		"minGasLimit": "0x1388",
+		"networkID": "0x1",
+		"subprotocolName": "exp"
+	},
+	"genesis": {
+		"seal": {
+			"ethereum": {
+				"nonce": "0x214652414e4b4f21",
+				"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
+			}
+		},
+		"difficulty": "0x40000000",
+		"author": "0x93decab0cd745598860f782ac1e8f046cb99e898",
+		"timestamp": "0x00",
+		"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+		"extraData": "0x4672616e6b6f497346726565646f6d",
+		"gasLimit": "0x1388"
+	},
+	"nodes": [
+		"enode://7f335a047654f3e70d6f91312a7cf89c39704011f1a584e2698250db3d63817e74b88e26b7854111e16b2c9d0c7173c05419aeee2d0321850227b126d8b1be3f@46.101.156.249:42786",
+		"enode://df872f81e25f72356152b44cab662caf1f2e57c3a156ecd20e9ac9246272af68a2031b4239a0bc831f2c6ab34733a041464d46b3ea36dce88d6c11714446e06b@178.62.208.109:42786",
+		"enode://96d3919b903e7f5ad59ac2f73c43be9172d9d27e2771355db03fd194732b795829a31fe2ea6de109d0804786c39a807e155f065b4b94c6fce167becd0ac02383@45.55.22.34:42786",
+		"enode://5f6c625bf287e3c08aad568de42d868781e961cbda805c8397cfb7be97e229419bef9a5a25a75f97632787106bba8a7caf9060fab3887ad2cfbeb182ab0f433f@46.101.182.53:42786",
+		"enode://d33a8d4c2c38a08971ed975b750f21d54c927c0bf7415931e214465a8d01651ecffe4401e1db913f398383381413c78105656d665d83f385244ab302d6138414@128.199.183.48:42786",
+		"enode://df872f81e25f72356152b44cab662caf1f2e57c3a156ecd20e9ac9246272af68a2031b4239a0bc831f2c6ab34733a041464d46b3ea36dce88d6c11714446e06b@178.62.208.109:42786",
+		"enode://f6f0d6b9b7d02ec9e8e4a16e38675f3621ea5e69860c739a65c1597ca28aefb3cec7a6d84e471ac927d42a1b64c1cbdefad75e7ce8872d57548ddcece20afdd1@159.203.64.95:42786"
+	],
+	"accounts": {
+		"0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
+		"0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
+		"0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
+		"0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
+		"bb94f0ceb32257275b2a7a9c094c13e469b4563e": {
+			"balance": "10000000000000000000000000"
+		},
+		"15656715068ab0dbdf0ab00748a8a19e40f28192": {
+			"balance": "1000000000000000000000000"
+		},
+		"c075fa11f85bda3aaba67106226aaf086ac16f4e": {
+			"balance": "100000000000000000000000"
+		},
+		"93decab0cd745598860f782ac1e8f046cb99e898": {
+			"balance": "10000000000000000000000"
+		}
+	}
+}


@@ -157,6 +157,9 @@
 		"stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"
 	},
 	"nodes": [
+		"enode://cd6611461840543d5b9c56fbf088736154c699c43973b3a1a32390cf27106f87e58a818a606ccb05f3866de95a4fe860786fea71bf891ea95f234480d3022aa3@136.243.154.245:30303",
+		"enode://bcc7240543fe2cf86f5e9093d05753dd83343f8fda7bf0e833f65985c73afccf8f981301e13ef49c4804491eab043647374df1c4adf85766af88a624ecc3330e@136.243.154.244:30303",
+		"enode://ed4227681ca8c70beb2277b9e870353a9693f12e7c548c35df6bca6a956934d6f659999c2decb31f75ce217822eefca149ace914f1cbe461ed5a2ebaf9501455@88.212.206.70:30303",
 		"enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303",
 		"enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303",
 		"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",


@@ -19,6 +19,7 @@
 use common::*;
 use engines::Engine;
 use state::*;
+use state_db::StateDB;
 use verification::PreverifiedBlock;
 use trace::FlatTrace;
 use factory::Factories;
@@ -69,7 +70,7 @@ impl Decodable for Block {
 	}
 }
 
-/// Internal type for a block's common elements.
+/// An internal type for a block's common elements.
 #[derive(Clone)]
 pub struct ExecutedBlock {
 	base: Block,
@@ -179,7 +180,7 @@ pub trait IsBlock {
 /// Trait for a object that has a state database.
 pub trait Drain {
 	/// Drop this object and return the underlieing database.
-	fn drain(self) -> Box<JournalDB>;
+	fn drain(self) -> StateDB;
 }
 
 impl IsBlock for ExecutedBlock {
@@ -205,6 +206,7 @@ pub struct ClosedBlock {
 	block: ExecutedBlock,
 	uncle_bytes: Bytes,
 	last_hashes: Arc<LastHashes>,
+	unclosed_state: State,
 }
 
 /// Just like `ClosedBlock` except that we can't reopen it and it's faster.
@@ -231,7 +233,7 @@ impl<'x> OpenBlock<'x> {
 		engine: &'x Engine,
 		factories: Factories,
 		tracing: bool,
-		db: Box<JournalDB>,
+		db: StateDB,
 		parent: &Header,
 		last_hashes: Arc<LastHashes>,
 		author: Address,
@@ -346,8 +348,7 @@ impl<'x> OpenBlock<'x> {
 	pub fn close(self) -> ClosedBlock {
 		let mut s = self;
 
-		// take a snapshot so the engine's changes can be rolled back.
-		s.block.state.snapshot();
+		let unclosed_state = s.block.state.clone();
 
 		s.engine.on_close_block(&mut s.block);
 		s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec())));
@@ -362,6 +363,7 @@ impl<'x> OpenBlock<'x> {
 			block: s.block,
 			uncle_bytes: uncle_bytes,
 			last_hashes: s.last_hashes,
+			unclosed_state: unclosed_state,
 		}
 	}
@@ -369,9 +371,6 @@ impl<'x> OpenBlock<'x> {
 	pub fn close_and_lock(self) -> LockedBlock {
 		let mut s = self;
 
-		// take a snapshot so the engine's changes can be rolled back.
-		s.block.state.snapshot();
-
 		s.engine.on_close_block(&mut s.block);
 		if s.block.base.header.transactions_root().is_zero() || s.block.base.header.transactions_root() == &SHA3_NULL_RLP {
 			s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec())));
@@ -388,11 +387,10 @@ impl<'x> OpenBlock<'x> {
 		s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator
 		s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used));
 
-		ClosedBlock {
+		LockedBlock {
 			block: s.block,
 			uncle_bytes: uncle_bytes,
-			last_hashes: s.last_hashes,
-		}.lock()
+		}
 	}
 }
@@ -413,17 +411,7 @@ impl ClosedBlock {
 	pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) }
 
 	/// Turn this into a `LockedBlock`, unable to be reopened again.
-	pub fn lock(mut self) -> LockedBlock {
-		// finalize the changes made by the engine.
-		self.block.state.clear_snapshot();
-		if let Err(e) = self.block.state.commit() {
-			warn!("Error committing closed block's state: {:?}", e);
-		}
-
-		// set the state root here, after commit recalculates with the block
-		// rewards.
-		self.block.base.header.set_state_root(self.block.state.root().clone());
-
+	pub fn lock(self) -> LockedBlock {
 		LockedBlock {
 			block: self.block,
 			uncle_bytes: self.uncle_bytes,
@@ -431,12 +419,12 @@ impl ClosedBlock {
 		}
 	}
 
 	/// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`.
-	pub fn reopen(mut self, engine: &Engine) -> OpenBlock {
+	pub fn reopen(self, engine: &Engine) -> OpenBlock {
 		// revert rewards (i.e. set state back at last transaction's state).
-		self.block.state.revert_snapshot();
+		let mut block = self.block;
+		block.state = self.unclosed_state;
 		OpenBlock {
-			block: self.block,
+			block: block,
 			engine: engine,
 			last_hashes: self.last_hashes,
 		}
@@ -462,11 +450,11 @@ impl LockedBlock {
 	/// Provide a valid seal in order to turn this into a `SealedBlock`.
 	/// This does check the validity of `seal` with the engine.
 	/// Returns the `ClosedBlock` back again if the seal is no good.
-	pub fn try_seal(self, engine: &Engine, seal: Vec<Bytes>) -> Result<SealedBlock, LockedBlock> {
+	pub fn try_seal(self, engine: &Engine, seal: Vec<Bytes>) -> Result<SealedBlock, (Error, LockedBlock)> {
 		let mut s = self;
 		s.block.base.header.set_seal(seal);
 		match engine.verify_block_seal(&s.block.base.header) {
-			Err(_) => Err(s),
+			Err(e) => Err((e, s)),
 			_ => Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes }),
 		}
 	}
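
The `try_seal` change above stops swallowing the engine error: the failure case now returns the error together with the still-usable `LockedBlock`. A hedged sketch of caller-side handling (the function and its retry policy are illustrative, not part of this diff):

```rust
// Illustrative caller: log the concrete seal error and hand the
// unsealed block back for another attempt instead of losing it.
fn try_seal_once(block: LockedBlock, engine: &Engine, seal: Vec<Bytes>) -> Result<SealedBlock, LockedBlock> {
	match block.try_seal(engine, seal) {
		Ok(sealed) => Ok(sealed),
		Err((e, unsealed)) => {
			warn!("Seal was rejected: {:?}", e);
			Err(unsealed) // the caller may retry with a different seal
		}
	}
}
```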
@@ -474,7 +462,9 @@ impl LockedBlock {
 
 impl Drain for LockedBlock {
 	/// Drop this object and return the underlieing database.
-	fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
+	fn drain(self) -> StateDB {
+		self.block.state.drop().1
+	}
 }
 
 impl SealedBlock {
@@ -490,7 +480,9 @@ impl SealedBlock {
 
 impl Drain for SealedBlock {
 	/// Drop this object and return the underlieing database.
-	fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
+	fn drain(self) -> StateDB {
+		self.block.state.drop().1
+	}
 }
 
 impl IsBlock for SealedBlock {
@@ -505,7 +497,7 @@ pub fn enact(
 	uncles: &[Header],
 	engine: &Engine,
 	tracing: bool,
-	db: Box<JournalDB>,
+	db: StateDB,
 	parent: &Header,
 	last_hashes: Arc<LastHashes>,
 	factories: Factories,
@@ -537,7 +529,7 @@ pub fn enact_bytes(
 	block_bytes: &[u8],
 	engine: &Engine,
 	tracing: bool,
-	db: Box<JournalDB>,
+	db: StateDB,
 	parent: &Header,
 	last_hashes: Arc<LastHashes>,
 	factories: Factories,
@@ -553,7 +545,7 @@ pub fn enact_verified(
 	block: &PreverifiedBlock,
 	engine: &Engine,
 	tracing: bool,
-	db: Box<JournalDB>,
+	db: StateDB,
 	parent: &Header,
 	last_hashes: Arc<LastHashes>,
 	factories: Factories,
@@ -568,7 +560,7 @@ pub fn enact_and_seal(
 	block_bytes: &[u8],
 	engine: &Engine,
 	tracing: bool,
-	db: Box<JournalDB>,
+	db: StateDB,
 	parent: &Header,
 	last_hashes: Arc<LastHashes>,
 	factories: Factories,
@@ -588,9 +580,9 @@ mod tests {
 	use spec::*;
 	let spec = Spec::new_test();
 	let genesis_header = spec.genesis_header();
-	let mut db_result = get_temp_journal_db();
+	let mut db_result = get_temp_state_db();
 	let mut db = db_result.take();
-	spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+	spec.ensure_db_good(&mut db).unwrap();
 	let last_hashes = Arc::new(vec![genesis_header.hash()]);
 	let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 	let b = b.close_and_lock();
@@ -604,25 +596,25 @@ mod tests {
 		let engine = &*spec.engine;
 		let genesis_header = spec.genesis_header();
 
-		let mut db_result = get_temp_journal_db();
+		let mut db_result = get_temp_state_db();
 		let mut db = db_result.take();
-		spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+		spec.ensure_db_good(&mut db).unwrap();
 		let last_hashes = Arc::new(vec![genesis_header.hash()]);
 		let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap()
 			.close_and_lock().seal(engine, vec![]).unwrap();
 		let orig_bytes = b.rlp_bytes();
 		let orig_db = b.drain();
 
-		let mut db_result = get_temp_journal_db();
+		let mut db_result = get_temp_state_db();
 		let mut db = db_result.take();
-		spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+		spec.ensure_db_good(&mut db).unwrap();
 		let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap();
 
 		assert_eq!(e.rlp_bytes(), orig_bytes);
 
 		let db = e.drain();
-		assert_eq!(orig_db.keys(), db.keys());
-		assert!(orig_db.keys().iter().filter(|k| orig_db.get(k.0) != db.get(k.0)).next() == None);
+		assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys());
+		assert!(orig_db.journal_db().keys().iter().filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)).next() == None);
 	}
 
 	#[test]
@@ -632,9 +624,9 @@ mod tests {
 		let engine = &*spec.engine;
 		let genesis_header = spec.genesis_header();
 
-		let mut db_result = get_temp_journal_db();
+		let mut db_result = get_temp_state_db();
 		let mut db = db_result.take();
-		spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+		spec.ensure_db_good(&mut db).unwrap();
 		let last_hashes = Arc::new(vec![genesis_header.hash()]);
 		let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 		let mut uncle1_header = Header::new();
@@ -648,9 +640,9 @@ mod tests {
 		let orig_bytes = b.rlp_bytes();
 		let orig_db = b.drain();
 
-		let mut db_result = get_temp_journal_db();
+		let mut db_result = get_temp_state_db();
 		let mut db = db_result.take();
-		spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+		spec.ensure_db_good(&mut db).unwrap();
 		let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap();
 
 		let bytes = e.rlp_bytes();
@@ -659,7 +651,7 @@ mod tests {
 		assert_eq!(uncles[1].extra_data(), b"uncle2");
 
 		let db = e.drain();
-		assert_eq!(orig_db.keys(), db.keys());
-		assert!(orig_db.keys().iter().filter(|k| orig_db.get(k.0) != db.get(k.0)).next() == None);
+		assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys());
+		assert!(orig_db.journal_db().keys().iter().filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)).next() == None);
 	}
 }
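
Taken together, the block.rs changes above replace the old snapshot/revert machinery with a plain clone of the pre-close state stashed in `ClosedBlock::unclosed_state` (presumably cheap now that `State` sits on the copy-on-write `StateDB`). A toy sketch of the pattern, with all types simplified:

```rust
// Miniature of the new close/reopen flow: save a clone before the
// engine mutates state, restore it by assignment on reopen.
#[derive(Clone)]
struct State { root: u64 }

struct ClosedBlock { state: State, unclosed_state: State }

fn close(mut state: State) -> ClosedBlock {
	let unclosed_state = state.clone(); // replaces state.snapshot()
	state.root += 1;                    // stands in for engine.on_close_block()
	ClosedBlock { state: state, unclosed_state: unclosed_state }
}

fn reopen(closed: ClosedBlock) -> State {
	// replaces state.revert_snapshot(): just restore the saved clone
	closed.unclosed_state
}

fn main() {
	let closed = close(State { root: 0 });
	assert_eq!(closed.state.root, 1);
	let reopened = reopen(closed);
	assert_eq!(reopened.root, 0);
}
```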


@@ -181,7 +181,7 @@ pub struct BlockChain {
 	pending_best_block: RwLock<Option<BestBlock>>,
 	pending_block_hashes: RwLock<HashMap<BlockNumber, H256>>,
-	pending_transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
+	pending_transaction_addresses: RwLock<HashMap<H256, Option<TransactionAddress>>>,
 }
 
 impl BlockProvider for BlockChain {
@@ -331,11 +331,12 @@ impl BlockProvider for BlockChain {
 			.filter_map(|number| self.block_hash(number).map(|hash| (number, hash)))
 			.filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
 			.filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes())))
-			.flat_map(|(number, hash, mut receipts, hashes)| {
+			.flat_map(|(number, hash, mut receipts, mut hashes)| {
 				assert_eq!(receipts.len(), hashes.len());
 				log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len());
 
 				let receipts_len = receipts.len();
+				hashes.reverse();
 				receipts.reverse();
 				receipts.into_iter()
 					.map(|receipt| receipt.logs)
@@ -680,8 +681,8 @@ impl BlockChain {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: self.prepare_block_details_update(bytes, &info),
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
-			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			info: info,
 			block: bytes
 		}, is_best);
@@ -714,8 +715,8 @@ impl BlockChain {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: update,
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
-			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			info: info,
 			block: bytes,
 		}, is_best);
@@ -783,8 +784,8 @@ impl BlockChain {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: self.prepare_block_details_update(bytes, &info),
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
-			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			info: info.clone(),
 			block: bytes,
 		}, true);
@@ -877,7 +878,7 @@ impl BlockChain {
 			let mut write_txs = self.pending_transaction_addresses.write();
 
 			batch.extend_with_cache(db::COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Overwrite);
-			batch.extend_with_cache(db::COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
+			batch.extend_with_option_cache(db::COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
 		}
 	}
@@ -895,18 +896,25 @@ impl BlockChain {
 			*best_block = block;
 		}
 
+		let pending_txs = mem::replace(&mut *pending_write_txs, HashMap::new());
+		let (retracted_txs, enacted_txs) = pending_txs.into_iter().partition::<HashMap<_, _>, _>(|&(_, ref value)| value.is_none());
+
 		let pending_hashes_keys: Vec<_> = pending_write_hashes.keys().cloned().collect();
-		let pending_txs_keys: Vec<_> = pending_write_txs.keys().cloned().collect();
+		let enacted_txs_keys: Vec<_> = enacted_txs.keys().cloned().collect();
 
 		write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new()));
-		write_txs.extend(mem::replace(&mut *pending_write_txs, HashMap::new()));
+		write_txs.extend(enacted_txs.into_iter().map(|(k, v)| (k, v.expect("Transactions were partitioned; qed"))));
+
+		for hash in retracted_txs.keys() {
+			write_txs.remove(hash);
+		}
 
 		let mut cache_man = self.cache_man.lock();
 		for n in pending_hashes_keys {
 			cache_man.note_used(CacheID::BlockHashes(n));
 		}
 
-		for hash in pending_txs_keys {
+		for hash in enacted_txs_keys {
 			cache_man.note_used(CacheID::TransactionAddresses(hash));
 		}
 	}
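
The commit step above uses `Iterator::partition` to split the pending map into retracted (`None`) and enacted (`Some`) transaction addresses before touching the cache. A self-contained miniature of the same idiom:

```rust
use std::collections::HashMap;

fn main() {
	// Pending entries: None marks a retracted transaction, Some(_) an
	// enacted one, mirroring HashMap<H256, Option<TransactionAddress>>.
	let pending: HashMap<&str, Option<u32>> =
		vec![("t1", None), ("t2", Some(7))].into_iter().collect();

	let (retracted, enacted): (HashMap<_, _>, HashMap<_, _>) =
		pending.into_iter().partition(|&(_, ref v)| v.is_none());

	// Retracted hashes get removed from the cache; enacted ones are
	// unwrapped and written through.
	assert!(retracted.contains_key("t1"));
	assert_eq!(enacted.get("t2"), Some(&Some(7)));
}
```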
@@ -1008,7 +1016,7 @@ impl BlockChain {
 	}
 
 	/// This function returns modified transaction addresses.
-	fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, TransactionAddress> {
+	fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, Option<TransactionAddress>> {
 		let block = BlockView::new(block_bytes);
 		let transaction_hashes = block.transaction_hashes();
@@ -1017,10 +1025,10 @@ impl BlockChain {
 				transaction_hashes.into_iter()
 					.enumerate()
 					.map(|(i ,tx_hash)| {
-						(tx_hash, TransactionAddress {
+						(tx_hash, Some(TransactionAddress {
 							block_hash: info.hash.clone(),
 							index: i
-						})
+						}))
 					})
 					.collect()
 			},
@@ -1031,23 +1039,30 @@ impl BlockChain {
 					let hashes = BodyView::new(&bytes).transaction_hashes();
 					hashes.into_iter()
 						.enumerate()
-						.map(|(i, tx_hash)| (tx_hash, TransactionAddress {
+						.map(|(i, tx_hash)| (tx_hash, Some(TransactionAddress {
 							block_hash: hash.clone(),
 							index: i,
-						}))
-						.collect::<HashMap<H256, TransactionAddress>>()
+						})))
+						.collect::<HashMap<H256, Option<TransactionAddress>>>()
 				});
 
 				let current_addresses = transaction_hashes.into_iter()
 					.enumerate()
 					.map(|(i ,tx_hash)| {
-						(tx_hash, TransactionAddress {
+						(tx_hash, Some(TransactionAddress {
 							block_hash: info.hash.clone(),
 							index: i
-						})
+						}))
 					});
 
-				addresses.chain(current_addresses).collect()
+				let retracted = data.retracted.iter().flat_map(|hash| {
+					let bytes = self.block_body(hash).expect("Retracted block must be in database.");
+					let hashes = BodyView::new(&bytes).transaction_hashes();
+					hashes.into_iter().map(|hash| (hash, None)).collect::<HashMap<H256, Option<TransactionAddress>>>()
+				});
+
+				// The order here is important! Don't remove transaction if it was part of enacted blocks as well.
+				retracted.chain(addresses).chain(current_addresses).collect()
 			},
 			BlockLocation::Branch => HashMap::new(),
 		}
@ -1345,6 +1360,70 @@ mod tests {
// TODO: insert block that already includes one of them as an uncle to check it's not allowed. // TODO: insert block that already includes one of them as an uncle to check it's not allowed.
} }
#[test]
fn test_fork_transaction_addresses() {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let mut fork_chain = canon_chain.fork(1);
let mut fork_finalizer = finalizer.fork();
let t1 = Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: 100.into(),
data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
}.sign(&"".sha3());
let b1a = canon_chain
.with_transaction(t1.clone())
.generate(&mut finalizer).unwrap();
// Empty block
let b1b = fork_chain
.generate(&mut fork_finalizer).unwrap();
let b2 = fork_chain
.generate(&mut fork_finalizer).unwrap();
let b1a_hash = BlockView::new(&b1a).header_view().sha3();
let b2_hash = BlockView::new(&b2).header_view().sha3();
let t1_hash = t1.hash();
let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let mut batch = db.transaction();
let _ = bc.insert_block(&mut batch, &b1a, vec![]);
bc.commit();
let _ = bc.insert_block(&mut batch, &b1b, vec![]);
bc.commit();
db.write(batch).unwrap();
assert_eq!(bc.best_block_hash(), b1a_hash);
assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
block_hash: b1a_hash.clone(),
index: 0,
}));
// now let's make forked chain the canon chain
let mut batch = db.transaction();
let _ = bc.insert_block(&mut batch, &b2, vec![]);
bc.commit();
db.write(batch).unwrap();
// Transaction should be retracted
assert_eq!(bc.best_block_hash(), b2_hash);
assert_eq!(bc.transaction_address(&t1_hash), None);
}
#[test] #[test]
fn test_overwriting_transaction_addresses() { fn test_overwriting_transaction_addresses() {
let mut canon_chain = ChainGenerator::default(); let mut canon_chain = ChainGenerator::default();
@@ -1415,14 +1494,14 @@ mod tests {
 		db.write(batch).unwrap();
 
 		assert_eq!(bc.best_block_hash(), b1a_hash);
-		assert_eq!(bc.transaction_address(&t1_hash).unwrap(), TransactionAddress {
+		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
 			block_hash: b1a_hash.clone(),
 			index: 0,
-		});
+		}));
-		assert_eq!(bc.transaction_address(&t2_hash).unwrap(), TransactionAddress {
+		assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress {
 			block_hash: b1a_hash.clone(),
 			index: 1,
-		});
+		}));
 
 		// now let's make forked chain the canon chain
 		let mut batch = db.transaction();
@@ -1431,18 +1510,18 @@ mod tests {
 		db.write(batch).unwrap();
 
 		assert_eq!(bc.best_block_hash(), b2_hash);
-		assert_eq!(bc.transaction_address(&t1_hash).unwrap(), TransactionAddress {
+		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
 			block_hash: b1b_hash.clone(),
 			index: 1,
-		});
+		}));
-		assert_eq!(bc.transaction_address(&t2_hash).unwrap(), TransactionAddress {
+		assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress {
 			block_hash: b1b_hash.clone(),
 			index: 0,
-		});
+		}));
-		assert_eq!(bc.transaction_address(&t3_hash).unwrap(), TransactionAddress {
+		assert_eq!(bc.transaction_address(&t3_hash), Some(TransactionAddress {
 			block_hash: b2_hash.clone(),
 			index: 0,
-		});
+		}));
 	}
 
 	#[test]
@@ -1682,7 +1761,7 @@ mod tests {
 			gas_price: 0.into(),
 			gas: 100_000.into(),
 			action: Action::Create,
-			value: 100.into(),
+			value: 101.into(),
 			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
 		}.sign(&"".sha3());
 		let t2 = Transaction {
@@ -1690,7 +1769,7 @@ mod tests {
 			gas_price: 0.into(),
 			gas: 100_000.into(),
 			action: Action::Create,
-			value: 100.into(),
+			value: 102.into(),
 			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
 		}.sign(&"".sha3());
 		let t3 = Transaction {
@@ -1698,7 +1777,7 @@ mod tests {
 			gas_price: 0.into(),
 			gas: 100_000.into(),
 			action: Action::Create,
-			value: 100.into(),
+			value: 103.into(),
 			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
 		}.sign(&"".sha3());
 		let tx_hash1 = t1.hash();


@@ -17,8 +17,8 @@ pub struct ExtrasUpdate<'a> {
pub block_details: HashMap<H256, BlockDetails>,
/// Modified block receipts.
pub block_receipts: HashMap<H256, BlockReceipts>,
-/// Modified transaction addresses.
-pub transactions_addresses: HashMap<H256, TransactionAddress>,
/// Modified blocks blooms.
pub blocks_blooms: HashMap<LogGroupPosition, BloomGroup>,
+/// Modified transaction addresses (None signifies removed transactions).
+pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>,
}
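The Option-valued map lets a single ExtrasUpdate batch carry both inserts and deletes. A minimal sketch of the idea, with H256 and TransactionAddress reduced to stand-in types rather than the real ethcore definitions:

```rust
use std::collections::HashMap;

// Stand-ins for the real ethcore types, for illustration only.
type H256 = [u8; 32];

#[derive(Debug, Clone, PartialEq)]
struct TransactionAddress {
    block_hash: H256,
    index: usize,
}

fn main() {
    let mut updates: HashMap<H256, Option<TransactionAddress>> = HashMap::new();

    // A transaction that landed in the new canonical block keeps an address...
    updates.insert([1u8; 32], Some(TransactionAddress { block_hash: [9u8; 32], index: 0 }));
    // ...while one that only existed in the retracted branch maps to None,
    // which the DB layer turns into a delete.
    updates.insert([2u8; 32], None);

    for (tx, addr) in &updates {
        match addr {
            Some(a) => println!("put tx {:?} -> index {} in block {:?}", tx[0], a.index, a.block_hash[0]),
            None => println!("delete tx {:?}", tx[0]),
        }
    }
}
```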

View File

@@ -32,7 +32,7 @@ use util::kvdb::*;
// other
use ethkey::recover;
use io::*;
-use views::{BlockView, HeaderView, BodyView};
+use views::{HeaderView, BodyView};
use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError};
use header::BlockNumber;
use state::State;
@@ -48,7 +48,7 @@ use transaction::{LocalizedTransaction, SignedTransaction, Action};
use blockchain::extras::TransactionAddress;
use types::filter::Filter;
use log_entry::LocalizedLogEntry;
-use block_queue::{BlockQueue, BlockQueueInfo};
+use verification::queue::{BlockQueue, QueueInfo as BlockQueueInfo};
use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
use client::{
BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient,
@@ -67,7 +67,7 @@ use miner::{Miner, MinerService};
use snapshot::{self, io as snapshot_io};
use factory::Factories;
use rlp::{View, UntrustedRlp};
+use state_db::StateDB;
// re-export
pub use types::blockchain_info::BlockChainInfo;
@@ -127,9 +127,9 @@ pub struct Client {
tracedb: RwLock<TraceDB<BlockChain>>,
engine: Arc<Engine>,
config: ClientConfig,
-db: RwLock<Arc<Database>>,
pruning: journaldb::Algorithm,
-state_db: RwLock<Box<JournalDB>>,
+db: RwLock<Arc<Database>>,
+state_db: Mutex<StateDB>,
block_queue: BlockQueue,
report: RwLock<ClientReport>,
import_lock: Mutex<()>,
@@ -173,14 +173,15 @@ impl Client {
let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));
-let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
-if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) {
+let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
+let mut state_db = StateDB::new(journal_db);
+if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db)) {
let mut batch = DBTransaction::new(&db);
try!(state_db.commit(&mut batch, 0, &spec.genesis_header().hash(), None));
try!(db.write(batch).map_err(ClientError::Database));
}
-if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) {
+if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.journal_db().contains(h.state_root())) {
warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex());
}
@@ -209,7 +210,7 @@ impl Client {
verifier: verification::new(config.verifier_type.clone()),
config: config,
db: RwLock::new(db),
-state_db: RwLock::new(state_db),
+state_db: Mutex::new(state_db),
block_queue: block_queue,
report: RwLock::new(Default::default()),
import_lock: Mutex::new(()),
@@ -300,7 +301,8 @@ impl Client {
// Enact Verified Block
let parent = chain_has_parent.unwrap();
let last_hashes = self.build_last_hashes(header.parent_hash().clone());
-let db = self.state_db.read().boxed_clone();
+let is_canon = header.parent_hash() == &chain.best_block_hash();
+let db = if is_canon { self.state_db.lock().boxed_clone_canon() } else { self.state_db.lock().boxed_clone() };
let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone());
if let Err(e) = enact_result {
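The new branch above distinguishes blocks that extend the current best block from side-chain blocks; only the former get the canonical-cache clone. A rough sketch of that decision, with a stub StateDb standing in for ethcore's StateDB (the method names follow the diff, the cache semantics are an assumption):

```rust
// Illustrative stub: the real StateDB carries an account cache that is
// only valid along the canonical chain.
struct StateDb;

impl StateDb {
    fn boxed_clone(&self) -> StateDb { StateDb }       // plain clone, no shared cache
    fn boxed_clone_canon(&self) -> StateDb { StateDb } // may reuse the canon cache
}

fn clone_for_enactment(state_db: &StateDb, parent_hash: u64, best_hash: u64) -> StateDb {
    // Only a block building directly on the best block may reuse the canon cache.
    if parent_hash == best_hash {
        state_db.boxed_clone_canon()
    } else {
        state_db.boxed_clone()
    }
}

fn main() {
    let db = StateDb;
    let _canon = clone_for_enactment(&db, 42, 42); // extends the best block
    let _side = clone_for_enactment(&db, 7, 42);   // side-chain block
}
```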
@@ -443,7 +445,8 @@ impl Client {
// CHECK! I *think* this is fine, even if the state_root is equal to another
// already-imported block of the same number.
// TODO: Prove it with a test.
-block.drain().commit(&mut batch, number, hash, ancient).expect("DB commit failed.");
+let mut state = block.drain();
+state.commit(&mut batch, number, hash, ancient).expect("DB commit failed.");
let route = chain.insert_block(&mut batch, block_data, receipts);
self.tracedb.read().import(&mut batch, TraceImportRequest {
@@ -456,7 +459,6 @@ impl Client {
// Final commit to the DB
self.db.read().write_buffered(batch);
chain.commit();
self.update_last_hashes(&parent, hash);
route
}
@@ -498,7 +500,7 @@ impl Client {
};
self.block_header(id).and_then(|header| {
-let db = self.state_db.read().boxed_clone();
+let db = self.state_db.lock().boxed_clone();
// early exit for pruned blocks
if db.is_pruned() && self.chain.read().best_block_number() >= block_number + HISTORY {
@@ -529,7 +531,7 @@ impl Client {
/// Get a copy of the best block's state.
pub fn state(&self) -> State {
State::from_existing(
-self.state_db.read().boxed_clone(),
+self.state_db.lock().boxed_clone(),
HeaderView::new(&self.best_block_header()).state_root(),
self.engine.account_start_nonce(),
self.factories.clone())
@@ -544,7 +546,7 @@ impl Client {
/// Get the report.
pub fn report(&self) -> ClientReport {
let mut report = self.report.read().clone();
-report.state_db_mem = self.state_db.read().mem_used();
+report.state_db_mem = self.state_db.lock().mem_used();
report
}
@@ -600,7 +602,7 @@ impl Client {
/// Take a snapshot at the given block.
/// If the ID given is "latest", this will default to 1000 blocks behind.
pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), EthcoreError> {
-let db = self.state_db.read().boxed_clone();
+let db = self.state_db.lock().journal_db().boxed_clone();
let best_block_number = self.chain_info().best_block_number;
let block_number = try!(self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at)));
@@ -679,14 +681,14 @@ impl snapshot::DatabaseRestore for Client {
trace!(target: "snapshot", "Replacing client database with {:?}", new_db);
let _import_lock = self.import_lock.lock();
-let mut state_db = self.state_db.write();
+let mut state_db = self.state_db.lock();
let mut chain = self.chain.write();
let mut tracedb = self.tracedb.write();
self.miner.clear();
let db = self.db.write();
try!(db.restore(new_db));
-*state_db = journaldb::new(db.clone(), self.pruning, ::db::COL_STATE);
+*state_db = StateDB::new(journaldb::new(db.clone(), self.pruning, ::db::COL_STATE));
*chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone()));
*tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone());
Ok(())
@@ -806,7 +808,7 @@ impl BlockChainClient for Client {
let chain = self.chain.read();
match Self::block_hash(&chain, id) {
Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain,
-Some(hash) => self.block_queue.block_status(&hash),
+Some(hash) => self.block_queue.status(&hash).into(),
None => BlockStatus::Unknown
}
}
@@ -853,9 +855,12 @@ impl BlockChainClient for Client {
fn transaction_receipt(&self, id: TransactionID) -> Option<LocalizedReceipt> {
let chain = self.chain.read();
-self.transaction_address(id).and_then(|address| chain.block_number(&address.block_hash).and_then(|block_number| {
+self.transaction_address(id)
+.and_then(|address| chain.block_number(&address.block_hash).and_then(|block_number| {
let t = chain.block_body(&address.block_hash)
-.and_then(|block| BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index));
+.and_then(|block| {
+BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index)
+});
match (t, chain.transaction_receipt(&address)) {
(Some(tx), Some(receipt)) => {
@@ -910,7 +915,7 @@ impl BlockChainClient for Client {
}
fn state_data(&self, hash: &H256) -> Option<Bytes> {
-self.state_db.read().state(hash)
+self.state_db.lock().journal_db().state(hash)
}
fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
@@ -918,16 +923,21 @@ impl BlockChainClient for Client {
}
fn import_block(&self, bytes: Bytes) -> Result<H256, BlockImportError> {
+use verification::queue::kind::HasHash;
+use verification::queue::kind::blocks::Unverified;
+// create unverified block here so the `sha3` calculation can be cached.
+let unverified = Unverified::new(bytes);
{
-let header = BlockView::new(&bytes).header_view();
-if self.chain.read().is_known(&header.sha3()) {
+if self.chain.read().is_known(&unverified.hash()) {
return Err(BlockImportError::Import(ImportError::AlreadyInChain));
}
-if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown {
-return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash())));
+if self.block_status(BlockID::Hash(unverified.parent_hash())) == BlockStatus::Unknown {
+return Err(BlockImportError::Block(BlockError::UnknownParent(unverified.parent_hash())));
}
}
-Ok(try!(self.block_queue.import_block(bytes)))
+Ok(try!(self.block_queue.import(unverified)))
}
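Wrapping the raw bytes in Unverified up front means the sha3 hash is computed once and then reused by the duplicate check, the parent lookup, and the queue. A self-contained sketch of the hash-once pattern (the hash function here is a toy stand-in for sha3):

```rust
// Sketch of caching an expensive hash at construction time, as the new
// Unverified wrapper does; the "hash" below is a trivial placeholder.
struct Unverified {
    bytes: Vec<u8>,
    hash: u64,
}

impl Unverified {
    fn new(bytes: Vec<u8>) -> Self {
        // Computed exactly once, however many times callers ask for it.
        let hash = bytes.iter().fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(*b as u64));
        Unverified { bytes, hash }
    }

    fn hash(&self) -> u64 { self.hash }
}

fn main() {
    let block = Unverified::new(vec![1, 2, 3]);
    // The duplicate check and the queue import both reuse the cached value.
    assert_eq!(block.hash(), block.hash());
    println!("cached hash: {:#x}, {} bytes", block.hash(), block.bytes.len());
}
```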
fn queue_info(&self) -> BlockQueueInfo {
@@ -1062,7 +1072,7 @@ impl MiningBlockChainClient for Client {
engine,
self.factories.clone(),
false, // TODO: this will need to be parameterised once we want to do immediate mining insertion.
-self.state_db.read().boxed_clone(),
+self.state_db.lock().boxed_clone(),
&chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"),
self.build_last_hashes(h.clone()),
author,

View File

@@ -16,11 +16,11 @@
use std::str::FromStr;
pub use std::time::Duration;
-pub use block_queue::BlockQueueConfig;
pub use blockchain::Config as BlockChainConfig;
pub use trace::Config as TraceConfig;
pub use evm::VMType;
-pub use verification::VerifierType;
+use verification::{VerifierType, QueueConfig};
use util::{journaldb, CompactionProfile};
use util::trie::TrieSpec;
@@ -84,7 +84,7 @@ impl Default for Mode {
#[derive(Debug, PartialEq, Default)]
pub struct ClientConfig {
/// Block queue configuration.
-pub queue: BlockQueueConfig,
+pub queue: QueueConfig,
/// Blockchain configuration.
pub blockchain: BlockChainConfig,
/// Trace configuration.

View File

@@ -23,7 +23,7 @@ mod trace;
mod client;
pub use self::client::*;
-pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, VMType};
+pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockChainConfig, VMType};
pub use self::error::Error;
pub use types::ids::*;
pub use self::test_client::{TestBlockChainClient, EachBlockWith};

View File

@@ -37,11 +37,12 @@ use evm::{Factory as EvmFactory, VMType};
use miner::{Miner, MinerService, TransactionImportResult};
use spec::Spec;
-use block_queue::BlockQueueInfo;
+use verification::queue::QueueInfo;
use block::{OpenBlock, SealedBlock};
use executive::Executed;
use error::CallError;
use trace::LocalizedTrace;
+use state_db::StateDB;
/// Test client.
pub struct TestBlockChainClient {
@@ -283,13 +284,14 @@ impl TestBlockChainClient {
}
}
-pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
+pub fn get_temp_state_db() -> GuardedTempResult<StateDB> {
let temp = RandomTempPath::new();
let db = Database::open_default(temp.as_str()).unwrap();
let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, None);
+let state_db = StateDB::new(journal_db);
GuardedTempResult {
_temp: temp,
-result: Some(journal_db)
+result: Some(state_db)
}
}
@@ -297,9 +299,9 @@ impl MiningBlockChainClient for TestBlockChainClient {
fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
let engine = &*self.spec.engine;
let genesis_header = self.spec.genesis_header();
-let mut db_result = get_temp_journal_db();
+let mut db_result = get_temp_state_db();
let mut db = db_result.take();
-self.spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+self.spec.ensure_db_good(&mut db).unwrap();
let last_hashes = vec![genesis_header.hash()];
let mut open_block = OpenBlock::new(
@@ -383,11 +385,11 @@ impl BlockChainClient for TestBlockChainClient {
}
fn transaction(&self, _id: TransactionID) -> Option<LocalizedTransaction> {
-unimplemented!();
+None // Simple default.
}
fn uncle(&self, _id: UncleID) -> Option<Bytes> {
-unimplemented!();
+None // Simple default.
}
fn transaction_receipt(&self, id: TransactionID) -> Option<LocalizedReceipt> {
@@ -544,8 +546,8 @@ impl BlockChainClient for TestBlockChainClient {
Ok(h)
}
-fn queue_info(&self) -> BlockQueueInfo {
-BlockQueueInfo {
+fn queue_info(&self) -> QueueInfo {
+QueueInfo {
verified_queue_size: self.queue_size.load(AtomicOrder::Relaxed),
unverified_queue_size: 0,
verifying_queue_size: 0,
View File

@@ -17,7 +17,7 @@
use std::collections::BTreeMap;
use util::{U256, Address, H256, H2048, Bytes, Itertools};
use blockchain::TreeRoute;
-use block_queue::BlockQueueInfo;
+use verification::queue::QueueInfo as BlockQueueInfo;
use block::{OpenBlock, SealedBlock};
use header::{BlockNumber};
use transaction::{LocalizedTransaction, SignedTransaction};

View File

@@ -86,6 +86,9 @@ pub trait Writable {
/// Writes the value into the database.
fn write<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: Deref<Target = [u8]>;
+/// Deletes key from the database.
+fn delete<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>) where T: rlp::Encodable, R: Deref<Target = [u8]>;
/// Writes the value into the database and updates the cache.
fn write_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
K: Key<T, Target = R> + Hash + Eq,
@@ -122,6 +125,34 @@ pub trait Writable {
},
}
}
+/// Writes and removes the values into the database and updates the cache.
+fn extend_with_option_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut Cache<K, Option<T>>, values: HashMap<K, Option<T>>, policy: CacheUpdatePolicy) where
+K: Key<T, Target = R> + Hash + Eq,
+T: rlp::Encodable,
+R: Deref<Target = [u8]> {
+match policy {
+CacheUpdatePolicy::Overwrite => {
+for (key, value) in values.into_iter() {
+match value {
+Some(ref v) => self.write(col, &key, v),
+None => self.delete(col, &key),
+}
+cache.insert(key, value);
+}
+},
+CacheUpdatePolicy::Remove => {
+for (key, value) in values.into_iter() {
+match value {
+Some(v) => self.write(col, &key, &v),
+None => self.delete(col, &key),
+}
+cache.remove(&key);
+}
+},
+}
+}
}
/// Should be used to read values from database.
@@ -173,6 +204,10 @@ impl Writable for DBTransaction {
fn write<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: Deref<Target = [u8]> {
self.put(col, &key.key(), &rlp::encode(value));
}
+fn delete<T, R>(&mut self, col: Option<u32>, key: &Key<T, Target = R>) where T: rlp::Encodable, R: Deref<Target = [u8]> {
+self.delete(col, &key.key());
+}
}
impl Readable for Database {
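The new extend_with_option_cache treats Some(v) as a put and None as a delete, then either overwrites or invalidates the cache entry depending on the policy. A miniature of the same logic with a HashMap standing in for the database (types here are illustrative only):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy)]
enum CacheUpdatePolicy { Overwrite, Remove }

// Miniature of extend_with_option_cache: Some(v) writes, None deletes,
// and the cache is updated or invalidated per the policy.
fn extend_with_option_cache(
    db: &mut HashMap<String, String>,
    cache: &mut HashMap<String, Option<String>>,
    values: HashMap<String, Option<String>>,
    policy: CacheUpdatePolicy,
) {
    for (key, value) in values {
        match value {
            Some(ref v) => { db.insert(key.clone(), v.clone()); }
            None => { db.remove(&key); }
        }
        match policy {
            CacheUpdatePolicy::Overwrite => { cache.insert(key, value); }
            CacheUpdatePolicy::Remove => { cache.remove(&key); }
        }
    }
}

fn main() {
    let (mut db, mut cache) = (HashMap::new(), HashMap::new());
    let mut batch = HashMap::new();
    batch.insert("t1".to_string(), Some("addr".to_string())); // put
    batch.insert("t2".to_string(), None);                     // delete
    extend_with_option_cache(&mut db, &mut cache, batch, CacheUpdatePolicy::Overwrite);
    assert!(db.contains_key("t1") && !db.contains_key("t2"));
}
```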

View File

@@ -252,9 +252,9 @@ mod tests {
let spec = new_test_authority();
let engine = &*spec.engine;
let genesis_header = spec.genesis_header();
-let mut db_result = get_temp_journal_db();
+let mut db_result = get_temp_state_db();
let mut db = db_result.take();
-spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+spec.ensure_db_good(&mut db).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close_and_lock();

View File

@@ -81,9 +81,9 @@ mod tests {
let spec = Spec::new_test_instant();
let engine = &*spec.engine;
let genesis_header = spec.genesis_header();
-let mut db_result = get_temp_journal_db();
+let mut db_result = get_temp_state_db();
let mut db = db_result.take();
-spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+spec.ensure_db_good(&mut db).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close_and_lock();

View File

@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-use ethash::{quick_get_difficulty, EthashManager, H256 as EH256};
+use ethash::{quick_get_difficulty, slow_get_seedhash, EthashManager, H256 as EH256};
use common::*;
use block::*;
use spec::CommonParams;
@@ -32,6 +32,8 @@ pub struct EthashParams {
pub minimum_difficulty: U256,
/// Difficulty bound divisor.
pub difficulty_bound_divisor: U256,
+/// Difficulty increment divisor.
+pub difficulty_increment_divisor: u64,
/// Block duration.
pub duration_limit: u64,
/// Block reward.
@@ -46,6 +48,12 @@ pub struct EthashParams {
pub dao_hardfork_beneficiary: Address,
/// DAO hard-fork DAO accounts list (L)
pub dao_hardfork_accounts: Vec<Address>,
+/// Transition block for a change of difficulty params (currently just bound_divisor).
+pub difficulty_hardfork_transition: u64,
+/// Difficulty param after the difficulty transition.
+pub difficulty_hardfork_bound_divisor: U256,
+/// Block on which there is no additional difficulty from the exponential bomb.
+pub bomb_defuse_transition: u64,
}
impl From<ethjson::spec::EthashParams> for EthashParams {
@@ -54,6 +62,7 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(),
minimum_difficulty: p.minimum_difficulty.into(),
difficulty_bound_divisor: p.difficulty_bound_divisor.into(),
+difficulty_increment_divisor: p.difficulty_increment_divisor.map_or(10, Into::into),
duration_limit: p.duration_limit.into(),
block_reward: p.block_reward.into(),
registrar: p.registrar.map_or_else(Address::new, Into::into),
@@ -61,6 +70,9 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
dao_hardfork_transition: p.dao_hardfork_transition.map_or(0x7fffffffffffffff, Into::into),
dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map_or_else(Address::new, Into::into),
dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or_else(Vec::new).into_iter().map(Into::into).collect(),
+difficulty_hardfork_transition: p.difficulty_hardfork_transition.map_or(0x7fffffffffffffff, Into::into),
+difficulty_hardfork_bound_divisor: p.difficulty_hardfork_bound_divisor.map_or(p.difficulty_bound_divisor.into(), Into::into),
+bomb_defuse_transition: p.bomb_defuse_transition.map_or(0x7fffffffffffffff, Into::into),
}
}
}
@@ -168,6 +180,10 @@ impl Engine for Ethash {
fields.state.add_balance(u.author(), &(reward * U256::from(8 + u.number() - current_number) / U256::from(8)));
}
+// Commit state so that we can actually figure out the state root.
+if let Err(e) = fields.state.commit() {
+warn!("Encountered error on state commit: {}", e);
+}
}
fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> {
@@ -217,6 +233,7 @@ impl Engine for Ethash {
let result = self.pow.compute_light(header.number() as u64, &Ethash::to_ethash(header.bare_hash()), header.nonce().low_u64());
let mix = Ethash::from_ethash(result.mix_hash);
let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(result.value));
+trace!(target: "miner", "num: {}, seed: {}, h: {}, non: {}, mix: {}, res: {}" , header.number() as u64, Ethash::from_ethash(slow_get_seedhash(header.number() as u64)), header.bare_hash(), header.nonce().low_u64(), Ethash::from_ethash(result.mix_hash), Ethash::from_ethash(result.value));
if mix != header.mix_hash() {
return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: header.mix_hash() })));
}
@@ -267,7 +284,11 @@ impl Ethash {
}
let min_difficulty = self.ethash_params.minimum_difficulty;
-let difficulty_bound_divisor = self.ethash_params.difficulty_bound_divisor;
+let difficulty_hardfork = header.number() >= self.ethash_params.difficulty_hardfork_transition;
+let difficulty_bound_divisor = match difficulty_hardfork {
+true => self.ethash_params.difficulty_hardfork_bound_divisor,
+false => self.ethash_params.difficulty_bound_divisor,
+};
let duration_limit = self.ethash_params.duration_limit;
let frontier_limit = self.ethash_params.frontier_compatibility_mode_limit;
@@ -281,17 +302,19 @@ impl Ethash {
else {
trace!(target: "ethash", "Calculating difficulty parent.difficulty={}, header.timestamp={}, parent.timestamp={}", parent.difficulty(), header.timestamp(), parent.timestamp());
//block_diff = parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99)
-let diff_inc = (header.timestamp() - parent.timestamp()) / 10;
+let diff_inc = (header.timestamp() - parent.timestamp()) / self.ethash_params.difficulty_increment_divisor;
if diff_inc <= 1 {
-parent.difficulty().clone() + parent.difficulty().clone() / From::from(2048) * From::from(1 - diff_inc)
+parent.difficulty().clone() + parent.difficulty().clone() / From::from(difficulty_bound_divisor) * From::from(1 - diff_inc)
} else {
-parent.difficulty().clone() - parent.difficulty().clone() / From::from(2048) * From::from(min(diff_inc - 1, 99))
+parent.difficulty().clone() - parent.difficulty().clone() / From::from(difficulty_bound_divisor) * From::from(min(diff_inc - 1, 99))
}
};
target = max(min_difficulty, target);
-let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize;
-if period > 1 {
-target = max(min_difficulty, target + (U256::from(1) << (period - 2)));
+if header.number() < self.ethash_params.bomb_defuse_transition {
+let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize;
+if period > 1 {
+target = max(min_difficulty, target + (U256::from(1) << (period - 2)));
+}
}
target
}
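For reference, the adjusted rule can be worked through with small numbers. A simplified u64 version of the calculation above, with made-up parameters (the real code uses U256 and the chain-spec values):

```rust
// Worked example of the (simplified) difficulty rule, illustrative only.
const EXP_DIFF_PERIOD: u64 = 100_000;

fn calculate_difficulty(
    parent_diff: u64,
    parent_ts: u64,
    header_ts: u64,
    parent_number: u64,
    bound_divisor: u64,     // e.g. 2048, now overridable at the hardfork block
    increment_divisor: u64, // e.g. 10, now configurable per chain spec
    min_difficulty: u64,
    bomb_defused: bool,     // header.number >= bomb_defuse_transition
) -> u64 {
    let diff_inc = (header_ts - parent_ts) / increment_divisor;
    let mut target = if diff_inc <= 1 {
        parent_diff + parent_diff / bound_divisor * (1 - diff_inc)
    } else {
        parent_diff - parent_diff / bound_divisor * (diff_inc - 1).min(99)
    };
    target = target.max(min_difficulty);
    if !bomb_defused {
        let period = ((parent_number + 1) / EXP_DIFF_PERIOD) as u32;
        if period > 1 {
            target = (target + (1u64 << (period - 2))).max(min_difficulty);
        }
    }
    target
}

fn main() {
    // Fast block (5 s apart): difficulty rises by parent/2048.
    assert_eq!(calculate_difficulty(2048_000, 100, 105, 10, 2048, 10, 131_072, true), 2048_000 + 1000);
    // Slow block (35 s apart): difficulty drops by 2 * parent/2048.
    assert_eq!(calculate_difficulty(2048_000, 100, 135, 10, 2048, 10, 131_072, true), 2048_000 - 2000);
}
```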
@@ -355,9 +378,9 @@ mod tests {
let spec = new_morden();
let engine = &*spec.engine;
let genesis_header = spec.genesis_header();
-let mut db_result = get_temp_journal_db();
+let mut db_result = get_temp_state_db();
let mut db = db_result.take();
-spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+spec.ensure_db_good(&mut db).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close();
@@ -369,9 +392,9 @@ mod tests {
let spec = new_morden();
let engine = &*spec.engine;
let genesis_header = spec.genesis_header();
-let mut db_result = get_temp_journal_db();
+let mut db_result = get_temp_state_db();
let mut db = db_result.take();
-spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+spec.ensure_db_good(&mut db).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]);
let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
let mut uncle = Header::new();

View File

@@ -42,6 +42,9 @@ pub fn new_frontier() -> Spec { load(include_bytes!("../../res/ethereum/frontier
/// Create a new Frontier mainnet chain spec without the DAO hardfork.
pub fn new_classic() -> Spec { load(include_bytes!("../../res/ethereum/classic.json")) }
+/// Create a new Expanse mainnet chain spec.
+pub fn new_expanse() -> Spec { load(include_bytes!("../../res/ethereum/expanse.json")) }
/// Create a new Frontier chain spec as though it never changes to Homestead.
pub fn new_frontier_test() -> Spec { load(include_bytes!("../../res/ethereum/frontier_test.json")) }
@@ -69,9 +72,9 @@ mod tests {
let spec = new_morden();
let engine = &spec.engine;
let genesis_header = spec.genesis_header();
-let mut db_result = get_temp_journal_db();
+let mut db_result = get_temp_state_db();
let mut db = db_result.take();
-spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+spec.ensure_db_good(&mut db).unwrap();
let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(), Default::default()).unwrap();
assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()), 1u64.into());
assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()), 1u64.into());

View File

@@ -196,6 +196,7 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
receive_address: *const evmjit::H256,
code_address: *const evmjit::H256,
transfer_value: *const evmjit::I256,
+// We are ignoring apparent value - it's handled in externalities.
_apparent_value: *const evmjit::I256,
in_beg: *const u8,
in_size: u64,
@@ -208,12 +209,13 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
let sender_address = unsafe { Address::from_jit(&*sender_address) };
let receive_address = unsafe { Address::from_jit(&*receive_address) };
let code_address = unsafe { Address::from_jit(&*code_address) };
-// TODO Is it always safe in case of DELEGATE_CALL?
let transfer_value = unsafe { U256::from_jit(&*transfer_value) };
-let value = Some(transfer_value);
// receive address and code address are the same in normal calls
let is_callcode = receive_address != code_address;
+let is_delegatecall = is_callcode && sender_address != receive_address;
+let value = if is_delegatecall { None } else { Some(transfer_value) };
if !is_callcode && !self.ext.exists(&code_address) {
gas_cost = gas_cost + U256::from(self.ext.schedule().call_new_account_gas);
@@ -242,10 +244,10 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
}
}
-// TODO [ToDr] Any way to detect DelegateCall?
-let call_type = match is_callcode {
-true => CallType::CallCode,
-false => CallType::Call,
+let call_type = match (is_callcode, is_delegatecall) {
+(_, true) => CallType::DelegateCall,
+(true, false) => CallType::CallCode,
+(false, false) => CallType::Call,
};
match self.ext.call(
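The two flags pin down the call type: receive != code marks CALLCODE-style execution, and a differing sender on top of that is how a DELEGATECALL appears from the adapter's point of view. A tiny sketch of the classification, with u64 stand-ins for addresses:

```rust
#[derive(Debug, PartialEq)]
enum CallType { Call, CallCode, DelegateCall }

// Sketch of the classification used above; addresses are stand-in u64s.
fn classify(sender: u64, receive: u64, code: u64) -> CallType {
    let is_callcode = receive != code;
    let is_delegatecall = is_callcode && sender != receive;
    match (is_callcode, is_delegatecall) {
        (_, true) => CallType::DelegateCall,
        (true, false) => CallType::CallCode,
        (false, false) => CallType::Call,
    }
}

fn main() {
    assert_eq!(classify(1, 1, 2), CallType::CallCode);     // sender == receiver
    assert_eq!(classify(1, 2, 3), CallType::DelegateCall); // all distinct
    assert_eq!(classify(1, 2, 2), CallType::Call);         // receive == code
}
```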

View File

@@ -141,7 +141,7 @@ impl Ext for FakeExt {
}
fn extcodesize(&self, address: &Address) -> usize {
-self.codes.get(address).map(|v| v.len()).unwrap_or(0)
+self.codes.get(address).map_or(0, |c| c.len())
}
fn log(&mut self, topics: Vec<H256>, data: &[u8]) {

View File

@@ -589,8 +589,11 @@ mod tests {
assert_eq!(substate.contracts_created.len(), 0);
}
-evm_test!{test_call_to_create: test_call_to_create_jit, test_call_to_create_int}
-fn test_call_to_create(factory: Factory) {
+#[test]
+// Tracing is not supported in JIT
+fn test_call_to_create() {
+let factory = Factory::new(VMType::Interpreter);
// code:
//
// 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes?
@@ -712,8 +715,10 @@ mod tests {
assert_eq!(vm_tracer.drain().unwrap(), expected_vm_trace);
}
-evm_test!{test_create_contract: test_create_contract_jit, test_create_contract_int}
-fn test_create_contract(factory: Factory) {
+#[test]
+fn test_create_contract() {
+// Tracing is not supported in JIT
+let factory = Factory::new(VMType::Interpreter);
// code:
//
// 60 10 - push 16

View File

@@ -209,7 +209,6 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
self.state.code_size(address).unwrap_or(0)
}
-#[cfg_attr(feature="dev", allow(match_ref_pats))]
fn ret(mut self, gas: &U256, data: &[u8]) -> evm::Result<U256>
where Self: Sized {

View File

@@ -295,6 +295,12 @@ impl Encodable for Header {
}
}
+impl HeapSizeOf for Header {
+fn heap_size_of_children(&self) -> usize {
+self.extra_data.heap_size_of_children() + self.seal.heap_size_of_children()
+}
+}
#[cfg(test)]
mod tests {
use rustc_serialize::hex::FromHex;

View File

@@ -110,6 +110,7 @@ extern crate lazy_static;
extern crate heapsize;
#[macro_use]
extern crate ethcore_ipc as ipc;
+extern crate lru_cache;
#[cfg(feature = "jit" )]
extern crate evmjit;
@@ -119,7 +120,6 @@ pub extern crate ethstore;
pub mod account_provider;
pub mod engines;
pub mod block;
-pub mod block_queue;
pub mod client;
pub mod error;
pub mod ethereum;
@@ -143,6 +143,7 @@ mod basic_types;
mod env_info;
mod pod_account;
mod state;
+mod state_db;
mod account_db;
mod builtin;
mod executive;

View File

@@ -864,15 +864,24 @@ impl MinerService for Miner {
}
fn submit_seal(&self, chain: &MiningBlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
-let result = if let Some(b) = self.sealing_work.lock().queue.get_used_if(if self.options.enable_resubmission { GetAction::Clone } else { GetAction::Take }, |b| &b.hash() == &pow_hash) {
-b.lock().try_seal(&*self.engine, seal).or_else(|_| {
-warn!(target: "miner", "Mined solution rejected: Invalid.");
-Err(Error::PowInvalid)
-})
-} else {
-warn!(target: "miner", "Mined solution rejected: Block unknown or out of date.");
-Err(Error::PowHashInvalid)
-};
+let result =
+if let Some(b) = self.sealing_work.lock().queue.get_used_if(
+if self.options.enable_resubmission {
+GetAction::Clone
+} else {
+GetAction::Take
+},
+|b| &b.hash() == &pow_hash
+) {
+trace!(target: "miner", "Sealing block {}={}={} with seal {:?}", pow_hash, b.hash(), b.header().bare_hash(), seal);
+b.lock().try_seal(&*self.engine, seal).or_else(|(e, _)| {
+warn!(target: "miner", "Mined solution rejected: {}", e);
+Err(Error::PowInvalid)
+})
+} else {
+warn!(target: "miner", "Mined solution rejected: Block unknown or out of date.");
+Err(Error::PowHashInvalid)
+};
result.and_then(|sealed| {
let n = sealed.header().number();
let h = sealed.header().hash();
@@ -915,7 +924,7 @@
out_of_chain.for_each(|txs| {
let mut transaction_queue = self.transaction_queue.lock();
let _ = self.add_transactions_to_queue(
-chain, txs, TransactionOrigin::External, &mut transaction_queue
+chain, txs, TransactionOrigin::RetractedBlock, &mut transaction_queue
);
});
}

View File

@@ -98,6 +98,8 @@ pub enum TransactionOrigin {
Local,
/// External transaction received from network
External,
+/// Transactions from retracted blocks
+RetractedBlock,
}
impl PartialOrd for TransactionOrigin {
@@ -112,10 +114,11 @@ impl Ord for TransactionOrigin {
return Ordering::Equal;
}
-if *self == TransactionOrigin::Local {
-Ordering::Less
-} else {
-Ordering::Greater
+match (*self, *other) {
+(TransactionOrigin::RetractedBlock, _) => Ordering::Less,
+(_, TransactionOrigin::RetractedBlock) => Ordering::Greater,
+(TransactionOrigin::Local, _) => Ordering::Less,
+_ => Ordering::Greater,
}
}
}
@@ -711,7 +714,10 @@ impl TransactionQueue {
let order = self.current.drop(sender, &k).expect("iterating over a collection that has been retrieved above;
qed");
if k >= current_nonce {
-self.future.insert(*sender, k, order.update_height(k, current_nonce));
+let order = order.update_height(k, current_nonce);
+if let Some(old) = self.future.insert(*sender, k, order.clone()) {
+Self::replace_orders(*sender, k, old, order, &mut self.future, &mut self.by_hash);
+}
} else {
trace!(target: "txqueue", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
self.by_hash.remove(&order.hash).expect("All transactions in `future` are also in `by_hash`");
@@ -776,7 +782,9 @@ impl TransactionQueue {
self.future.by_gas_price.remove(&order.gas_price, &order.hash);
// Put to current
let order = order.update_height(current_nonce, first_nonce);
-self.current.insert(address, current_nonce, order);
+if let Some(old) = self.current.insert(address, current_nonce, order.clone()) {
+Self::replace_orders(address, current_nonce, old, order, &mut self.current, &mut self.by_hash);
+}
update_last_nonce_to = Some(current_nonce);
current_nonce = current_nonce + U256::one();
}
@@ -810,47 +818,49 @@
let nonce = tx.nonce();
let hash = tx.hash();
-let next_nonce = self.last_nonces
-.get(&address)
-.cloned()
-.map_or(state_nonce, |n| n + U256::one());
// The transaction might be old, let's check that.
// This has to be the first test, otherwise calculating
// nonce height would result in overflow.
if nonce < state_nonce {
// Dropping transaction
-trace!(target: "txqueue", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, next_nonce);
+trace!(target: "txqueue", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, state_nonce);
return Err(TransactionError::Old);
-} else if nonce > next_nonce {
+}
+// Update nonces of transactions in future (remove old transactions)
+self.update_future(&address, state_nonce);
+// State nonce could be updated. Maybe there are some more items waiting in future?
+self.move_matching_future_to_current(address, state_nonce, state_nonce);
+// Check the next expected nonce (might be updated by move above)
+let next_nonce = self.last_nonces
+.get(&address)
+.cloned()
+.map_or(state_nonce, |n| n + U256::one());
+// Future transaction
+if nonce > next_nonce {
// We have a gap - put to future.
-// Update nonces of transactions in future (remove old transactions)
-self.update_future(&address, state_nonce);
// Insert transaction (or replace old one with lower gas price)
try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.future, &mut self.by_hash)));
-// Return an error if this transaction is not imported because of limit.
-try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash)));
+// Enforce limit in Future
+let removed = self.future.enforce_limit(&mut self.by_hash);
+// Return an error if this transaction was not imported because of limit.
+try!(check_if_removed(&address, &nonce, removed));
debug!(target: "txqueue", "Importing transaction to future: {:?}", hash);
debug!(target: "txqueue", "status: {:?}", self.status());
return Ok(TransactionImportResult::Future);
}
+// We might have filled a gap - move some more transactions from future
+self.move_matching_future_to_current(address, nonce, state_nonce);
+self.move_matching_future_to_current(address, nonce + U256::one(), state_nonce);
+// Replace transaction if any
try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash)));
// Keep track of highest nonce stored in current
let new_max = self.last_nonces.get(&address).map_or(nonce, |n| cmp::max(nonce, *n));
self.last_nonces.insert(address, new_max);
-// Update nonces of transactions in future
-self.update_future(&address, state_nonce);
-// Maybe there are some more items waiting in future?
-self.move_matching_future_to_current(address, nonce + U256::one(), state_nonce);
-// There might be exactly the same transaction waiting in future
-// same (sender, nonce), but above function would not move it.
-if let Some(order) = self.future.drop(&address, &nonce) {
-// Let's insert that transaction to current (if it has higher gas_price)
-let future_tx = self.by_hash.remove(&order.hash).expect("All transactions in `future` are always in `by_hash`.");
-// if transaction in `current` (then one we are importing) is replaced it means that it has to low gas_price
-try!(check_too_cheap(!Self::replace_transaction(future_tx, state_nonce, &mut self.current, &mut self.by_hash)));
-}
// Also enforce the limit
let removed = self.current.enforce_limit(&mut self.by_hash);
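Read as a whole, the reworked add() triages purely on nonce: below the state nonce is Old, beyond the next expected nonce goes to future, and everything else lands in current after the future moves settle. A schematic of that triage with plain u64 nonces (queue bookkeeping deliberately elided):

```rust
#[derive(Debug, PartialEq)]
enum Import { DroppedOld, Future, Current }

// Sketch of the nonce triage in the reworked add(); last_queued stands in
// for the per-sender entry in last_nonces.
fn triage(nonce: u64, state_nonce: u64, last_queued: Option<u64>) -> Import {
    // First test: an already-used nonce is rejected before any height
    // arithmetic (which could otherwise underflow).
    if nonce < state_nonce {
        return Import::DroppedOld;
    }
    // Next expected nonce, after future->current moves have settled.
    let next_nonce = last_queued.map_or(state_nonce, |n| n + 1);
    if nonce > next_nonce {
        Import::Future // gap: park it until the missing nonces arrive
    } else {
        Import::Current
    }
}

fn main() {
    assert_eq!(triage(3, 5, None), Import::DroppedOld);
    assert_eq!(triage(9, 5, Some(5)), Import::Future);  // expected 6, got 9
    assert_eq!(triage(6, 5, Some(5)), Import::Current);
}
```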
@@ -895,24 +905,28 @@
if let Some(old) = set.insert(address, nonce, order.clone()) {
-// There was already transaction in queue. Let's check which one should stay
-let old_fee = old.gas_price;
-let new_fee = order.gas_price;
-if old_fee.cmp(&new_fee) == Ordering::Greater {
-// Put back old transaction since it has greater priority (higher gas_price)
-set.insert(address, nonce, old);
-// and remove new one
-by_hash.remove(&hash).expect("The hash has been just inserted and no other line is altering `by_hash`.");
-false
-} else {
-// Make sure we remove old transaction entirely
-by_hash.remove(&old.hash).expect("The hash is coming from `future` so it has to be in `by_hash`.");
-true
-}
+Self::replace_orders(address, nonce, old, order, set, by_hash)
} else {
true
}
}
+fn replace_orders(address: Address, nonce: U256, old: TransactionOrder, order: TransactionOrder, set: &mut TransactionSet, by_hash: &mut HashMap<H256, VerifiedTransaction>) -> bool {
+// There was already transaction in queue. Let's check which one should stay
+let old_fee = old.gas_price;
+let new_fee = order.gas_price;
+if old_fee.cmp(&new_fee) == Ordering::Greater {
+// Put back old transaction since it has greater priority (higher gas_price)
+set.insert(address, nonce, old);
+// and remove new one
+by_hash.remove(&order.hash).expect("The hash has been just inserted and no other line is altering `by_hash`.");
+false
+} else {
+// Make sure we remove old transaction entirely
+by_hash.remove(&old.hash).expect("The hash is coming from `future` so it has to be in `by_hash`.");
+true
+}
+}
}
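The extracted replace_orders keeps whichever order bids the higher gas price and evicts the loser from the by-hash index. A miniature of that rule (the types are illustrative stand-ins, not the queue's real ones):

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct Order { hash: u64, gas_price: u64 }

// Miniature of the replace_orders rule: on a (sender, nonce) collision the
// higher gas price wins, and the loser's hash leaves the index.
fn replace_orders(
    slot: &mut Order,            // the newly inserted order occupying the slot
    old: Order,                  // the order it displaced
    by_hash: &mut HashMap<u64, ()>,
) -> bool {
    if old.gas_price > slot.gas_price {
        // Old order outbids the new one: put it back, evict the newcomer.
        by_hash.remove(&slot.hash);
        *slot = old;
        false
    } else {
        // New order wins: forget the old one entirely.
        by_hash.remove(&old.hash);
        true
    }
}

fn main() {
    let mut by_hash: HashMap<u64, ()> = [(1, ()), (2, ())].into_iter().collect();
    let mut slot = Order { hash: 2, gas_price: 5 };
    let replaced = replace_orders(&mut slot, Order { hash: 1, gas_price: 10 }, &mut by_hash);
    assert!(!replaced && slot.hash == 1); // the higher-priced old order kept its place
}
```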
fn check_too_cheap(is_in: bool) -> Result<(), TransactionError> {
@@ -1014,6 +1028,17 @@ mod test {
new_tx_pair_default(0.into(), 1.into())
}
+#[test]
+fn test_ordering() {
+assert_eq!(TransactionOrigin::Local.cmp(&TransactionOrigin::External), Ordering::Less);
+assert_eq!(TransactionOrigin::RetractedBlock.cmp(&TransactionOrigin::Local), Ordering::Less);
+assert_eq!(TransactionOrigin::RetractedBlock.cmp(&TransactionOrigin::External), Ordering::Less);
+assert_eq!(TransactionOrigin::External.cmp(&TransactionOrigin::Local), Ordering::Greater);
+assert_eq!(TransactionOrigin::Local.cmp(&TransactionOrigin::RetractedBlock), Ordering::Greater);
+assert_eq!(TransactionOrigin::External.cmp(&TransactionOrigin::RetractedBlock), Ordering::Greater);
+}
#[test]
fn should_return_correct_nonces_when_dropped_because_of_limit() {
// given
@@ -1196,6 +1221,31 @@ mod test {
assert_eq!(txq.top_transactions()[0], tx2);
}
+#[test]
+fn should_move_all_transactions_from_future() {
+// given
+let mut txq = TransactionQueue::new();
+let (tx, tx2) = new_tx_pair_default(1.into(), 1.into());
+let prev_nonce = |a: &Address| AccountDetails{ nonce: default_account_details(a).nonce - U256::one(), balance:
+!U256::zero() };
+// First insert one transaction to future
+let res = txq.add(tx.clone(), &prev_nonce, TransactionOrigin::External);
+assert_eq!(res.unwrap(), TransactionImportResult::Future);
+assert_eq!(txq.status().future, 1);
+// now import second transaction to current
+let res = txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External);
+// then
+assert_eq!(res.unwrap(), TransactionImportResult::Current);
+assert_eq!(txq.status().pending, 2);
+assert_eq!(txq.status().future, 0);
+assert_eq!(txq.current.by_priority.len(), 2);
+assert_eq!(txq.current.by_address.len(), 2);
+assert_eq!(txq.top_transactions()[0], tx);
+assert_eq!(txq.top_transactions()[1], tx2);
+}
#[test]
fn should_import_tx() {
@@ -1375,6 +1425,27 @@ mod test {
assert_eq!(top.len(), 2);
}
+#[test]
+fn should_prioritize_reimported_transactions_within_same_nonce_height() {
+// given
+let mut txq = TransactionQueue::new();
+let tx = new_tx_default();
+// the second one has same nonce but higher `gas_price`
+let (_, tx2) = new_similar_tx_pair();
+// when
+// first insert local one with higher gas price
+txq.add(tx2.clone(), &default_account_details, TransactionOrigin::Local).unwrap();
+// then the one with lower gas price, but from retracted block
+txq.add(tx.clone(), &default_account_details, TransactionOrigin::RetractedBlock).unwrap();
+// then
+let top = txq.top_transactions();
+assert_eq!(top[0], tx); // retracted should be first
+assert_eq!(top[1], tx2);
+assert_eq!(top.len(), 2);
+}
#[test]
fn should_not_prioritize_local_transactions_with_different_nonce_height() {
// given

View File

@@ -48,7 +48,7 @@ impl PodAccount {
PodAccount {
balance: *acc.balance(),
nonce: *acc.nonce(),
-storage: acc.storage_overlay().iter().fold(BTreeMap::new(), |mut m, (k, &(_, ref v))| {m.insert(k.clone(), v.clone()); m}),
+storage: acc.storage_changes().iter().fold(BTreeMap::new(), |mut m, (k, v)| {m.insert(k.clone(), v.clone()); m}),
code: acc.code().map(|x| x.to_vec()),
}
}

View File

@@ -80,7 +80,13 @@ impl ClientService {
}
let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
-db_config.cache_size = config.db_cache_size;
+// give all rocksdb cache to state column; everything else has its
+// own caches.
+if let Some(size) = config.db_cache_size {
+db_config.set_cache(::db::COL_STATE, size);
+}
db_config.compaction = config.db_compaction.compaction_profile();
db_config.wal = config.db_wal;
@@ -90,7 +96,7 @@ impl ClientService {
let snapshot_params = SnapServiceParams {
engine: spec.engine.clone(),
genesis_block: spec.genesis_block(),
-db_config: db_config,
+db_config: db_config.clone(),
pruning: pruning,
channel: io_service.channel(),
snapshot_root: snapshot_path.into(),

View File

@@ -205,7 +205,7 @@ impl Account {
#[cfg(test)]
mod tests {
use account_db::{AccountDB, AccountDBMut};
-use tests::helpers::get_temp_journal_db;
+use tests::helpers::get_temp_state_db;
use snapshot::tests::helpers::fill_storage;
use util::sha3::{SHA3_EMPTY, SHA3_NULL_RLP};
@@ -218,8 +218,7 @@ mod tests {
#[test]
fn encoding_basic() {
-let mut db = get_temp_journal_db();
-let mut db = &mut **db;
+let mut db = get_temp_state_db();
let addr = Address::random();
let account = Account {
@@ -239,8 +238,7 @@ mod tests {
#[test]
fn encoding_storage() {
-let mut db = get_temp_journal_db();
-let mut db = &mut **db;
+let mut db = get_temp_state_db();
let addr = Address::random();
let account = {
@@ -265,8 +263,7 @@ mod tests {
#[test]
fn encoding_code() {
-let mut db = get_temp_journal_db();
-let mut db = &mut **db;
+let mut db = get_temp_state_db();
let addr1 = Address::random();
let addr2 = Address::random();


@@ -20,6 +20,7 @@ use common::*;
use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, Tendermint};
use pod_state::*;
use account_db::*;
+use state_db::StateDB;
use super::genesis::Genesis;
use super::seal::Generic as GenericSeal;
use ethereum;

@@ -36,6 +37,8 @@ pub struct CommonParams {
	pub maximum_extra_data_size: usize,
	/// Network id.
	pub network_id: U256,
+	/// Main subprotocol name.
+	pub subprotocol_name: String,
	/// Minimum gas limit.
	pub min_gas_limit: U256,
	/// Fork block to check.

@@ -48,6 +51,7 @@ impl From<ethjson::spec::Params> for CommonParams {
			account_start_nonce: p.account_start_nonce.into(),
			maximum_extra_data_size: p.maximum_extra_data_size.into(),
			network_id: p.network_id.into(),
+			subprotocol_name: p.subprotocol_name.unwrap_or_else(|| "eth".to_owned()),
			min_gas_limit: p.min_gas_limit.into(),
			fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { Some((n.into(), h.into())) } else { None },
		}

@@ -156,6 +160,9 @@ impl Spec {
	/// Get the configured Network ID.
	pub fn network_id(&self) -> U256 { self.params.network_id }

+	/// Get the configured subprotocol name.
+	pub fn subprotocol_name(&self) -> String { self.params.subprotocol_name.clone() }
+
	/// Get the configured network fork block.
	pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params.fork_block }

@@ -169,7 +176,7 @@ impl Spec {
		header.set_transactions_root(self.transactions_root.clone());
		header.set_uncles_hash(RlpStream::new_list(0).out().sha3());
		header.set_extra_data(self.extra_data.clone());
-		header.set_state_root(self.state_root().clone());
+		header.set_state_root(self.state_root());
		header.set_receipts_root(self.receipts_root.clone());
		header.set_log_bloom(H2048::new().clone());
		header.set_gas_used(self.gas_used.clone());

@@ -184,6 +191,7 @@ impl Spec {
			let r = Rlp::new(&seal);
			(0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect()
		});
+		trace!(target: "spec", "Header hash is {}", header.hash());
		header
	}

@@ -227,19 +235,22 @@ impl Spec {
	}

	/// Ensure that the given state DB has the trie nodes in for the genesis state.
-	pub fn ensure_db_good(&self, db: &mut HashDB) -> Result<bool, Box<TrieError>> {
-		if !db.contains(&self.state_root()) {
+	pub fn ensure_db_good(&self, db: &mut StateDB) -> Result<bool, Box<TrieError>> {
+		if !db.as_hashdb().contains(&self.state_root()) {
+			trace!(target: "spec", "ensure_db_good: Fresh database? Cannot find state root {}", self.state_root());
			let mut root = H256::new();
			{
-				let mut t = SecTrieDBMut::new(db, &mut root);
+				let mut t = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root);
				for (address, account) in self.genesis_state.get().iter() {
					try!(t.insert(&**address, &account.rlp()));
				}
			}
+			trace!(target: "spec", "ensure_db_good: Populated sec trie; root is {}", root);
			for (address, account) in self.genesis_state.get().iter() {
-				account.insert_additional(&mut AccountDBMut::new(db, address));
+				account.insert_additional(&mut AccountDBMut::new(db.as_hashdb_mut(), address));
			}
-			assert!(db.contains(&self.state_root()));
+			assert!(db.as_hashdb().contains(&self.state_root()));
			Ok(true)
		} else { Ok(false) }
	}


@@ -20,11 +20,13 @@ use std::collections::hash_map::Entry;
use util::*;
use pod_account::*;
use rlp::*;
+use lru_cache::LruCache;

-use std::cell::{Ref, RefCell, Cell};
+use std::cell::{RefCell, Cell};
+
+const STORAGE_CACHE_ITEMS: usize = 4096;

/// Single account in the system.
-#[derive(Clone)]
pub struct Account {
	// Balance of the account.
	balance: U256,

@@ -32,10 +34,16 @@ pub struct Account {
	nonce: U256,
	// Trie-backed storage.
	storage_root: H256,
-	// Overlay on trie-backed storage - tuple is (<clean>, <value>).
-	storage_overlay: RefCell<HashMap<H256, (Filth, H256)>>,
+	// LRU cache of the trie-backed storage.
+	// This is limited to `STORAGE_CACHE_ITEMS` recent queries.
+	storage_cache: RefCell<LruCache<H256, H256>>,
+	// Modified storage. Accumulates changes to storage made in `set_storage`.
+	// Takes precedence over `storage_cache`.
+	storage_changes: HashMap<H256, H256>,
	// Code hash of the account. If None, means that it's a contract whose code has not yet been set.
	code_hash: Option<H256>,
+	// Size of the account code.
+	code_size: Option<usize>,
	// Code cache of the account.
	code_cache: Bytes,
	// Account is new or has been modified
@@ -52,23 +60,31 @@ impl Account {
			balance: balance,
			nonce: nonce,
			storage_root: SHA3_NULL_RLP,
-			storage_overlay: RefCell::new(storage.into_iter().map(|(k, v)| (k, (Filth::Dirty, v))).collect()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: storage,
			code_hash: Some(code.sha3()),
+			code_size: Some(code.len()),
			code_cache: code,
			filth: Filth::Dirty,
			address_hash: Cell::new(None),
		}
	}

+	fn empty_storage_cache() -> RefCell<LruCache<H256, H256>> {
+		RefCell::new(LruCache::new(STORAGE_CACHE_ITEMS))
+	}
+
	/// General constructor.
	pub fn from_pod(pod: PodAccount) -> Account {
		Account {
			balance: pod.balance,
			nonce: pod.nonce,
			storage_root: SHA3_NULL_RLP,
-			storage_overlay: RefCell::new(pod.storage.into_iter().map(|(k, v)| (k, (Filth::Dirty, v))).collect()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: pod.storage.into_iter().collect(),
			code_hash: pod.code.as_ref().map(|c| c.sha3()),
-			code_cache: pod.code.as_ref().map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c.clone()),
+			code_size: Some(pod.code.as_ref().map_or(0, |c| c.len())),
+			code_cache: pod.code.map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c),
			filth: Filth::Dirty,
			address_hash: Cell::new(None),
		}

@@ -80,9 +96,11 @@ impl Account {
			balance: balance,
			nonce: nonce,
			storage_root: SHA3_NULL_RLP,
-			storage_overlay: RefCell::new(HashMap::new()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: HashMap::new(),
			code_hash: Some(SHA3_EMPTY),
			code_cache: vec![],
+			code_size: Some(0),
			filth: Filth::Dirty,
			address_hash: Cell::new(None),
		}

@@ -95,9 +113,11 @@ impl Account {
			nonce: r.val_at(0),
			balance: r.val_at(1),
			storage_root: r.val_at(2),
-			storage_overlay: RefCell::new(HashMap::new()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: HashMap::new(),
			code_hash: Some(r.val_at(3)),
			code_cache: vec![],
+			code_size: None,
			filth: Filth::Clean,
			address_hash: Cell::new(None),
		}

@@ -110,9 +130,11 @@ impl Account {
			balance: balance,
			nonce: nonce,
			storage_root: SHA3_NULL_RLP,
-			storage_overlay: RefCell::new(HashMap::new()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: HashMap::new(),
			code_hash: None,
			code_cache: vec![],
+			code_size: None,
			filth: Filth::Dirty,
			address_hash: Cell::new(None),
		}
@@ -123,44 +145,62 @@ impl Account {
	pub fn init_code(&mut self, code: Bytes) {
		assert!(self.code_hash.is_none());
		self.code_cache = code;
+		self.code_size = Some(self.code_cache.len());
		self.filth = Filth::Dirty;
	}

	/// Reset this account's code to the given code.
	pub fn reset_code(&mut self, code: Bytes) {
		self.code_hash = None;
+		self.code_size = Some(0);
		self.init_code(code);
	}

	/// Set (and cache) the contents of the trie's storage at `key` to `value`.
	pub fn set_storage(&mut self, key: H256, value: H256) {
-		match self.storage_overlay.borrow_mut().entry(key) {
-			Entry::Occupied(ref mut entry) if entry.get().1 != value => {
-				entry.insert((Filth::Dirty, value));
+		match self.storage_changes.entry(key) {
+			Entry::Occupied(ref mut entry) if entry.get() != &value => {
+				entry.insert(value);
				self.filth = Filth::Dirty;
			},
			Entry::Vacant(entry) => {
-				entry.insert((Filth::Dirty, value));
+				entry.insert(value);
				self.filth = Filth::Dirty;
			},
-			_ => (),
+			_ => {},
		}
	}

	/// Get (and cache) the contents of the trie's storage at `key`.
+	/// Takes modified storage into account.
	pub fn storage_at(&self, db: &HashDB, key: &H256) -> H256 {
-		self.storage_overlay.borrow_mut().entry(key.clone()).or_insert_with(||{
-			let db = SecTrieDB::new(db, &self.storage_root)
-				.expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \
-				SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \
-				using it will not fail.");
-			let item: U256 = match db.get(key){
-				Ok(x) => x.map_or_else(U256::zero, decode),
-				Err(e) => panic!("Encountered potential DB corruption: {}", e),
-			};
-			(Filth::Clean, item.into())
-		}).1.clone()
+		if let Some(value) = self.cached_storage_at(key) {
+			return value;
+		}
+		let db = SecTrieDB::new(db, &self.storage_root)
+			.expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \
+			SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \
+			using it will not fail.");
+		let item: U256 = match db.get(key) {
+			Ok(x) => x.map_or_else(U256::zero, decode),
+			Err(e) => panic!("Encountered potential DB corruption: {}", e),
+		};
+		let value: H256 = item.into();
+		self.storage_cache.borrow_mut().insert(key.clone(), value.clone());
+		value
+	}
+
+	/// Get cached storage value if any. Returns `None` if the
+	/// key is not in the cache.
+	pub fn cached_storage_at(&self, key: &H256) -> Option<H256> {
+		if let Some(value) = self.storage_changes.get(key) {
+			return Some(value.clone())
+		}
+		if let Some(value) = self.storage_cache.borrow_mut().get_mut(key) {
+			return Some(value.clone())
+		}
+		None
	}
	/// return the balance associated with this account.

@@ -196,6 +236,12 @@ impl Account {
		}
	}

+	/// returns the account's code size. If `None` then the code cache or code size cache isn't available -
+	/// get someone who knows to call `note_code`.
+	pub fn code_size(&self) -> Option<usize> {
+		self.code_size.clone()
+	}
+
	#[cfg(test)]
	/// Provide a byte array which hashes to the `code_hash`. returns the hash as a result.
	pub fn note_code(&mut self, code: Bytes) -> Result<(), H256> {

@@ -203,6 +249,7 @@ impl Account {
		match self.code_hash {
			Some(ref i) if h == *i => {
				self.code_cache = code;
+				self.code_size = Some(self.code_cache.len());
				Ok(())
			},
			_ => Err(h)

@@ -216,11 +263,12 @@ impl Account {

	/// Is this a new or modified account?
	pub fn is_dirty(&self) -> bool {
-		self.filth == Filth::Dirty
+		self.filth == Filth::Dirty || !self.storage_is_clean()
	}

	/// Mark account as clean.
	pub fn set_clean(&mut self) {
+		assert!(self.storage_is_clean());
		self.filth = Filth::Clean
	}
@@ -231,7 +279,31 @@ impl Account {
		self.is_cached() ||
		match self.code_hash {
			Some(ref h) => match db.get(h) {
-				Some(x) => { self.code_cache = x.to_vec(); true },
+				Some(x) => {
+					self.code_cache = x.to_vec();
+					self.code_size = Some(x.len());
+					true
+				},
+				_ => {
+					warn!("Failed reverse get of {}", h);
+					false
+				},
+			},
+			_ => false,
+		}
+	}
+
+	/// Provide a database to get `code_size`. Should not be called if it is a contract without code.
+	pub fn cache_code_size(&mut self, db: &HashDB) -> bool {
+		// TODO: fill out self.code_cache;
+		trace!("Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty());
+		self.code_size.is_some() ||
+		match self.code_hash {
+			Some(ref h) if h != &SHA3_EMPTY => match db.get(h) {
+				Some(x) => {
+					self.code_size = Some(x.len());
+					true
+				},
				_ => {
					warn!("Failed reverse get of {}", h);
					false

@@ -241,16 +313,15 @@ impl Account {
		}
	}

-	#[cfg(test)]
	/// Determine whether there are any un-`commit()`-ed storage-setting operations.
-	pub fn storage_is_clean(&self) -> bool { self.storage_overlay.borrow().iter().find(|&(_, &(f, _))| f == Filth::Dirty).is_none() }
+	pub fn storage_is_clean(&self) -> bool { self.storage_changes.is_empty() }

	#[cfg(test)]
	/// return the storage root associated with this account or None if it has been altered via the overlay.
	pub fn storage_root(&self) -> Option<&H256> { if self.storage_is_clean() {Some(&self.storage_root)} else {None} }

	/// return the storage overlay.
-	pub fn storage_overlay(&self) -> Ref<HashMap<H256, (Filth, H256)>> { self.storage_overlay.borrow() }
+	pub fn storage_changes(&self) -> &HashMap<H256, H256> { &self.storage_changes }

	/// Increment the nonce of the account by one.
	pub fn inc_nonce(&mut self) {
@@ -276,26 +347,24 @@ impl Account {
		}
	}

-	/// Commit the `storage_overlay` to the backing DB and update `storage_root`.
+	/// Commit the `storage_changes` to the backing DB and update `storage_root`.
	pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) {
		let mut t = trie_factory.from_existing(db, &mut self.storage_root)
			.expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \
			SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \
			using it will not fail.");
-		for (k, &mut (ref mut f, ref mut v)) in self.storage_overlay.borrow_mut().iter_mut() {
-			if f == &Filth::Dirty {
-				// cast key and value to trait type,
-				// so we can call overloaded `to_bytes` method
-				let res = match v.is_zero() {
-					true => t.remove(k),
-					false => t.insert(k, &encode(&U256::from(&*v))),
-				};
-				if let Err(e) = res {
-					warn!("Encountered potential DB corruption: {}", e);
-				}
-				*f = Filth::Clean;
-			}
+		for (k, v) in self.storage_changes.drain() {
+			// cast key and value to trait type,
+			// so we can call overloaded `to_bytes` method
+			let res = match v.is_zero() {
+				true => t.remove(&k),
+				false => t.insert(&k, &encode(&U256::from(&*v))),
+			};
+			if let Err(e) = res {
+				warn!("Encountered potential DB corruption: {}", e);
+			}
+			self.storage_cache.borrow_mut().insert(k, v);
		}
	}

@@ -303,9 +372,13 @@ impl Account {
	pub fn commit_code(&mut self, db: &mut HashDB) {
		trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_hash.is_none(), self.code_cache.is_empty());
		match (self.code_hash.is_none(), self.code_cache.is_empty()) {
-			(true, true) => self.code_hash = Some(SHA3_EMPTY),
+			(true, true) => {
+				self.code_hash = Some(SHA3_EMPTY);
+				self.code_size = Some(0);
+			},
			(true, false) => {
				self.code_hash = Some(db.insert(&self.code_cache));
+				self.code_size = Some(self.code_cache.len());
			},
			(false, _) => {},
		}
@@ -317,9 +390,57 @@ impl Account {
		stream.append(&self.nonce);
		stream.append(&self.balance);
		stream.append(&self.storage_root);
-		stream.append(self.code_hash.as_ref().expect("Cannot form RLP of contract account without code."));
+		stream.append(self.code_hash.as_ref().unwrap_or(&SHA3_EMPTY));
		stream.out()
	}

+	/// Clone basic account data
+	pub fn clone_basic(&self) -> Account {
+		Account {
+			balance: self.balance.clone(),
+			nonce: self.nonce.clone(),
+			storage_root: self.storage_root.clone(),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: HashMap::new(),
+			code_hash: self.code_hash.clone(),
+			code_size: self.code_size.clone(),
+			code_cache: Bytes::new(),
+			filth: self.filth,
+			address_hash: self.address_hash.clone(),
+		}
+	}
+
+	/// Clone account data and dirty storage keys
+	pub fn clone_dirty(&self) -> Account {
+		let mut account = self.clone_basic();
+		account.storage_changes = self.storage_changes.clone();
+		account.code_cache = self.code_cache.clone();
+		account
+	}
+
+	/// Clone account data, dirty storage keys and cached storage keys.
+	pub fn clone_all(&self) -> Account {
+		let mut account = self.clone_dirty();
+		account.storage_cache = self.storage_cache.clone();
+		account
+	}
+
+	/// Replace self with the data from other account merging storage cache
+	pub fn merge_with(&mut self, other: Account) {
+		assert!(self.storage_is_clean());
+		assert!(other.storage_is_clean());
+		self.balance = other.balance;
+		self.nonce = other.nonce;
+		self.storage_root = other.storage_root;
+		self.code_hash = other.code_hash;
+		self.code_cache = other.code_cache;
+		self.code_size = other.code_size;
+		self.address_hash = other.address_hash;
+		let mut cache = self.storage_cache.borrow_mut();
+		for (k, v) in other.storage_cache.into_inner().into_iter() {
+			cache.insert(k.clone(), v.clone()); //TODO: cloning should not be required here
+		}
+	}
}

impl fmt::Debug for Account {

@@ -416,6 +537,7 @@ mod tests {
	let mut db = AccountDBMut::new(&mut db, &Address::new());
	a.init_code(vec![0x55, 0x44, 0xffu8]);
	assert_eq!(a.code_hash(), SHA3_EMPTY);
+	assert_eq!(a.code_size(), Some(3));
	a.commit_code(&mut db);
	assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb");
}
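The three clone methods above form a hierarchy: `clone_basic` copies only balance, nonce, roots and code metadata with fresh empty caches; `clone_dirty` additionally carries the pending `storage_changes` and the code cache; `clone_all` also carries the LRU `storage_cache` of past reads. A hedged sketch of choosing between them (`entry_for_snapshot` is a hypothetical helper, not part of this diff):

// Sketch only: picks a clone level depending on whether cached reads
// need to survive the copy (assumes `acc: &Account` from this file).
fn entry_for_snapshot(acc: &Account, keep_read_cache: bool) -> Account {
	if keep_read_cache {
		acc.clone_all() // what `AccountEntry::clone_for_snapshot` uses below
	} else {
		acc.clone_dirty() // enough to replay local modifications
	}
}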


@@ -23,6 +23,7 @@ use trace::FlatTrace;
use pod_account::*;
use pod_state::{self, PodState};
use types::state_diff::StateDiff;
+use state_db::StateDB;

mod account;
mod substate;
@@ -41,23 +42,92 @@ pub struct ApplyOutcome {
/// Result type for the execution ("application") of a transaction.
pub type ApplyResult = Result<ApplyOutcome, Error>;

+#[derive(Debug)]
+enum AccountEntry {
+	/// Contains account data.
+	Cached(Account),
+	/// Account has been deleted.
+	Killed,
+	/// Account does not exist.
+	Missing,
+}
+
+impl AccountEntry {
+	fn is_dirty(&self) -> bool {
+		match *self {
+			AccountEntry::Cached(ref a) => a.is_dirty(),
+			AccountEntry::Killed => true,
+			AccountEntry::Missing => false,
+		}
+	}
+
+	/// Clone dirty data into new `AccountEntry`.
+	/// Returns None if clean.
+	fn clone_dirty(&self) -> Option<AccountEntry> {
+		match *self {
+			AccountEntry::Cached(ref acc) if acc.is_dirty() => Some(AccountEntry::Cached(acc.clone_dirty())),
+			AccountEntry::Killed => Some(AccountEntry::Killed),
+			_ => None,
+		}
+	}
+
+	/// Clone account entry data that needs to be saved in the snapshot.
+	/// This includes basic account information and all locally cached storage keys.
+	fn clone_for_snapshot(&self) -> AccountEntry {
+		match *self {
+			AccountEntry::Cached(ref acc) => AccountEntry::Cached(acc.clone_all()),
+			AccountEntry::Killed => AccountEntry::Killed,
+			AccountEntry::Missing => AccountEntry::Missing,
+		}
+	}
+}
+
/// Representation of the entire state of all accounts in the system.
+///
+/// `State` can work together with `StateDB` to share account cache.
+///
+/// Local cache contains changes made locally and changes accumulated
+/// locally from previous commits. Global cache reflects the database
+/// state and never contains any changes.
+///
+/// Account data can be in the following cache states:
+/// * In global but not local - something that was queried from the database,
+///   but never modified
+/// * In local but not global - something that was just added (e.g. new account)
+/// * In both with the same value - something that was changed to a new value,
+///   but changed back to a previous block in the same block (same State instance)
+/// * In both with different values - something that was overwritten with a
+///   new value.
+///
+/// All read-only state queries check local cache/modifications first,
+/// then global state cache. If data is not found in any of the caches
+/// it is loaded from the DB to the local cache.
+///
+/// Upon destruction all the local cache data is merged into the global cache.
+/// The merge might be rejected if the current state is non-canonical.
pub struct State {
-	db: Box<JournalDB>,
+	db: StateDB,
	root: H256,
-	cache: RefCell<HashMap<Address, Option<Account>>>,
-	snapshots: RefCell<Vec<HashMap<Address, Option<Option<Account>>>>>,
+	cache: RefCell<HashMap<Address, AccountEntry>>,
+	snapshots: RefCell<Vec<HashMap<Address, Option<AccountEntry>>>>,
	account_start_nonce: U256,
	factories: Factories,
}

+#[derive(Copy, Clone)]
+enum RequireCache {
+	None,
+	CodeSize,
+	Code,
+}
+
const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with valid root. Creating a SecTrieDB with a valid root will not fail. \
	Therefore creating a SecTrieDB with this state's root will not fail.";

impl State {
	/// Creates new state with empty state root
	#[cfg(test)]
-	pub fn new(mut db: Box<JournalDB>, account_start_nonce: U256, factories: Factories) -> State {
+	pub fn new(mut db: StateDB, account_start_nonce: U256, factories: Factories) -> State {
		let mut root = H256::new();
		{
			// init trie and reset root to null
@@ -75,7 +145,7 @@ impl State {
	}

	/// Creates new state with existing state root
-	pub fn from_existing(db: Box<JournalDB>, root: H256, account_start_nonce: U256, factories: Factories) -> Result<State, TrieError> {
+	pub fn from_existing(db: StateDB, root: H256, account_start_nonce: U256, factories: Factories) -> Result<State, TrieError> {
		if !db.as_hashdb().contains(&root) {
			return Err(TrieError::InvalidStateRoot(root));
		}

@@ -119,14 +189,21 @@ impl State {
					self.cache.borrow_mut().insert(k, v);
				},
				None => {
-					self.cache.borrow_mut().remove(&k);
+					match self.cache.borrow_mut().entry(k) {
+						::std::collections::hash_map::Entry::Occupied(e) => {
+							if e.get().is_dirty() {
+								e.remove();
+							}
+						},
+						_ => {}
+					}
				}
			}
		}
	}

-	fn insert_cache(&self, address: &Address, account: Option<Account>) {
+	fn insert_cache(&self, address: &Address, account: AccountEntry) {
		if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
			if !snapshot.contains_key(address) {
				snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account));

@@ -139,13 +216,14 @@ impl State {
	fn note_cache(&self, address: &Address) {
		if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
			if !snapshot.contains_key(address) {
-				snapshot.insert(address.clone(), self.cache.borrow().get(address).cloned());
+				snapshot.insert(address.clone(), self.cache.borrow().get(address).map(AccountEntry::clone_for_snapshot));
			}
		}
	}

	/// Destroy the current object and return root and database.
-	pub fn drop(self) -> (H256, Box<JournalDB>) {
+	pub fn drop(mut self) -> (H256, StateDB) {
+		self.commit_cache();
		(self.root, self.db)
	}
@@ -157,50 +235,99 @@ impl State {
	/// Create a new contract at address `contract`. If there is already an account at the address
	/// it will have its code reset, ready for `init_code()`.
	pub fn new_contract(&mut self, contract: &Address, balance: U256) {
-		self.insert_cache(contract, Some(Account::new_contract(balance, self.account_start_nonce)));
+		self.insert_cache(contract, AccountEntry::Cached(Account::new_contract(balance, self.account_start_nonce)));
	}

	/// Remove an existing account.
	pub fn kill_account(&mut self, account: &Address) {
-		self.insert_cache(account, None);
+		self.insert_cache(account, AccountEntry::Killed);
	}

	/// Determine whether an account exists.
	pub fn exists(&self, a: &Address) -> bool {
-		self.ensure_cached(a, false, |a| a.is_some())
+		self.ensure_cached(a, RequireCache::None, |a| a.is_some())
	}

	/// Get the balance of account `a`.
	pub fn balance(&self, a: &Address) -> U256 {
-		self.ensure_cached(a, false,
+		self.ensure_cached(a, RequireCache::None,
			|a| a.as_ref().map_or(U256::zero(), |account| *account.balance()))
	}

	/// Get the nonce of account `a`.
	pub fn nonce(&self, a: &Address) -> U256 {
-		self.ensure_cached(a, false,
+		self.ensure_cached(a, RequireCache::None,
			|a| a.as_ref().map_or(self.account_start_nonce, |account| *account.nonce()))
	}

	/// Mutate storage of account `address` so that it is `value` for `key`.
	pub fn storage_at(&self, address: &Address, key: &H256) -> H256 {
-		self.ensure_cached(address, false, |a| a.as_ref().map_or(H256::new(), |a| {
-			let addr_hash = a.address_hash(address);
-			let db = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash);
-			a.storage_at(db.as_hashdb(), key)
-		}))
+		// Storage key search and update works like this:
+		// 1. If there's an entry for the account in the local cache check for the key and return it if found.
+		// 2. If there's an entry for the account in the global cache check for the key or load it into that account.
+		// 3. If account is missing in the global cache load it into the local cache and cache the key there.
+
+		// check local cache first without updating
+		{
+			let local_cache = self.cache.borrow_mut();
+			let mut local_account = None;
+			if let Some(maybe_acc) = local_cache.get(address) {
+				match *maybe_acc {
+					AccountEntry::Cached(ref account) => {
+						if let Some(value) = account.cached_storage_at(key) {
+							return value;
+						} else {
+							local_account = Some(maybe_acc);
+						}
+					},
+					_ => return H256::new(),
+				}
+			}
+			// check the global cache and cache the storage key there if found,
+			// otherwise cache the account locally and cache the storage key there.
+			if let Some(result) = self.db.get_cached(address, |acc| acc.map_or(H256::new(), |a| {
+				let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address));
+				a.storage_at(account_db.as_hashdb(), key)
+			})) {
+				return result;
+			}
+			if let Some(ref mut acc) = local_account {
+				if let AccountEntry::Cached(ref account) = **acc {
+					let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(address));
+					return account.storage_at(account_db.as_hashdb(), key)
+				} else {
+					return H256::new()
+				}
+			}
+		}

+		// account is not found in the global cache, get from the DB and insert into local
+		let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
+		let maybe_acc = match db.get(address) {
+			Ok(acc) => acc.map(Account::from_rlp),
+			Err(e) => panic!("Potential DB corruption encountered: {}", e),
+		};
+		let r = maybe_acc.as_ref().map_or(H256::new(), |a| {
+			let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address));
+			a.storage_at(account_db.as_hashdb(), key)
+		});
+		match maybe_acc {
+			Some(account) => self.insert_cache(address, AccountEntry::Cached(account)),
+			None => self.insert_cache(address, AccountEntry::Missing),
+		}
+		r
	}

-	/// Get the code of account `a`.
+	/// Get accounts' code.
	pub fn code(&self, a: &Address) -> Option<Bytes> {
-		self.ensure_cached(a, true,
+		self.ensure_cached(a, RequireCache::Code,
			|a| a.as_ref().map_or(None, |a| a.code().map(|x| x.to_vec())))
	}

-	/// Get the code size of account `a`.
+	/// Get accounts' code size.
	pub fn code_size(&self, a: &Address) -> Option<usize> {
-		self.ensure_cached(a, true,
-			|a| a.as_ref().map_or(None, |a| a.code().map(|x| x.len())))
+		self.ensure_cached(a, RequireCache::CodeSize,
+			|a| a.as_ref().and_then(|a| a.code_size()))
	}

	/// Add `incr` to the balance of account `a`.
@@ -262,19 +389,19 @@ impl State {
	/// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
	/// `accounts` is mutable because we may need to commit the code or storage and record that.
	#[cfg_attr(feature="dev", allow(match_ref_pats))]
-	pub fn commit_into(
+	fn commit_into(
		factories: &Factories,
-		db: &mut HashDB,
+		db: &mut StateDB,
		root: &mut H256,
-		accounts: &mut HashMap<Address, Option<Account>>
+		accounts: &mut HashMap<Address, AccountEntry>
	) -> Result<(), Error> {
		// first, commit the sub trees.
		// TODO: is this necessary or can we dispense with the `ref mut a` for just `a`?
		for (address, ref mut a) in accounts.iter_mut() {
			match a {
-				&mut&mut Some(ref mut account) if account.is_dirty() => {
+				&mut&mut AccountEntry::Cached(ref mut account) if account.is_dirty() => {
					let addr_hash = account.address_hash(address);
-					let mut account_db = factories.accountdb.create(db, addr_hash);
+					let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash);
					account.commit_storage(&factories.trie, account_db.as_hashdb_mut());
					account.commit_code(account_db.as_hashdb_mut());
				}

@@ -283,15 +410,18 @@ impl State {
		}

		{
-			let mut trie = factories.trie.from_existing(db, root).unwrap();
+			let mut trie = factories.trie.from_existing(db.as_hashdb_mut(), root).unwrap();
			for (address, ref mut a) in accounts.iter_mut() {
				match **a {
-					Some(ref mut account) if account.is_dirty() => {
+					AccountEntry::Cached(ref mut account) if account.is_dirty() => {
						account.set_clean();
-						try!(trie.insert(address, &account.rlp()))
+						try!(trie.insert(address, &account.rlp()));
					},
-					None => try!(trie.remove(address)),
-					_ => (),
+					AccountEntry::Killed => {
+						try!(trie.remove(address));
+						**a = AccountEntry::Missing;
+					},
+					_ => {},
				}
			}
		}

@@ -299,10 +429,27 @@ impl State {
		Ok(())
	}

+	fn commit_cache(&mut self) {
+		let mut addresses = self.cache.borrow_mut();
+		for (address, a) in addresses.drain() {
+			match a {
+				AccountEntry::Cached(account) => {
+					if !account.is_dirty() {
+						self.db.cache_account(address, Some(account));
+					}
+				},
+				AccountEntry::Missing => {
+					self.db.cache_account(address, None);
+				},
+				_ => {},
+			}
+		}
+	}
+
	/// Commits our cached account changes into the trie.
	pub fn commit(&mut self) -> Result<(), Error> {
		assert!(self.snapshots.borrow().is_empty());
-		Self::commit_into(&self.factories, self.db.as_hashdb_mut(), &mut self.root, &mut *self.cache.borrow_mut())
+		Self::commit_into(&self.factories, &mut self.db, &mut self.root, &mut *self.cache.borrow_mut())
	}
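To make the division of labour explicit: `commit` writes dirty entries into the trie, while `commit_cache` (invoked from `drop`) is what hands clean entries back to the shared `StateDB` cache. A minimal sketch of the intended lifecycle, using only methods from this diff (`seal_state` is a hypothetical wrapper):

// Sketch only: flush a State and recover its root and backing StateDB.
fn seal_state(mut state: State) -> Result<(H256, StateDB), Error> {
	// writes dirty accounts into the trie and computes the new root
	try!(state.commit());
	// drop() runs commit_cache(), feeding clean accounts to StateDB
	Ok(state.drop())
}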
	/// Clear state cache

@@ -316,7 +463,7 @@ impl State {
	pub fn populate_from(&mut self, accounts: PodState) {
		assert!(self.snapshots.borrow().is_empty());
		for (add, acc) in accounts.drain().into_iter() {
-			self.cache.borrow_mut().insert(add, Some(Account::from_pod(acc)));
+			self.cache.borrow_mut().insert(add, AccountEntry::Cached(Account::from_pod(acc)));
		}
	}

@@ -326,7 +473,7 @@ impl State {
		// TODO: handle database rather than just the cache.
		// will need fat db.
		PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| {
-			if let Some(ref acc) = *opt {
+			if let AccountEntry::Cached(ref acc) = *opt {
				m.insert(add.clone(), PodAccount::from_account(acc));
			}
			m

@@ -335,7 +482,7 @@ impl State {

	fn query_pod(&mut self, query: &PodState) {
		for (address, pod_account) in query.get() {
-			self.ensure_cached(address, true, |a| {
+			self.ensure_cached(address, RequireCache::Code, |a| {
				if a.is_some() {
					for key in pod_account.storage.keys() {
						self.storage_at(address, key);

@@ -354,28 +501,61 @@ impl State {
		pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post)
	}
-	/// Ensure account `a` is in our cache of the trie DB and return a handle for getting it.
-	/// `require_code` requires that the code be cached, too.
-	fn ensure_cached<'a, F, U>(&'a self, a: &'a Address, require_code: bool, f: F) -> U
-		where F: FnOnce(&Option<Account>) -> U {
-		let have_key = self.cache.borrow().contains_key(a);
-		if !have_key {
-			let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
-			let maybe_acc = match db.get(a) {
-				Ok(acc) => acc.map(Account::from_rlp),
-				Err(e) => panic!("Potential DB corruption encountered: {}", e),
-			};
-			self.insert_cache(a, maybe_acc);
-		}
-		if require_code {
-			if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() {
-				let addr_hash = account.address_hash(a);
-				let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash);
-				account.cache_code(accountdb.as_hashdb());
-			}
-		}
-
-		f(self.cache.borrow().get(a).unwrap())
+	fn update_account_cache(require: RequireCache, account: &mut Account, db: &HashDB) {
+		match require {
+			RequireCache::None => {},
+			RequireCache::Code => {
+				account.cache_code(db);
+			}
+			RequireCache::CodeSize => {
+				account.cache_code_size(db);
+			}
+		}
+	}
+
+	/// Check caches for required data
+	/// First searches for account in the local, then the shared cache.
+	/// Populates local cache if nothing found.
+	fn ensure_cached<F, U>(&self, a: &Address, require: RequireCache, f: F) -> U
+		where F: Fn(Option<&Account>) -> U {
+		// check local cache first
+		if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) {
+			if let AccountEntry::Cached(ref mut account) = **maybe_acc {
+				let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a));
+				Self::update_account_cache(require, account, accountdb.as_hashdb());
+				return f(Some(account));
+			}
+			return f(None);
+		}
+		// check global cache
+		let result = self.db.get_cached(a, |mut acc| {
+			if let Some(ref mut account) = acc {
+				let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a));
+				Self::update_account_cache(require, account, accountdb.as_hashdb());
+			}
+			f(acc.map(|a| &*a))
+		});
+		match result {
+			Some(r) => r,
+			None => {
+				// not found in the global cache, get from the DB and insert into local
+				let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
+				let mut maybe_acc = match db.get(a) {
+					Ok(acc) => acc.map(Account::from_rlp),
+					Err(e) => panic!("Potential DB corruption encountered: {}", e),
+				};
+				if let Some(ref mut account) = maybe_acc.as_mut() {
+					let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a));
+					Self::update_account_cache(require, account, accountdb.as_hashdb());
+				}
+				let r = f(maybe_acc.as_ref());
+				match maybe_acc {
+					Some(account) => self.insert_cache(a, AccountEntry::Cached(account)),
+					None => self.insert_cache(a, AccountEntry::Missing),
+				}
+				r
+			}
+		}
	}
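`ensure_cached` is the generic form of the three-level fallback that `storage_at` spells out by hand earlier in this file: local cache, then shared cache, then the backing store, populating the local cache on a miss. The same shape as a self-contained sketch over plain maps (`load` stands in for the trie read; all names here are illustrative):

use std::collections::HashMap;
use std::hash::Hash;

// Three-level lookup: local cache -> shared cache -> backing store.
fn lookup<K: Hash + Eq + Clone, V: Clone, F: Fn(&K) -> V>(
	local: &mut HashMap<K, V>,
	shared: &HashMap<K, V>,
	load: F,
	key: &K,
) -> V {
	if let Some(v) = local.get(key) {
		return v.clone(); // local modifications and prior misses win
	}
	if let Some(v) = shared.get(key) {
		return v.clone(); // then the shared (global) cache
	}
	let v = load(key); // finally the backing store
	local.insert(key.clone(), v.clone()); // populate the local cache
	v
}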
	/// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too.

@@ -390,30 +570,40 @@ impl State {
		{
			let contains_key = self.cache.borrow().contains_key(a);
			if !contains_key {
-				let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
-				let maybe_acc = match db.get(a) {
-					Ok(acc) => acc.map(Account::from_rlp),
-					Err(e) => panic!("Potential DB corruption encountered: {}", e),
-				};
-
-				self.insert_cache(a, maybe_acc);
+				match self.db.get_cached_account(a) {
+					Some(Some(acc)) => self.insert_cache(a, AccountEntry::Cached(acc)),
+					Some(None) => self.insert_cache(a, AccountEntry::Missing),
+					None => {
+						let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
+						let maybe_acc = match db.get(a) {
+							Ok(Some(acc)) => AccountEntry::Cached(Account::from_rlp(acc)),
+							Ok(None) => AccountEntry::Missing,
+							Err(e) => panic!("Potential DB corruption encountered: {}", e),
+						};
+						self.insert_cache(a, maybe_acc);
+					}
+				}
			} else {
				self.note_cache(a);
			}

			match self.cache.borrow_mut().get_mut(a).unwrap() {
-				&mut Some(ref mut acc) => not_default(acc),
-				slot @ &mut None => *slot = Some(default()),
+				&mut AccountEntry::Cached(ref mut acc) => not_default(acc),
+				slot => *slot = AccountEntry::Cached(default()),
			}
		}

		RefMut::map(self.cache.borrow_mut(), |c| {
-			let account = c.get_mut(a).unwrap().as_mut().unwrap();
-			if require_code {
-				let addr_hash = account.address_hash(a);
-				let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash);
-				account.cache_code(accountdb.as_hashdb());
+			match c.get_mut(a).unwrap() {
+				&mut AccountEntry::Cached(ref mut account) => {
+					if require_code {
+						let addr_hash = account.address_hash(a);
+						let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash);
+						account.cache_code(accountdb.as_hashdb());
+					}
+					account
+				},
+				_ => panic!("Required account must always exist; qed"),
			}
-			account
		})
	}
}
@@ -427,17 +617,10 @@ impl fmt::Debug for State {
impl Clone for State {
	fn clone(&self) -> State {
		let cache = {
-			let mut cache = HashMap::new();
+			let mut cache: HashMap<Address, AccountEntry> = HashMap::new();
			for (key, val) in self.cache.borrow().iter() {
-				let key = key.clone();
-				match *val {
-					Some(ref acc) if acc.is_dirty() => {
-						cache.insert(key, Some(acc.clone()));
-					},
-					None => {
-						cache.insert(key, None);
-					},
-					_ => {},
+				if let Some(entry) = val.clone_dirty() {
+					cache.insert(key.clone(), entry);
				}
			}
			cache

@@ -447,7 +630,7 @@ impl Clone for State {
			db: self.db.boxed_clone(),
			root: self.root.clone(),
			cache: RefCell::new(cache),
-			snapshots: RefCell::new(self.snapshots.borrow().clone()),
+			snapshots: RefCell::new(Vec::new()),
			account_start_nonce: self.account_start_nonce.clone(),
			factories: self.factories.clone(),
		}

ethcore/src/state_db.rs (new file)

@@ -0,0 +1,161 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use lru_cache::LruCache;
use util::journaldb::JournalDB;
use util::hash::{H256};
use util::hashdb::HashDB;
use util::{Arc, Address, DBTransaction, UtilError, Mutex};
use state::Account;
const STATE_CACHE_ITEMS: usize = 65536;
struct AccountCache {
/// DB Account cache. `None` indicates that account is known to be missing.
accounts: LruCache<Address, Option<Account>>,
}
/// State database abstraction.
/// Manages shared global state cache.
/// A clone of `StateDB` may be created as canonical or not.
/// For canonical clones cache changes are accumulated and applied
/// on commit.
/// For non-canonical clones cache is cleared on commit.
pub struct StateDB {
db: Box<JournalDB>,
account_cache: Arc<Mutex<AccountCache>>,
cache_overlay: Vec<(Address, Option<Account>)>,
is_canon: bool,
}
impl StateDB {
/// Create a new instance wrapping `JournalDB`
pub fn new(db: Box<JournalDB>) -> StateDB {
StateDB {
db: db,
account_cache: Arc::new(Mutex::new(AccountCache { accounts: LruCache::new(STATE_CACHE_ITEMS) })),
cache_overlay: Vec::new(),
is_canon: false,
}
}
/// Commit all recent insert operations and canonical historical commits' removals from the
/// old era to the backing database, reverting any non-canonical historical commit's inserts.
pub fn commit(&mut self, batch: &mut DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
let records = try!(self.db.commit(batch, now, id, end));
if self.is_canon {
self.commit_cache();
} else {
self.clear_cache();
}
Ok(records)
}
/// Returns an interface to HashDB.
pub fn as_hashdb(&self) -> &HashDB {
self.db.as_hashdb()
}
/// Returns an interface to mutable HashDB.
pub fn as_hashdb_mut(&mut self) -> &mut HashDB {
self.db.as_hashdb_mut()
}
/// Clone the database.
pub fn boxed_clone(&self) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
cache_overlay: Vec::new(),
is_canon: false,
}
}
/// Clone the database for a canonical state.
pub fn boxed_clone_canon(&self) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
cache_overlay: Vec::new(),
is_canon: true,
}
}
/// Check if pruning is enabled on the database.
pub fn is_pruned(&self) -> bool {
self.db.is_pruned()
}
/// Heap size used.
pub fn mem_used(&self) -> usize {
self.db.mem_used() //TODO: + self.account_cache.lock().heap_size_of_children()
}
/// Returns underlying `JournalDB`.
pub fn journal_db(&self) -> &JournalDB {
&*self.db
}
/// Enqueue cache change.
pub fn cache_account(&mut self, addr: Address, data: Option<Account>) {
self.cache_overlay.push((addr, data));
}
/// Apply pending cache changes.
fn commit_cache(&mut self) {
let mut cache = self.account_cache.lock();
for (address, account) in self.cache_overlay.drain(..) {
if let Some(&mut Some(ref mut existing)) = cache.accounts.get_mut(&address) {
if let Some(new) = account {
existing.merge_with(new);
continue;
}
}
cache.accounts.insert(address, account);
}
}
/// Clear the cache.
pub fn clear_cache(&mut self) {
self.cache_overlay.clear();
let mut cache = self.account_cache.lock();
cache.accounts.clear();
}
/// Get basic copy of the cached account. Does not include storage.
/// Returns 'None' if the state is non-canonical and cache is disabled
/// or if the account is not cached.
pub fn get_cached_account(&self, addr: &Address) -> Option<Option<Account>> {
if !self.is_canon {
return None;
}
let mut cache = self.account_cache.lock();
cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
}
/// Get value from a cached account.
/// Returns 'None' if the state is non-canonical and cache is disabled
/// or if the account is not cached.
pub fn get_cached<F, U>(&self, a: &Address, f: F) -> Option<U>
where F: FnOnce(Option<&mut Account>) -> U {
if !self.is_canon {
return None;
}
let mut cache = self.account_cache.lock();
cache.accounts.get_mut(a).map(|c| f(c.as_mut()))
}
}
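Only the canonical clone participates in the shared cache: its overlay is merged into the shared LRU on `commit()`, while a plain clone clears the shared cache instead. A hedged usage sketch using only the constructors above (`make_clones` is a hypothetical helper):

// Sketch only: the two clone flavours of StateDB.
fn make_clones(journal_db: Box<JournalDB>) -> (StateDB, StateDB) {
	let state_db = StateDB::new(journal_db);
	// canonical clone: cache overlay applied to the shared LRU on commit();
	// plain clone: shared cache is cleared on commit() instead.
	(state_db.boxed_clone_canon(), state_db.boxed_clone())
}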


@@ -19,6 +19,7 @@ use io::*;
use client::{BlockChainClient, Client, ClientConfig};
use common::*;
use spec::*;
+use state_db::StateDB;
use block::{OpenBlock, Drain};
use blockchain::{BlockChain, Config as BlockChainConfig};
use state::*;

@@ -146,9 +147,9 @@ pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_numbe
	).unwrap();
	let test_engine = &*test_spec.engine;

-	let mut db_result = get_temp_journal_db();
+	let mut db_result = get_temp_state_db();
	let mut db = db_result.take();
-	test_spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+	test_spec.ensure_db_good(&mut db).unwrap();
	let genesis_header = test_spec.genesis_header();

	let mut rolling_timestamp = 40;

@@ -321,9 +322,9 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
	}
}

-pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
+pub fn get_temp_state_db() -> GuardedTempResult<StateDB> {
	let temp = RandomTempPath::new();
-	let journal_db = get_temp_journal_db_in(temp.as_path());
+	let journal_db = get_temp_state_db_in(temp.as_path());

	GuardedTempResult {
		_temp: temp,

@@ -333,7 +334,7 @@ pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
pub fn get_temp_state() -> GuardedTempResult<State> {
	let temp = RandomTempPath::new();
-	let journal_db = get_temp_journal_db_in(temp.as_path());
+	let journal_db = get_temp_state_db_in(temp.as_path());

	GuardedTempResult {
		_temp: temp,

@@ -341,13 +342,14 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
	}
}

-pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> {
+pub fn get_temp_state_db_in(path: &Path) -> StateDB {
	let db = new_db(path.to_str().expect("Only valid utf8 paths for tests."));
-	journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, None)
+	let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, None);
+	StateDB::new(journal_db)
}

pub fn get_temp_state_in(path: &Path) -> State {
-	let journal_db = get_temp_journal_db_in(path);
+	let journal_db = get_temp_state_db_in(path);
	State::new(journal_db, U256::from(0), Default::default())
}


@@ -31,7 +31,7 @@ fn top_level_subtraces(traces: &[FlatTrace]) -> usize {
	traces.iter().filter(|t| t.trace_address.is_empty()).count()
}

-fn update_trace_address(traces: Vec<FlatTrace>) -> Vec<FlatTrace> {
+fn prefix_subtrace_addresses(mut traces: Vec<FlatTrace>) -> Vec<FlatTrace> {
	// input traces are expected to be ordered like
	// []
	// [0]

@@ -48,26 +48,38 @@ fn update_trace_address(traces: Vec<FlatTrace>) -> Vec<FlatTrace> {
	// [0, 0, 1]
	// [1]
	// [1, 0]
-	let mut top_subtrace_index = 0;
-	let mut subtrace_subtraces_left = 0;
-	traces.into_iter().map(|mut trace| {
-		let is_top_subtrace = trace.trace_address.is_empty();
-		let is_subtrace = trace.trace_address.len() == 1;
-		trace.trace_address.push_front(top_subtrace_index);
-
-		if is_top_subtrace {
-			subtrace_subtraces_left = trace.subtraces;
-		} else if is_subtrace {
-			subtrace_subtraces_left -= 1;
-		}
-
-		if subtrace_subtraces_left == 0 {
-			top_subtrace_index += 1;
-		}
-		trace
-	}).collect()
+	let mut current_subtrace_index = 0;
+	let mut first = true;
+	for trace in traces.iter_mut() {
+		match (first, trace.trace_address.is_empty()) {
+			(true, _) => first = false,
+			(_, true) => current_subtrace_index += 1,
+			_ => {}
+		}
+		trace.trace_address.push_front(current_subtrace_index);
+	}
+	traces
}

+#[test]
+fn should_prefix_address_properly() {
+	use super::trace::{Action, Res, Suicide};
+
+	let f = |v: Vec<usize>| FlatTrace {
+		action: Action::Suicide(Suicide {
+			address: Default::default(),
+			balance: Default::default(),
+			refund_address: Default::default(),
+		}),
+		result: Res::None,
+		subtraces: 0,
+		trace_address: v.into_iter().collect(),
+	};
+	let t = vec![vec![], vec![0], vec![0, 0], vec![0], vec![], vec![], vec![0], vec![]].into_iter().map(&f).collect();
+	let t = prefix_subtrace_addresses(t);
+	assert_eq!(t, vec![vec![0], vec![0, 0], vec![0, 0, 0], vec![0, 0], vec![1], vec![2], vec![2, 0], vec![3]].into_iter().map(&f).collect::<Vec<_>>());
+}
+
impl Tracer for ExecutiveTracer {
	fn prepare_trace_call(&self, params: &ActionParams) -> Option<Call> {
		Some(Call::from(params.clone()))

@@ -93,7 +105,7 @@ impl Tracer for ExecutiveTracer {
		};
		debug!(target: "trace", "Traced call {:?}", trace);
		self.traces.push(trace);
-		self.traces.extend(update_trace_address(subs));
+		self.traces.extend(prefix_subtrace_addresses(subs));
	}

	fn trace_create(&mut self, create: Option<Create>, gas_used: U256, code: Option<Bytes>, address: Address, subs: Vec<FlatTrace>) {

@@ -109,7 +121,7 @@ impl Tracer for ExecutiveTracer {
		};
		debug!(target: "trace", "Traced create {:?}", trace);
		self.traces.push(trace);
-		self.traces.extend(update_trace_address(subs));
+		self.traces.extend(prefix_subtrace_addresses(subs));
	}

	fn trace_failed_call(&mut self, call: Option<Call>, subs: Vec<FlatTrace>, error: TraceError) {

@@ -121,7 +133,7 @@ impl Tracer for ExecutiveTracer {
		};
		debug!(target: "trace", "Traced failed call {:?}", trace);
		self.traces.push(trace);
-		self.traces.extend(update_trace_address(subs));
+		self.traces.extend(prefix_subtrace_addresses(subs));
	}

	fn trace_failed_create(&mut self, create: Option<Create>, subs: Vec<FlatTrace>, error: TraceError) {

@@ -133,7 +145,7 @@ impl Tracer for ExecutiveTracer {
		};
		debug!(target: "trace", "Traced failed create {:?}", trace);
		self.traces.push(trace);
-		self.traces.extend(update_trace_address(subs));
+		self.traces.extend(prefix_subtrace_addresses(subs));
	}

	fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address) {
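The rewritten prefixing walks the flattened traces once, bumping the top-level index whenever a new empty address appears after the first trace, and pushing that index onto the front of every address. A self-contained re-implementation over bare `VecDeque` addresses (a sketch, not the parity types) that reproduces the expectation in the unit test above:

use std::collections::VecDeque;

// Sketch of the prefixing rule: addresses arrive in pre-order,
// grouped by top-level call; each gets its group index push_front'ed.
fn prefix(mut traces: Vec<VecDeque<usize>>) -> Vec<VecDeque<usize>> {
	let mut current_subtrace_index = 0;
	let mut first = true;
	for addr in traces.iter_mut() {
		match (first, addr.is_empty()) {
			(true, _) => first = false,               // first trace keeps index 0
			(_, true) => current_subtrace_index += 1, // a new top-level call starts
			_ => {}
		}
		addr.push_front(current_subtrace_index);
	}
	traces
}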

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Block status description module //! Block status description module
use verification::queue::Status as QueueStatus;
/// General block status /// General block status
#[derive(Debug, Eq, PartialEq, Binary)] #[derive(Debug, Eq, PartialEq, Binary)]
@ -28,3 +29,13 @@ pub enum BlockStatus {
/// Unknown. /// Unknown.
Unknown, Unknown,
} }
impl From<QueueStatus> for BlockStatus {
fn from(status: QueueStatus) -> Self {
match status {
QueueStatus::Queued => BlockStatus::Queued,
QueueStatus::Bad => BlockStatus::Bad,
QueueStatus::Unknown => BlockStatus::Unknown,
}
}
}
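A quick standalone sketch of this conversion in use; the enums are duplicated here without the ipc `Binary` derive so the snippet runs on its own.

#[derive(Debug, PartialEq)]
enum QueueStatus { Queued, Bad, Unknown }

#[derive(Debug, PartialEq)]
enum BlockStatus { Queued, Bad, Unknown }

impl From<QueueStatus> for BlockStatus {
	fn from(status: QueueStatus) -> Self {
		match status {
			QueueStatus::Queued => BlockStatus::Queued,
			QueueStatus::Bad => BlockStatus::Bad,
			QueueStatus::Unknown => BlockStatus::Unknown,
		}
	}
}

fn main() {
	// `.into()` picks up the From impl
	let s: BlockStatus = QueueStatus::Bad.into();
	assert_eq!(s, BlockStatus::Bad);
}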

View File

@ -25,7 +25,7 @@ pub mod executed;
pub mod block_status; pub mod block_status;
pub mod account_diff; pub mod account_diff;
pub mod state_diff; pub mod state_diff;
pub mod block_queue_info; pub mod verification_queue_info;
pub mod filter; pub mod filter;
pub mod trace_filter; pub mod trace_filter;
pub mod call_analytics; pub mod call_analytics;

View File

@ -20,7 +20,7 @@ use std::ops::Deref;
use std::cell::*; use std::cell::*;
use rlp::*; use rlp::*;
use util::sha3::Hashable; use util::sha3::Hashable;
use util::{H256, Address, U256, Bytes}; use util::{H256, Address, U256, Bytes, HeapSizeOf};
use ethkey::{Signature, sign, Secret, Public, recover, public_to_address, Error as EthkeyError}; use ethkey::{Signature, sign, Secret, Public, recover, public_to_address, Error as EthkeyError};
use error::*; use error::*;
use evm::Schedule; use evm::Schedule;
@ -86,6 +86,12 @@ impl Transaction {
} }
} }
impl HeapSizeOf for Transaction {
fn heap_size_of_children(&self) -> usize {
self.data.heap_size_of_children()
}
}
impl From<ethjson::state::Transaction> for SignedTransaction { impl From<ethjson::state::Transaction> for SignedTransaction {
fn from(t: ethjson::state::Transaction) -> Self { fn from(t: ethjson::state::Transaction) -> Self {
let to: Option<ethjson::hash::Address> = t.to.into(); let to: Option<ethjson::hash::Address> = t.to.into();
@ -251,6 +257,12 @@ impl Encodable for SignedTransaction {
fn rlp_append(&self, s: &mut RlpStream) { self.rlp_append_sealed_transaction(s) } fn rlp_append(&self, s: &mut RlpStream) { self.rlp_append_sealed_transaction(s) }
} }
impl HeapSizeOf for SignedTransaction {
fn heap_size_of_children(&self) -> usize {
self.unsigned.heap_size_of_children()
}
}
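Both impls follow the same delegation pattern: only heap-owned children contribute, so a wrapper forwards to the field that actually owns an allocation. A simplified sketch, with a toy trait standing in for the `heapsize` crate's and capacity as a rough stand-in for the real allocator query:

trait HeapSize { fn heap_size_of_children(&self) -> usize; }

impl HeapSize for Vec<u8> {
	// approximation; the real crate asks the allocator for the usable size
	fn heap_size_of_children(&self) -> usize { self.capacity() }
}

struct Tx { data: Vec<u8> }
struct SignedTx { unsigned: Tx }

impl HeapSize for Tx {
	fn heap_size_of_children(&self) -> usize { self.data.heap_size_of_children() }
}

impl HeapSize for SignedTx {
	// the signature fields are inline, so just delegate to the inner tx
	fn heap_size_of_children(&self) -> usize { self.unsigned.heap_size_of_children() }
}

fn main() {
	let tx = SignedTx { unsigned: Tx { data: vec![0u8; 100] } };
	assert!(tx.heap_size_of_children() >= 100);
}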
impl SignedTransaction { impl SignedTransaction {
/// Append object with a signature into RLP stream /// Append object with a signature into RLP stream
pub fn rlp_append_sealed_transaction(&self, s: &mut RlpStream) { pub fn rlp_append_sealed_transaction(&self, s: &mut RlpStream) {

View File

@ -0,0 +1,53 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Verification queue info types
/// Verification queue status
#[derive(Debug, Binary)]
pub struct VerificationQueueInfo {
/// Number of queued items pending verification
pub unverified_queue_size: usize,
/// Number of verified queued items pending import
pub verified_queue_size: usize,
/// Number of items being verified
pub verifying_queue_size: usize,
/// Configured maximum number of items in the queue
pub max_queue_size: usize,
/// Configured maximum number of bytes to use
pub max_mem_use: usize,
/// Heap memory used in bytes
pub mem_used: usize,
}
impl VerificationQueueInfo {
/// The total size of the queues.
pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size }
/// The size of the unverified and verifying queues.
pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size }
/// Indicates that queue is full
pub fn is_full(&self) -> bool {
self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > self.max_queue_size ||
self.mem_used > self.max_mem_use
}
/// Indicates that queue is empty
pub fn is_empty(&self) -> bool {
self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size == 0
}
}
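The fullness test is an OR over the item cap and the memory cap, so either limit alone makes the queue report full. A short runnable sketch (struct and methods duplicated from above, minus the ipc `Binary` derive):

pub struct VerificationQueueInfo {
	pub unverified_queue_size: usize,
	pub verified_queue_size: usize,
	pub verifying_queue_size: usize,
	pub max_queue_size: usize,
	pub max_mem_use: usize,
	pub mem_used: usize,
}

impl VerificationQueueInfo {
	pub fn total_queue_size(&self) -> usize {
		self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size
	}
	pub fn is_full(&self) -> bool {
		self.total_queue_size() > self.max_queue_size || self.mem_used > self.max_mem_use
	}
}

fn main() {
	let info = VerificationQueueInfo {
		unverified_queue_size: 20_000,
		verified_queue_size: 9_000,
		verifying_queue_size: 2_000,
		max_queue_size: 30_000,
		max_mem_use: 50 * 1024 * 1024,
		mem_used: 10 * 1024 * 1024,
	};
	// 31_000 items exceed the 30_000 cap, so the queue reports full even
	// though memory use is still well under its limit.
	assert_eq!(info.total_queue_size(), 31_000);
	assert!(info.is_full());
}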

View File

@ -16,6 +16,7 @@
pub mod verification; pub mod verification;
pub mod verifier; pub mod verifier;
pub mod queue;
mod canon_verifier; mod canon_verifier;
mod noop_verifier; mod noop_verifier;
@ -23,6 +24,7 @@ pub use self::verification::*;
pub use self::verifier::Verifier; pub use self::verifier::Verifier;
pub use self::canon_verifier::CanonVerifier; pub use self::canon_verifier::CanonVerifier;
pub use self::noop_verifier::NoopVerifier; pub use self::noop_verifier::NoopVerifier;
pub use self::queue::{BlockQueue, Config as QueueConfig, VerificationQueue, QueueInfo};
/// Verifier type. /// Verifier type.
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]

View File

@ -0,0 +1,182 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Definition of valid items for the verification queue.
use engines::Engine;
use error::Error;
use util::{HeapSizeOf, H256};
pub use self::blocks::Blocks;
pub use self::headers::Headers;
/// Something which can produce a hash and a parent hash.
pub trait HasHash {
/// Get the hash of this item.
fn hash(&self) -> H256;
/// Get the hash of this item's parent.
fn parent_hash(&self) -> H256;
}
/// Defines transitions between stages of verification.
///
/// It starts with a fallible transformation from an "input" into the unverified item.
/// This consists of quick, simply done checks as well as extracting particular data.
///
/// Then, there is a `verify` function which performs more expensive checks and
/// produces the verified output.
///
/// For correctness, the hashes produced by each stage of the pipeline should be
/// consistent.
pub trait Kind: 'static + Sized + Send + Sync {
/// The first stage: completely unverified.
type Input: Sized + Send + HasHash + HeapSizeOf;
/// The second stage: partially verified.
type Unverified: Sized + Send + HasHash + HeapSizeOf;
/// The third stage: completely verified.
type Verified: Sized + Send + HasHash + HeapSizeOf;
/// Attempt to create the `Unverified` item from the input.
fn create(input: Self::Input, engine: &Engine) -> Result<Self::Unverified, Error>;
/// Attempt to verify the `Unverified` item using the given engine.
fn verify(unverified: Self::Unverified, engine: &Engine) -> Result<Self::Verified, Error>;
}
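A toy, self-contained model of the two-stage pipeline this trait defines (illustrative names; the real trait also carries the `Engine`, hashing and heap-size bounds): `create` runs the cheap check at insertion time, `verify` the expensive one on a worker thread.

trait Kind {
	type Input;
	type Unverified;
	type Verified;
	fn create(input: Self::Input) -> Result<Self::Unverified, String>;
	fn verify(unverified: Self::Unverified) -> Result<Self::Verified, String>;
}

struct Doubling;

impl Kind for Doubling {
	type Input = i64;
	type Unverified = i64;
	type Verified = i64;
	// cheap structural check on insertion
	fn create(input: i64) -> Result<i64, String> {
		if input >= 0 { Ok(input) } else { Err("negative".into()) }
	}
	// "expensive" check producing the final form
	fn verify(unverified: i64) -> Result<i64, String> {
		Ok(unverified * 2)
	}
}

fn main() {
	let out = Doubling::create(21).and_then(Doubling::verify).unwrap();
	assert_eq!(out, 42);
	assert!(Doubling::create(-1).is_err());
}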
/// The blocks verification module.
pub mod blocks {
use super::{Kind, HasHash};
use engines::Engine;
use error::Error;
use header::Header;
use verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered};
use util::{Bytes, HeapSizeOf, H256};
/// A mode for verifying blocks.
pub struct Blocks;
impl Kind for Blocks {
type Input = Unverified;
type Unverified = Unverified;
type Verified = PreverifiedBlock;
fn create(input: Self::Input, engine: &Engine) -> Result<Self::Unverified, Error> {
match verify_block_basic(&input.header, &input.bytes, engine) {
Ok(()) => Ok(input),
Err(e) => {
warn!(target: "client", "Stage 1 block verification failed for {}: {:?}", input.hash(), e);
Err(e)
}
}
}
fn verify(un: Self::Unverified, engine: &Engine) -> Result<Self::Verified, Error> {
let hash = un.hash();
match verify_block_unordered(un.header, un.bytes, engine) {
Ok(verified) => Ok(verified),
Err(e) => {
warn!(target: "client", "Stage 2 block verification failed for {}: {:?}", hash, e);
Err(e)
}
}
}
}
/// An unverified block.
pub struct Unverified {
header: Header,
bytes: Bytes,
}
impl Unverified {
/// Create an `Unverified` from raw bytes.
pub fn new(bytes: Bytes) -> Self {
use views::BlockView;
let header = BlockView::new(&bytes).header();
Unverified {
header: header,
bytes: bytes,
}
}
}
impl HeapSizeOf for Unverified {
fn heap_size_of_children(&self) -> usize {
self.header.heap_size_of_children() + self.bytes.heap_size_of_children()
}
}
impl HasHash for Unverified {
fn hash(&self) -> H256 {
self.header.hash()
}
fn parent_hash(&self) -> H256 {
self.header.parent_hash().clone()
}
}
impl HasHash for PreverifiedBlock {
fn hash(&self) -> H256 {
self.header.hash()
}
fn parent_hash(&self) -> H256 {
self.header.parent_hash().clone()
}
}
}
/// Verification for headers.
pub mod headers {
use super::{Kind, HasHash};
use engines::Engine;
use error::Error;
use header::Header;
use verification::verify_header_params;
use util::hash::H256;
impl HasHash for Header {
fn hash(&self) -> H256 { self.hash() }
fn parent_hash(&self) -> H256 { self.parent_hash().clone() }
}
/// A mode for verifying headers.
pub struct Headers;
impl Kind for Headers {
type Input = Header;
type Unverified = Header;
type Verified = Header;
fn create(input: Self::Input, engine: &Engine) -> Result<Self::Unverified, Error> {
verify_header_params(&input, engine).map(|_| input)
}
fn verify(unverified: Self::Unverified, engine: &Engine) -> Result<Self::Verified, Error> {
engine.verify_block_unordered(&unverified, None).map(|_| unverified)
}
}
}

View File

@ -16,30 +16,35 @@
//! A queue of blocks. Sits between network or other I/O and the `BlockChain`. //! A queue of blocks. Sits between network or other I/O and the `BlockChain`.
//! Sorts them ready for blockchain insertion. //! Sorts them ready for blockchain insertion.
use std::thread::{JoinHandle, self}; use std::thread::{JoinHandle, self};
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
use std::sync::{Condvar as SCondvar, Mutex as SMutex}; use std::sync::{Condvar as SCondvar, Mutex as SMutex};
use util::*; use util::*;
use io::*; use io::*;
use verification::*;
use error::*; use error::*;
use engines::Engine; use engines::Engine;
use views::*;
use header::*;
use service::*; use service::*;
use client::BlockStatus;
pub use types::block_queue_info::BlockQueueInfo; use self::kind::{HasHash, Kind};
known_heap_size!(0, UnverifiedBlock, VerifyingBlock, PreverifiedBlock); pub use types::verification_queue_info::VerificationQueueInfo as QueueInfo;
pub mod kind;
const MIN_MEM_LIMIT: usize = 16384; const MIN_MEM_LIMIT: usize = 16384;
const MIN_QUEUE_LIMIT: usize = 512; const MIN_QUEUE_LIMIT: usize = 512;
/// Block queue configuration /// Type alias for block queue convenience.
pub type BlockQueue = VerificationQueue<self::kind::Blocks>;
/// Type alias for header queue convenience.
pub type HeaderQueue = VerificationQueue<self::kind::Headers>;
/// Verification queue configuration
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct BlockQueueConfig { pub struct Config {
/// Maximum number of blocks to keep in unverified queue. /// Maximum number of items to keep in unverified queue.
/// When the limit is reached, is_full returns true. /// When the limit is reached, is_full returns true.
pub max_queue_size: usize, pub max_queue_size: usize,
/// Maximum heap memory to use. /// Maximum heap memory to use.
@ -47,42 +52,44 @@ pub struct BlockQueueConfig {
pub max_mem_use: usize, pub max_mem_use: usize,
} }
impl Default for BlockQueueConfig { impl Default for Config {
fn default() -> Self { fn default() -> Self {
BlockQueueConfig { Config {
max_queue_size: 30000, max_queue_size: 30000,
max_mem_use: 50 * 1024 * 1024, max_mem_use: 50 * 1024 * 1024,
} }
} }
} }
impl BlockQueueInfo {
	/// The total size of the queues.
	pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size }
	/// The size of the unverified and verifying queues.
	pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size }
	/// Indicates that queue is full
	pub fn is_full(&self) -> bool {
		self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > self.max_queue_size ||
			self.mem_used > self.max_mem_use
	}
	/// Indicates that queue is empty
	pub fn is_empty(&self) -> bool {
		self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size == 0
	}
}

/// An item which is in the process of being verified.
pub struct Verifying<K: Kind> {
	hash: H256,
	output: Option<K::Verified>,
}

impl<K: Kind> HeapSizeOf for Verifying<K> {
	fn heap_size_of_children(&self) -> usize {
		self.output.heap_size_of_children()
	}
}

/// Status of items in the queue.
pub enum Status {
	/// Currently queued.
	Queued,
	/// Known to be bad.
	Bad,
	/// Unknown.
	Unknown,
}

/// A queue of blocks. Sits between network or other I/O and the `BlockChain`. /// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`.
/// Sorts them ready for blockchain insertion. /// Keeps them in the same order as inserted, minus invalid items.
pub struct BlockQueue { pub struct VerificationQueue<K: Kind> {
panic_handler: Arc<PanicHandler>, panic_handler: Arc<PanicHandler>,
engine: Arc<Engine>, engine: Arc<Engine>,
more_to_verify: Arc<SCondvar>, more_to_verify: Arc<SCondvar>,
verification: Arc<Verification>, verification: Arc<Verification<K>>,
verifiers: Vec<JoinHandle<()>>, verifiers: Vec<JoinHandle<()>>,
deleting: Arc<AtomicBool>, deleting: Arc<AtomicBool>,
ready_signal: Arc<QueueSignal>, ready_signal: Arc<QueueSignal>,
@ -92,16 +99,6 @@ pub struct BlockQueue {
max_mem_use: usize, max_mem_use: usize,
} }
struct UnverifiedBlock {
header: Header,
bytes: Bytes,
}
struct VerifyingBlock {
hash: H256,
block: Option<PreverifiedBlock>,
}
struct QueueSignal { struct QueueSignal {
deleting: Arc<AtomicBool>, deleting: Arc<AtomicBool>,
signalled: AtomicBool, signalled: AtomicBool,
@ -128,19 +125,19 @@ impl QueueSignal {
} }
} }
struct Verification { struct Verification<K: Kind> {
// All locks must be captured in the order declared here. // All locks must be captured in the order declared here.
unverified: Mutex<VecDeque<UnverifiedBlock>>, unverified: Mutex<VecDeque<K::Unverified>>,
verified: Mutex<VecDeque<PreverifiedBlock>>, verified: Mutex<VecDeque<K::Verified>>,
verifying: Mutex<VecDeque<VerifyingBlock>>, verifying: Mutex<VecDeque<Verifying<K>>>,
bad: Mutex<HashSet<H256>>, bad: Mutex<HashSet<H256>>,
more_to_verify: SMutex<()>, more_to_verify: SMutex<()>,
empty: SMutex<()>, empty: SMutex<()>,
} }
impl BlockQueue { impl<K: Kind> VerificationQueue<K> {
/// Creates a new queue instance. /// Creates a new queue instance.
pub fn new(config: BlockQueueConfig, engine: Arc<Engine>, message_channel: IoChannel<ClientIoMessage>) -> BlockQueue { pub fn new(config: Config, engine: Arc<Engine>, message_channel: IoChannel<ClientIoMessage>) -> Self {
let verification = Arc::new(Verification { let verification = Arc::new(Verification {
unverified: Mutex::new(VecDeque::new()), unverified: Mutex::new(VecDeque::new()),
verified: Mutex::new(VecDeque::new()), verified: Mutex::new(VecDeque::new()),
@ -175,13 +172,13 @@ impl BlockQueue {
.name(format!("Verifier #{}", i)) .name(format!("Verifier #{}", i))
.spawn(move || { .spawn(move || {
panic_handler.catch_panic(move || { panic_handler.catch_panic(move || {
BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty) VerificationQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty)
}).unwrap() }).unwrap()
}) })
.expect("Error starting block verification thread") .expect("Error starting block verification thread")
); );
} }
BlockQueue { VerificationQueue {
engine: engine, engine: engine,
panic_handler: panic_handler, panic_handler: panic_handler,
ready_signal: ready_signal.clone(), ready_signal: ready_signal.clone(),
@ -196,7 +193,7 @@ impl BlockQueue {
} }
} }
fn verify(verification: Arc<Verification>, engine: Arc<Engine>, wait: Arc<SCondvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<SCondvar>) { fn verify(verification: Arc<Verification<K>>, engine: Arc<Engine>, wait: Arc<SCondvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<SCondvar>) {
while !deleting.load(AtomicOrdering::Acquire) { while !deleting.load(AtomicOrdering::Acquire) {
{ {
let mut more_to_verify = verification.more_to_verify.lock().unwrap(); let mut more_to_verify = verification.more_to_verify.lock().unwrap();
@ -214,57 +211,66 @@ impl BlockQueue {
} }
} }
			let block = {
				let mut unverified = verification.unverified.lock();
				if unverified.is_empty() {
					continue;
				}
				let mut verifying = verification.verifying.lock();
				let block = unverified.pop_front().unwrap();
				verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None });
				block
			};

			let block_hash = block.header.hash();
			match verify_block_unordered(block.header, block.bytes, &*engine) {
				Ok(verified) => {
					let mut verifying = verification.verifying.lock();
					for e in verifying.iter_mut() {
						if e.hash == block_hash {
							e.block = Some(verified);
							break;
						}
					}
					if !verifying.is_empty() && verifying.front().unwrap().hash == block_hash {
						// we're next!
						let mut verified = verification.verified.lock();
						let mut bad = verification.bad.lock();
						BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
						ready.set();
					}
				},
				Err(err) => {
					let mut verifying = verification.verifying.lock();
					let mut verified = verification.verified.lock();
					let mut bad = verification.bad.lock();
					warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err);
					bad.insert(block_hash.clone());
					verifying.retain(|e| e.hash != block_hash);
					BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
					ready.set();
				}
			}
		}
	}

	fn drain_verifying(verifying: &mut VecDeque<VerifyingBlock>, verified: &mut VecDeque<PreverifiedBlock>, bad: &mut HashSet<H256>) {
		while !verifying.is_empty() && verifying.front().unwrap().block.is_some() {
			let block = verifying.pop_front().unwrap().block.unwrap();
			if bad.contains(block.header.parent_hash()) {
				bad.insert(block.header.hash());
			}
			else {
				verified.push_back(block);
			}
		}
	}

			let item = {
				// acquire these locks before getting the item to verify.
				let mut unverified = verification.unverified.lock();
				let mut verifying = verification.verifying.lock();

				let item = match unverified.pop_front() {
					Some(item) => item,
					None => continue,
				};

				verifying.push_back(Verifying { hash: item.hash(), output: None });
				item
			};

			let hash = item.hash();
			match K::verify(item, &*engine) {
				Ok(verified) => {
					let mut verifying = verification.verifying.lock();
					let mut idx = None;
					for (i, e) in verifying.iter_mut().enumerate() {
						if e.hash == hash {
							idx = Some(i);
							e.output = Some(verified);
							break;
						}
					}

					if idx == Some(0) {
						// we're next!
						let mut verified = verification.verified.lock();
						let mut bad = verification.bad.lock();
						VerificationQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
						ready.set();
					}
				},
				Err(_) => {
					let mut verifying = verification.verifying.lock();
					let mut verified = verification.verified.lock();
					let mut bad = verification.bad.lock();

					bad.insert(hash.clone());
					verifying.retain(|e| e.hash != hash);

					if verifying.front().map_or(false, |x| x.output.is_some()) {
						VerificationQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
						ready.set();
					}
				}
			}
		}
	}

	fn drain_verifying(verifying: &mut VecDeque<Verifying<K>>, verified: &mut VecDeque<K::Verified>, bad: &mut HashSet<H256>) {
		while let Some(output) = verifying.front_mut().and_then(|x| x.output.take()) {
			assert!(verifying.pop_front().is_some());

			if bad.contains(&output.parent_hash()) {
				bad.insert(output.hash());
			} else {
				verified.push_back(output);
			}
		}
	}
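The invariant behind the new `drain_verifying`: items may finish verification out of order, but only a completed front item ever leaves the verifying deque, so output order always matches insertion order. A standalone model of just that mechanism:

use std::collections::VecDeque;

struct Verifying { hash: u64, output: Option<u64> }

fn drain_verifying(verifying: &mut VecDeque<Verifying>, verified: &mut Vec<u64>) {
	// take the front's output only if it is complete, then pop it
	while let Some(output) = verifying.front_mut().and_then(|v| v.output.take()) {
		assert!(verifying.pop_front().is_some());
		verified.push(output);
	}
}

fn main() {
	let mut q: VecDeque<_> = (0..3).map(|h| Verifying { hash: h, output: None }).collect();
	let mut done = Vec::new();

	// item 1 finishes first, but nothing drains while item 0 is incomplete
	q[1].output = Some(10);
	drain_verifying(&mut q, &mut done);
	assert!(done.is_empty());

	// once item 0 completes, both drain, preserving insertion order
	q[0].output = Some(0);
	drain_verifying(&mut q, &mut done);
	assert_eq!(done, vec![0, 10]);
}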
@ -288,21 +294,20 @@ impl BlockQueue {
} }
} }
/// Check if the block is currently in the queue /// Check if the item is currently in the queue
pub fn block_status(&self, hash: &H256) -> BlockStatus { pub fn status(&self, hash: &H256) -> Status {
if self.processing.read().contains(hash) { if self.processing.read().contains(hash) {
return BlockStatus::Queued; return Status::Queued;
} }
if self.verification.bad.lock().contains(hash) { if self.verification.bad.lock().contains(hash) {
return BlockStatus::Bad; return Status::Bad;
} }
BlockStatus::Unknown Status::Unknown
} }
/// Add a block to the queue. /// Add a block to the queue.
pub fn import_block(&self, bytes: Bytes) -> ImportResult { pub fn import(&self, input: K::Input) -> ImportResult {
let header = BlockView::new(&bytes).header(); let h = input.hash();
let h = header.hash();
{ {
if self.processing.read().contains(&h) { if self.processing.read().contains(&h) {
return Err(ImportError::AlreadyQueued.into()); return Err(ImportError::AlreadyQueued.into());
@ -313,74 +318,71 @@ impl BlockQueue {
return Err(ImportError::KnownBad.into()); return Err(ImportError::KnownBad.into());
} }
if bad.contains(header.parent_hash()) { if bad.contains(&input.parent_hash()) {
bad.insert(h.clone()); bad.insert(h.clone());
return Err(ImportError::KnownBad.into()); return Err(ImportError::KnownBad.into());
} }
} }
match verify_block_basic(&header, &bytes, &*self.engine) { match K::create(input, &*self.engine) {
Ok(()) => { Ok(item) => {
self.processing.write().insert(h.clone()); self.processing.write().insert(h.clone());
self.verification.unverified.lock().push_back(UnverifiedBlock { header: header, bytes: bytes }); self.verification.unverified.lock().push_back(item);
self.more_to_verify.notify_all(); self.more_to_verify.notify_all();
Ok(h) Ok(h)
}, },
Err(err) => { Err(err) => {
warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err);
self.verification.bad.lock().insert(h.clone()); self.verification.bad.lock().insert(h.clone());
Err(err) Err(err)
} }
} }
} }
/// Mark given block and all its children as bad. Stops verification. /// Mark given item and all its children as bad. Pauses verification
/// until complete.
pub fn mark_as_bad(&self, block_hashes: &[H256]) { pub fn mark_as_bad(&self, hashes: &[H256]) {
if block_hashes.is_empty() { if hashes.is_empty() {
return; return;
} }
let mut verified_lock = self.verification.verified.lock(); let mut verified_lock = self.verification.verified.lock();
let mut verified = &mut *verified_lock; let mut verified = &mut *verified_lock;
let mut bad = self.verification.bad.lock(); let mut bad = self.verification.bad.lock();
let mut processing = self.processing.write(); let mut processing = self.processing.write();
bad.reserve(block_hashes.len()); bad.reserve(hashes.len());
for hash in block_hashes { for hash in hashes {
bad.insert(hash.clone()); bad.insert(hash.clone());
processing.remove(hash); processing.remove(hash);
} }
let mut new_verified = VecDeque::new(); let mut new_verified = VecDeque::new();
for block in verified.drain(..) { for output in verified.drain(..) {
if bad.contains(block.header.parent_hash()) { if bad.contains(&output.parent_hash()) {
bad.insert(block.header.hash()); bad.insert(output.hash());
processing.remove(&block.header.hash()); processing.remove(&output.hash());
} else { } else {
new_verified.push_back(block); new_verified.push_back(output);
} }
} }
*verified = new_verified; *verified = new_verified;
} }
/// Mark given block as processed /// Mark given item as processed
pub fn mark_as_good(&self, block_hashes: &[H256]) { pub fn mark_as_good(&self, hashes: &[H256]) {
if block_hashes.is_empty() { if hashes.is_empty() {
return; return;
} }
let mut processing = self.processing.write(); let mut processing = self.processing.write();
for hash in block_hashes { for hash in hashes {
processing.remove(hash); processing.remove(hash);
} }
} }
/// Removes up to `max` verified blocks from the queue /// Removes up to `max` verified items from the queue
pub fn drain(&self, max: usize) -> Vec<PreverifiedBlock> { pub fn drain(&self, max: usize) -> Vec<K::Verified> {
let mut verified = self.verification.verified.lock(); let mut verified = self.verification.verified.lock();
let count = min(max, verified.len()); let count = min(max, verified.len());
let mut result = Vec::with_capacity(count); let result = verified.drain(..count).collect::<Vec<_>>();
for _ in 0..count {
let block = verified.pop_front().unwrap();
result.push(block);
}
self.ready_signal.reset(); self.ready_signal.reset();
if !verified.is_empty() { if !verified.is_empty() {
self.ready_signal.set(); self.ready_signal.set();
@ -389,7 +391,7 @@ impl BlockQueue {
} }
/// Get queue status. /// Get queue status.
pub fn queue_info(&self) -> BlockQueueInfo { pub fn queue_info(&self) -> QueueInfo {
let (unverified_len, unverified_bytes) = { let (unverified_len, unverified_bytes) = {
let v = self.verification.unverified.lock(); let v = self.verification.unverified.lock();
(v.len(), v.heap_size_of_children()) (v.len(), v.heap_size_of_children())
@ -402,7 +404,8 @@ impl BlockQueue {
let v = self.verification.verified.lock(); let v = self.verification.verified.lock();
(v.len(), v.heap_size_of_children()) (v.len(), v.heap_size_of_children())
}; };
BlockQueueInfo {
QueueInfo {
unverified_queue_size: unverified_len, unverified_queue_size: unverified_len,
verifying_queue_size: verifying_len, verifying_queue_size: verifying_len,
verified_queue_size: verified_len, verified_queue_size: verified_len,
@ -428,22 +431,22 @@ impl BlockQueue {
} }
} }
impl MayPanic for BlockQueue { impl<K: Kind> MayPanic for VerificationQueue<K> {
fn on_panic<F>(&self, closure: F) where F: OnPanicListener { fn on_panic<F>(&self, closure: F) where F: OnPanicListener {
self.panic_handler.on_panic(closure); self.panic_handler.on_panic(closure);
} }
} }
impl Drop for BlockQueue { impl<K: Kind> Drop for VerificationQueue<K> {
fn drop(&mut self) { fn drop(&mut self) {
trace!(target: "shutdown", "[BlockQueue] Closing..."); trace!(target: "shutdown", "[VerificationQueue] Closing...");
self.clear(); self.clear();
self.deleting.store(true, AtomicOrdering::Release); self.deleting.store(true, AtomicOrdering::Release);
self.more_to_verify.notify_all(); self.more_to_verify.notify_all();
for t in self.verifiers.drain(..) { for t in self.verifiers.drain(..) {
t.join().unwrap(); t.join().unwrap();
} }
trace!(target: "shutdown", "[BlockQueue] Closed."); trace!(target: "shutdown", "[VerificationQueue] Closed.");
} }
} }
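The shutdown handshake in `drop` is the classic flag-plus-condvar pattern: set the atomic flag, wake every parked worker, then join. A self-contained sketch using only std primitives (simplified; the real workers also wait for work, not just shutdown):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
	let deleting = Arc::new(AtomicBool::new(false));
	let pair = Arc::new((Mutex::new(()), Condvar::new()));

	let mut workers = Vec::new();
	for _ in 0..4 {
		let (deleting, pair) = (deleting.clone(), pair.clone());
		workers.push(thread::spawn(move || {
			let mut guard = pair.0.lock().unwrap();
			// the flag is rechecked under the mutex, so the wakeup
			// below cannot be missed
			while !deleting.load(Ordering::Acquire) {
				guard = pair.1.wait(guard).unwrap();
			}
		}));
	}

	// request shutdown: set the flag, then notify under the same mutex
	deleting.store(true, Ordering::Release);
	let sync = pair.0.lock().unwrap();
	pair.1.notify_all();
	drop(sync);

	for t in workers {
		t.join().unwrap();
	}
}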
@ -452,7 +455,8 @@ mod tests {
use util::*; use util::*;
use io::*; use io::*;
use spec::*; use spec::*;
use block_queue::*; use super::{BlockQueue, Config};
use super::kind::blocks::Unverified;
use tests::helpers::*; use tests::helpers::*;
use error::*; use error::*;
use views::*; use views::*;
@ -460,7 +464,7 @@ mod tests {
fn get_test_queue() -> BlockQueue { fn get_test_queue() -> BlockQueue {
let spec = get_test_spec(); let spec = get_test_spec();
let engine = spec.engine; let engine = spec.engine;
BlockQueue::new(BlockQueueConfig::default(), engine, IoChannel::disconnected()) BlockQueue::new(Config::default(), engine, IoChannel::disconnected())
} }
#[test] #[test]
@ -468,13 +472,13 @@ mod tests {
// TODO better test // TODO better test
let spec = Spec::new_test(); let spec = Spec::new_test();
let engine = spec.engine; let engine = spec.engine;
let _ = BlockQueue::new(BlockQueueConfig::default(), engine, IoChannel::disconnected()); let _ = BlockQueue::new(Config::default(), engine, IoChannel::disconnected());
} }
#[test] #[test]
fn can_import_blocks() { fn can_import_blocks() {
let queue = get_test_queue(); let queue = get_test_queue();
if let Err(e) = queue.import_block(get_good_dummy_block()) { if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
panic!("error importing block that is valid by definition({:?})", e); panic!("error importing block that is valid by definition({:?})", e);
} }
} }
@ -482,11 +486,11 @@ mod tests {
#[test] #[test]
fn returns_error_for_duplicates() { fn returns_error_for_duplicates() {
let queue = get_test_queue(); let queue = get_test_queue();
if let Err(e) = queue.import_block(get_good_dummy_block()) { if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
panic!("error importing block that is valid by definition({:?})", e); panic!("error importing block that is valid by definition({:?})", e);
} }
let duplicate_import = queue.import_block(get_good_dummy_block()); let duplicate_import = queue.import(Unverified::new(get_good_dummy_block()));
match duplicate_import { match duplicate_import {
Err(e) => { Err(e) => {
match e { match e {
@ -503,14 +507,14 @@ mod tests {
let queue = get_test_queue(); let queue = get_test_queue();
let block = get_good_dummy_block(); let block = get_good_dummy_block();
let hash = BlockView::new(&block).header().hash().clone(); let hash = BlockView::new(&block).header().hash().clone();
if let Err(e) = queue.import_block(block) { if let Err(e) = queue.import(Unverified::new(block)) {
panic!("error importing block that is valid by definition({:?})", e); panic!("error importing block that is valid by definition({:?})", e);
} }
queue.flush(); queue.flush();
queue.drain(10); queue.drain(10);
queue.mark_as_good(&[ hash ]); queue.mark_as_good(&[ hash ]);
if let Err(e) = queue.import_block(get_good_dummy_block()) { if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
panic!("error importing block that has already been drained ({:?})", e); panic!("error importing block that has already been drained ({:?})", e);
} }
} }
@ -518,7 +522,8 @@ mod tests {
#[test] #[test]
fn returns_empty_once_finished() { fn returns_empty_once_finished() {
let queue = get_test_queue(); let queue = get_test_queue();
queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition"); queue.import(Unverified::new(get_good_dummy_block()))
.expect("error importing block that is valid by definition");
queue.flush(); queue.flush();
queue.drain(1); queue.drain(1);
@ -529,13 +534,13 @@ mod tests {
fn test_mem_limit() { fn test_mem_limit() {
let spec = get_test_spec(); let spec = get_test_spec();
let engine = spec.engine; let engine = spec.engine;
let mut config = BlockQueueConfig::default(); let mut config = Config::default();
config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000 config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000
let queue = BlockQueue::new(config, engine, IoChannel::disconnected()); let queue = BlockQueue::new(config, engine, IoChannel::disconnected());
assert!(!queue.queue_info().is_full()); assert!(!queue.queue_info().is_full());
let mut blocks = get_good_dummy_block_seq(50); let mut blocks = get_good_dummy_block_seq(50);
for b in blocks.drain(..) { for b in blocks.drain(..) {
queue.import_block(b).unwrap(); queue.import(Unverified::new(b)).unwrap();
} }
assert!(queue.queue_info().is_full()); assert!(queue.queue_info().is_full());
} }

View File

@ -36,14 +36,22 @@ pub struct PreverifiedBlock {
pub bytes: Bytes, pub bytes: Bytes,
} }
impl HeapSizeOf for PreverifiedBlock {
fn heap_size_of_children(&self) -> usize {
self.header.heap_size_of_children()
+ self.transactions.heap_size_of_children()
+ self.bytes.heap_size_of_children()
}
}
/// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block /// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block
pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error> { pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error> {
try!(verify_header(&header, engine)); try!(verify_header_params(&header, engine));
try!(verify_block_integrity(bytes, &header.transactions_root(), &header.uncles_hash())); try!(verify_block_integrity(bytes, &header.transactions_root(), &header.uncles_hash()));
try!(engine.verify_block_basic(&header, Some(bytes))); try!(engine.verify_block_basic(&header, Some(bytes)));
for u in try!(UntrustedRlp::new(bytes).at(2)).iter().map(|rlp| rlp.as_val::<Header>()) { for u in try!(UntrustedRlp::new(bytes).at(2)).iter().map(|rlp| rlp.as_val::<Header>()) {
let u = try!(u); let u = try!(u);
try!(verify_header(&u, engine)); try!(verify_header_params(&u, engine));
try!(engine.verify_block_basic(&u, None)); try!(engine.verify_block_basic(&u, None));
} }
// Verify transactions. // Verify transactions.
@ -179,7 +187,7 @@ pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error>
} }
/// Check basic header parameters. /// Check basic header parameters.
fn verify_header(header: &Header, engine: &Engine) -> Result<(), Error> { pub fn verify_header_params(header: &Header, engine: &Engine) -> Result<(), Error> {
if header.number() >= From::from(BlockNumber::max_value()) { if header.number() >= From::from(BlockNumber::max_value()) {
return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number() }))) return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number() })))
} }

View File

@ -26,7 +26,6 @@ use bigint::hash::{H520, H256, FixedHash};
use {Secret, Public, SECP256K1, Error, Message, public_to_address, Address}; use {Secret, Public, SECP256K1, Error, Message, public_to_address, Address};
#[repr(C)] #[repr(C)]
#[derive(Eq)]
pub struct Signature([u8; 65]); pub struct Signature([u8; 65]);
impl Signature { impl Signature {
@ -77,6 +76,9 @@ impl PartialEq for Signature {
} }
} }
// manual implementation required in Rust 1.13+, see `std::cmp::AssertParamIsEq`.
impl Eq for Signature { }
// also manual for the same reason, but the pretty printing might be useful. // also manual for the same reason, but the pretty printing might be useful.
impl fmt::Debug for Signature { impl fmt::Debug for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {

View File

@ -16,14 +16,14 @@
use ethkey::{KeyPair, sign, Address, Secret, Signature, Message}; use ethkey::{KeyPair, sign, Address, Secret, Signature, Message};
use {json, Error, crypto}; use {json, Error, crypto};
use crypto::{Keccak256}; use crypto::Keccak256;
use random::Random; use random::Random;
use account::{Version, Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf}; use account::{Version, Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf};
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct Crypto { pub struct Crypto {
pub cipher: Cipher, pub cipher: Cipher,
pub ciphertext: [u8; 32], pub ciphertext: Vec<u8>,
pub kdf: Kdf, pub kdf: Kdf,
pub mac: [u8; 32], pub mac: [u8; 32],
} }
@ -95,7 +95,7 @@ impl Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr { cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: iv, iv: iv,
}), }),
ciphertext: ciphertext, ciphertext: ciphertext.to_vec(),
kdf: Kdf::Pbkdf2(Pbkdf2 { kdf: Kdf::Pbkdf2(Pbkdf2 {
dklen: crypto::KEY_LENGTH as u32, dklen: crypto::KEY_LENGTH as u32,
salt: salt, salt: salt,
@ -107,6 +107,10 @@ impl Crypto {
} }
pub fn secret(&self, password: &str) -> Result<Secret, Error> { pub fn secret(&self, password: &str) -> Result<Secret, Error> {
if self.ciphertext.len() > 32 {
return Err(Error::InvalidSecret);
}
let (derived_left_bits, derived_right_bits) = match self.kdf { let (derived_left_bits, derived_right_bits) = match self.kdf {
Kdf::Pbkdf2(ref params) => crypto::derive_key_iterations(password, &params.salt, params.c), Kdf::Pbkdf2(ref params) => crypto::derive_key_iterations(password, &params.salt, params.c),
Kdf::Scrypt(ref params) => crypto::derive_key_scrypt(password, &params.salt, params.n, params.p, params.r), Kdf::Scrypt(ref params) => crypto::derive_key_scrypt(password, &params.salt, params.n, params.p, params.r),
@ -122,7 +126,8 @@ impl Crypto {
match self.cipher { match self.cipher {
Cipher::Aes128Ctr(ref params) => { Cipher::Aes128Ctr(ref params) => {
crypto::aes::decrypt(&derived_left_bits, &params.iv, &self.ciphertext, &mut *secret) let from = 32 - self.ciphertext.len();
crypto::aes::decrypt(&derived_left_bits, &params.iv, &self.ciphertext, &mut (&mut *secret)[from..])
}, },
} }

View File

@ -0,0 +1,58 @@
use std::{ops, str};
use serde::{Deserialize, Deserializer, Error, Serialize, Serializer};
use rustc_serialize::hex::{ToHex, FromHex, FromHexError};
#[derive(Debug, PartialEq)]
pub struct Bytes(Vec<u8>);
impl ops::Deref for Bytes {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Deserialize for Bytes {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: Deserializer
{
let s = try!(String::deserialize(deserializer));
let data = try!(s.from_hex().map_err(|e| Error::custom(format!("Invalid hex value {}", e))));
Ok(Bytes(data))
}
}
impl Serialize for Bytes {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: Serializer {
serializer.serialize_str(&self.0.to_hex())
}
}
impl str::FromStr for Bytes {
type Err = FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.from_hex().map(Bytes)
}
}
impl From<&'static str> for Bytes {
fn from(s: &'static str) -> Self {
s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s))
}
}
impl From<Vec<u8>> for Bytes {
fn from(v: Vec<u8>) -> Self {
Bytes(v)
}
}
impl From<Bytes> for Vec<u8> {
fn from(b: Bytes) -> Self {
b.0
}
}
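The serde round-trip above reduces to hex encode/decode; a dependency-free sketch of that core (the real code uses rustc_serialize's FromHex/ToHex):

fn from_hex(s: &str) -> Option<Vec<u8>> {
	if s.len() % 2 != 0 {
		return None; // hex strings must have an even number of digits
	}
	(0..s.len() / 2)
		.map(|i| u8::from_str_radix(&s[2 * i..2 * i + 2], 16).ok())
		.collect()
}

fn to_hex(bytes: &[u8]) -> String {
	bytes.iter().map(|b| format!("{:02x}", b)).collect()
}

fn main() {
	let bytes = from_hex("0600ffee").unwrap();
	assert_eq!(bytes, vec![0x06, 0x00, 0xff, 0xee]);
	assert_eq!(to_hex(&bytes), "0600ffee");
	assert!(from_hex("0xzz").is_none()); // non-hex input is rejected
}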

View File

@ -16,12 +16,14 @@
use serde::{Deserialize, Deserializer, Serialize, Serializer, Error}; use serde::{Deserialize, Deserializer, Serialize, Serializer, Error};
use serde::de::{Visitor, MapVisitor}; use serde::de::{Visitor, MapVisitor};
use super::{Cipher, CipherSer, CipherSerParams, Kdf, KdfSer, KdfSerParams, H256}; use super::{Cipher, CipherSer, CipherSerParams, Kdf, KdfSer, KdfSerParams, H256, Bytes};
pub type CipherText = Bytes;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub struct Crypto { pub struct Crypto {
pub cipher: Cipher, pub cipher: Cipher,
pub ciphertext: H256, pub ciphertext: CipherText,
pub kdf: Kdf, pub kdf: Kdf,
pub mac: H256, pub mac: H256,
} }

View File

@ -14,9 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt; use std::{ops, fmt, str};
use std::ops;
use std::str::FromStr;
use rustc_serialize::hex::{FromHex, ToHex}; use rustc_serialize::hex::{FromHex, ToHex};
use serde::{Serialize, Serializer, Deserialize, Deserializer, Error as SerdeError}; use serde::{Serialize, Serializer, Deserialize, Deserializer, Error as SerdeError};
use serde::de::Visitor; use serde::de::Visitor;
@ -65,7 +63,7 @@ macro_rules! impl_hash {
type Value = $name; type Value = $name;
fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: SerdeError { fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
FromStr::from_str(value).map_err(SerdeError::custom) value.parse().map_err(SerdeError::custom)
} }
fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: SerdeError { fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: SerdeError {
@ -77,7 +75,7 @@ macro_rules! impl_hash {
} }
} }
impl FromStr for $name { impl str::FromStr for $name {
type Err = Error; type Err = Error;
fn from_str(value: &str) -> Result<Self, Self::Err> { fn from_str(value: &str) -> Result<Self, Self::Err> {
@ -92,6 +90,12 @@ macro_rules! impl_hash {
} }
} }
impl From<&'static str> for $name {
fn from(s: &'static str) -> Self {
s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s))
}
}
impl From<[u8; $size]> for $name { impl From<[u8; $size]> for $name {
fn from(bytes: [u8; $size]) -> Self { fn from(bytes: [u8; $size]) -> Self {
$name(bytes) $name(bytes)

View File

@ -15,8 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Universally unique identifier. //! Universally unique identifier.
use std::str::FromStr; use std::{fmt, str};
use std::fmt;
use rustc_serialize::hex::{ToHex, FromHex}; use rustc_serialize::hex::{ToHex, FromHex};
use serde::{Deserialize, Serialize, Deserializer, Serializer, Error as SerdeError}; use serde::{Deserialize, Serialize, Deserializer, Serializer, Error as SerdeError};
use serde::de::Visitor; use serde::de::Visitor;
@ -73,7 +72,7 @@ fn copy_into(from: &str, into: &mut [u8]) -> Result<(), Error> {
Ok(()) Ok(())
} }
impl FromStr for UUID { impl str::FromStr for UUID {
type Err = Error; type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
@ -95,6 +94,12 @@ impl FromStr for UUID {
} }
} }
impl From<&'static str> for UUID {
fn from(s: &'static str) -> Self {
s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s))
}
}
impl Serialize for UUID { impl Serialize for UUID {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error> fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: Serializer { where S: Serializer {
@ -116,7 +121,7 @@ impl Visitor for UUIDVisitor {
type Value = UUID; type Value = UUID;
fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: SerdeError { fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
UUID::from_str(value).map_err(SerdeError::custom) value.parse().map_err(SerdeError::custom)
} }
fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: SerdeError { fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: SerdeError {
@ -126,19 +131,18 @@ impl Visitor for UUIDVisitor {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::str::FromStr;
use super::UUID; use super::UUID;
#[test] #[test]
fn uuid_from_str() { fn uuid_from_str() {
let uuid = UUID::from_str("3198bc9c-6672-5ab3-d995-4942343ae5b6").unwrap(); let uuid: UUID = "3198bc9c-6672-5ab3-d995-4942343ae5b6".into();
assert_eq!(uuid, UUID::from([0x31, 0x98, 0xbc, 0x9c, 0x66, 0x72, 0x5a, 0xb3, 0xd9, 0x95, 0x49, 0x42, 0x34, 0x3a, 0xe5, 0xb6])); assert_eq!(uuid, UUID::from([0x31, 0x98, 0xbc, 0x9c, 0x66, 0x72, 0x5a, 0xb3, 0xd9, 0x95, 0x49, 0x42, 0x34, 0x3a, 0xe5, 0xb6]));
} }
#[test] #[test]
fn uuid_from_and_to_str() { fn uuid_from_and_to_str() {
let from = "3198bc9c-6672-5ab3-d995-4942343ae5b6"; let from = "3198bc9c-6672-5ab3-d995-4942343ae5b6";
let uuid = UUID::from_str(from).unwrap(); let uuid: UUID = from.into();
let to: String = uuid.into(); let to: String = uuid.into();
assert_eq!(from, &to); assert_eq!(from, &to);
} }

View File

@ -98,7 +98,7 @@ impl Visitor for KeyFileVisitor {
Some(KeyFileField::Version) => { version = Some(try!(visitor.visit_value())); } Some(KeyFileField::Version) => { version = Some(try!(visitor.visit_value())); }
Some(KeyFileField::Crypto) => { crypto = Some(try!(visitor.visit_value())); } Some(KeyFileField::Crypto) => { crypto = Some(try!(visitor.visit_value())); }
Some(KeyFileField::Address) => { address = Some(try!(visitor.visit_value())); } Some(KeyFileField::Address) => { address = Some(try!(visitor.visit_value())); }
Some(KeyFileField::Name) => { name = visitor.visit_value().ok(); } // ignore anything that is not a string to be permissive. Some(KeyFileField::Name) => { name = visitor.visit_value().ok(); } // ignore anything that is not a string to be permissive.
Some(KeyFileField::Meta) => { meta = visitor.visit_value().ok(); } // ignore anything that is not a string to be permissive. Some(KeyFileField::Meta) => { meta = visitor.visit_value().ok(); } // ignore anything that is not a string to be permissive.
None => { break; } None => { break; }
} }
@ -153,7 +153,7 @@ impl KeyFile {
mod tests { mod tests {
use std::str::FromStr; use std::str::FromStr;
use serde_json; use serde_json;
use json::{KeyFile, UUID, Version, Crypto, Cipher, Aes128Ctr, Kdf, Scrypt, H128, H160, H256}; use json::{KeyFile, UUID, Version, Crypto, Cipher, Aes128Ctr, Kdf, Scrypt};
#[test] #[test]
fn basic_keyfile() { fn basic_keyfile() {
@ -185,20 +185,20 @@ mod tests {
let expected = KeyFile { let expected = KeyFile {
id: UUID::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), id: UUID::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(),
version: Version::V3, version: Version::V3,
address: H160::from_str("6edddfc6349aff20bc6467ccf276c5b52487f7a8").unwrap(), address: "6edddfc6349aff20bc6467ccf276c5b52487f7a8".into(),
crypto: Crypto { crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr { cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: H128::from_str("b5a7ec855ec9e2c405371356855fec83").unwrap(), iv: "b5a7ec855ec9e2c405371356855fec83".into(),
}), }),
ciphertext: H256::from_str("7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc").unwrap(), ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(),
kdf: Kdf::Scrypt(Scrypt { kdf: Kdf::Scrypt(Scrypt {
n: 262144, n: 262144,
dklen: 32, dklen: 32,
p: 1, p: 1,
r: 8, r: 8,
salt: H256::from_str("1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209").unwrap(), salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(),
}), }),
mac: H256::from_str("46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f").unwrap(), mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(),
}, },
name: Some("Test".to_owned()), name: Some("Test".to_owned()),
meta: Some("{}".to_owned()), meta: Some("{}".to_owned()),
@ -234,22 +234,22 @@ mod tests {
}"#; }"#;
let expected = KeyFile { let expected = KeyFile {
id: UUID::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(),
version: Version::V3, version: Version::V3,
address: H160::from_str("6edddfc6349aff20bc6467ccf276c5b52487f7a8").unwrap(), address: "6edddfc6349aff20bc6467ccf276c5b52487f7a8".into(),
crypto: Crypto { crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr { cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: H128::from_str("b5a7ec855ec9e2c405371356855fec83").unwrap(), iv: "b5a7ec855ec9e2c405371356855fec83".into(),
}), }),
ciphertext: H256::from_str("7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc").unwrap(), ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(),
kdf: Kdf::Scrypt(Scrypt { kdf: Kdf::Scrypt(Scrypt {
n: 262144, n: 262144,
dklen: 32, dklen: 32,
p: 1, p: 1,
r: 8, r: 8,
salt: H256::from_str("1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209").unwrap(), salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(),
}), }),
mac: H256::from_str("46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f").unwrap(), mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(),
}, },
name: None, name: None,
meta: None, meta: None,
@ -262,22 +262,22 @@ mod tests {
#[test] #[test]
fn to_and_from_json() { fn to_and_from_json() {
let file = KeyFile { let file = KeyFile {
id: UUID::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(),
version: Version::V3, version: Version::V3,
address: H160::from_str("6edddfc6349aff20bc6467ccf276c5b52487f7a8").unwrap(), address: "6edddfc6349aff20bc6467ccf276c5b52487f7a8".into(),
crypto: Crypto { crypto: Crypto {
cipher: Cipher::Aes128Ctr(Aes128Ctr { cipher: Cipher::Aes128Ctr(Aes128Ctr {
iv: H128::from_str("b5a7ec855ec9e2c405371356855fec83").unwrap(), iv: "b5a7ec855ec9e2c405371356855fec83".into(),
}), }),
ciphertext: H256::from_str("7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc").unwrap(), ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(),
kdf: Kdf::Scrypt(Scrypt { kdf: Kdf::Scrypt(Scrypt {
n: 262144, n: 262144,
dklen: 32, dklen: 32,
p: 1, p: 1,
r: 8, r: 8,
salt: H256::from_str("1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209").unwrap(), salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(),
}), }),
mac: H256::from_str("46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f").unwrap(), mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(),
}, },
name: Some("Test".to_owned()), name: Some("Test".to_owned()),
meta: None, meta: None,

View File

@ -1,3 +1,4 @@
mod bytes;
mod cipher; mod cipher;
mod crypto; mod crypto;
mod error; mod error;
@ -8,8 +9,9 @@ mod key_file;
mod presale; mod presale;
mod version; mod version;
pub use self::bytes::Bytes;
pub use self::cipher::{Cipher, CipherSer, CipherSerParams, Aes128Ctr}; pub use self::cipher::{Cipher, CipherSer, CipherSerParams, Aes128Ctr};
pub use self::crypto::Crypto; pub use self::crypto::{Crypto, CipherText};
pub use self::error::Error; pub use self::error::Error;
pub use self::hash::{H128, H160, H256}; pub use self::hash::{H128, H160, H256};
pub use self::id::UUID; pub use self::id::UUID;

View File

@ -1,30 +1,8 @@
use std::io::Read; use std::io::Read;
use std::ops::Deref;
use serde_json; use serde_json;
use serde::{Deserialize, Deserializer, Error}; use super::{H160, Bytes};
use rustc_serialize::hex::FromHex;
use super::{H160};
#[derive(Debug, PartialEq)] pub type Encseed = Bytes;
pub struct Encseed(Vec<u8>);
impl Deref for Encseed {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Deserialize for Encseed {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: Deserializer
{
let s = try!(String::deserialize(deserializer));
let data = try!(s.from_hex().map_err(|e| Error::custom(format!("Invalid hex value {}", e))));
Ok(Encseed(data))
}
}
#[derive(Debug, PartialEq, Deserialize)] #[derive(Debug, PartialEq, Deserialize)]
pub struct PresaleWallet { pub struct PresaleWallet {
@ -43,8 +21,7 @@ impl PresaleWallet {
mod tests { mod tests {
use std::str::FromStr; use std::str::FromStr;
use serde_json; use serde_json;
use rustc_serialize::hex::FromHex; use json::{PresaleWallet, H160};
use json::{PresaleWallet, H160, Encseed};
#[test] #[test]
fn presale_wallet() { fn presale_wallet() {
@ -57,7 +34,7 @@ mod tests {
} "#; } "#;
let expected = PresaleWallet { let expected = PresaleWallet {
encseed: Encseed("137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066".from_hex().unwrap()), encseed: "137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066".into(),
address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(), address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(),
}; };
@ -77,7 +54,7 @@ mod tests {
} "#; } "#;
let expected = PresaleWallet { let expected = PresaleWallet {
encseed: Encseed("137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d".from_hex().unwrap()), encseed: "137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d".into(),
address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(), address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(),
}; };

View File

@ -19,9 +19,8 @@ extern crate ethstore;
mod util; mod util;
use std::str::FromStr;
use ethstore::{SecretStore, EthStore}; use ethstore::{SecretStore, EthStore};
use ethstore::ethkey::{Random, Generator, Secret, Address}; use ethstore::ethkey::{Random, Generator, Secret, KeyPair, verify_address};
use ethstore::dir::DiskDirectory; use ethstore::dir::DiskDirectory;
use util::TransientDir; use util::TransientDir;
@ -103,14 +102,21 @@ fn pat_path() -> &'static str {
} }
} }
fn ciphertext_path() -> &'static str {
match ::std::fs::metadata("ethstore") {
Ok(_) => "ethstore/tests/res/ciphertext",
Err(_) => "tests/res/ciphertext",
}
}
#[test] #[test]
fn secret_store_load_geth_files() { fn secret_store_load_geth_files() {
let dir = DiskDirectory::at(test_path()); let dir = DiskDirectory::at(test_path());
let store = EthStore::open(Box::new(dir)).unwrap(); let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(store.accounts().unwrap(), vec![ assert_eq!(store.accounts().unwrap(), vec![
Address::from_str("3f49624084b67849c7b4e805c5988c21a430f9d9").unwrap(), "3f49624084b67849c7b4e805c5988c21a430f9d9".into(),
Address::from_str("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf").unwrap(), "5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into(),
Address::from_str("63121b431a52f8043c16fcf0d1df9cb7b5f66649").unwrap(), "63121b431a52f8043c16fcf0d1df9cb7b5f66649".into(),
]); ]);
} }
@ -119,9 +125,30 @@ fn secret_store_load_pat_files() {
let dir = DiskDirectory::at(pat_path()); let dir = DiskDirectory::at(pat_path());
let store = EthStore::open(Box::new(dir)).unwrap(); let store = EthStore::open(Box::new(dir)).unwrap();
assert_eq!(store.accounts().unwrap(), vec![ assert_eq!(store.accounts().unwrap(), vec![
Address::from_str("3f49624084b67849c7b4e805c5988c21a430f9d9").unwrap(), "3f49624084b67849c7b4e805c5988c21a430f9d9".into(),
Address::from_str("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf").unwrap(), "5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into(),
]); ]);
} }
#[test]
fn test_decrypting_files_with_short_ciphertext() {
// 31e9d1e6d844bd3a536800ef8d8be6a9975db509, 30
let kp1 = KeyPair::from_secret("000081c29e8142bb6a81bef5a92bda7a8328a5c85bb2f9542e76f9b0f94fc018".into()).unwrap();
// d1e64e5480bfaf733ba7d48712decb8227797a4e, 31
let kp2 = KeyPair::from_secret("00fa7b3db73dc7dfdf8c5fbdb796d741e4488628c41fc4febd9160a866ba0f35".into()).unwrap();
let dir = DiskDirectory::at(ciphertext_path());
let store = EthStore::open(Box::new(dir)).unwrap();
let accounts = store.accounts().unwrap();
assert_eq!(accounts, vec![
"31e9d1e6d844bd3a536800ef8d8be6a9975db509".into(),
"d1e64e5480bfaf733ba7d48712decb8227797a4e".into(),
]);
let message = Default::default();
let s1 = store.sign(&accounts[0], "foo", &message).unwrap();
let s2 = store.sign(&accounts[1], "foo", &message).unwrap();
assert!(verify_address(&accounts[0], &s1, &message).unwrap());
assert!(verify_address(&kp1.address(), &s1, &message).unwrap());
assert!(verify_address(&kp2.address(), &s2, &message).unwrap());
}

View File

@ -0,0 +1,21 @@
{
"address" : "31e9d1e6d844bd3a536800ef8d8be6a9975db509",
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "3ca92af36ad7c2cd92454c59cea5ef00"
},
"ciphertext" : "108b7d34f3442fc26ab1ab90ca91476ba6bfa8c00975a49ef9051dc675aa",
"kdf" : "scrypt",
"kdfparams" : {
"dklen" : 32,
"n" : 2,
"r" : 8,
"p" : 1,
"salt" : "d0769e608fb86cda848065642a9c6fa046845c928175662b8e356c77f914cd3b"
},
"mac" : "75d0e6759f7b3cefa319c3be41680ab6beea7d8328653474bd06706d4cc67420"
},
"id" : "a37e1559-5955-450d-8075-7b8931b392b2",
"version" : 3
}

View File

@ -0,0 +1,21 @@
{
"address" : "d1e64e5480bfaf733ba7d48712decb8227797a4e",
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "e0c41130a323adc1446fc82f724bca2f"
},
"ciphertext" : "9517cd5bdbe69076f9bf5057248c6c050141e970efa36ce53692d5d59a3984",
"kdf" : "scrypt",
"kdfparams" : {
"dklen" : 32,
"n" : 2,
"r" : 8,
"p" : 1,
"salt" : "711f816911c92d649fb4c84b047915679933555030b3552c1212609b38208c63"
},
"mac" : "d5e116151c6aa71470e67a7d42c9620c75c4d23229847dcc127794f0732b0db5"
},
"id" : "fecfc4ce-e956-48fd-953b-30f8b52ed66c",
"version" : 3
}

View File

@ -32,6 +32,9 @@ pub struct EthashParams {
#[serde(rename="difficultyBoundDivisor")] #[serde(rename="difficultyBoundDivisor")]
pub difficulty_bound_divisor: Uint, pub difficulty_bound_divisor: Uint,
/// See main EthashParams docs. /// See main EthashParams docs.
#[serde(rename="difficultyIncrementDivisor")]
pub difficulty_increment_divisor: Option<Uint>,
/// See main EthashParams docs.
#[serde(rename="durationLimit")] #[serde(rename="durationLimit")]
pub duration_limit: Uint, pub duration_limit: Uint,
/// See main EthashParams docs. /// See main EthashParams docs.
@ -39,9 +42,11 @@ pub struct EthashParams {
pub block_reward: Uint, pub block_reward: Uint,
/// See main EthashParams docs. /// See main EthashParams docs.
pub registrar: Option<Address>, pub registrar: Option<Address>,
/// See main EthashParams docs. /// See main EthashParams docs.
#[serde(rename="frontierCompatibilityModeLimit")] #[serde(rename="frontierCompatibilityModeLimit")]
pub frontier_compatibility_mode_limit: Option<Uint>, pub frontier_compatibility_mode_limit: Option<Uint>,
/// See main EthashParams docs. /// See main EthashParams docs.
#[serde(rename="daoHardforkTransition")] #[serde(rename="daoHardforkTransition")]
pub dao_hardfork_transition: Option<Uint>, pub dao_hardfork_transition: Option<Uint>,
@ -51,6 +56,16 @@ pub struct EthashParams {
/// See main EthashParams docs. /// See main EthashParams docs.
#[serde(rename="daoHardforkAccounts")] #[serde(rename="daoHardforkAccounts")]
pub dao_hardfork_accounts: Option<Vec<Address>>, pub dao_hardfork_accounts: Option<Vec<Address>>,
/// See main EthashParams docs.
#[serde(rename="difficultyHardforkTransition")]
pub difficulty_hardfork_transition: Option<Uint>,
/// See main EthashParams docs.
#[serde(rename="difficultyHardforkBoundDivisor")]
pub difficulty_hardfork_bound_divisor: Option<Uint>,
/// See main EthashParams docs.
#[serde(rename="bombDefuseTransition")]
pub bomb_defuse_transition: Option<Uint>,
} }
/// Ethash engine deserialization. /// Ethash engine deserialization.
@ -99,7 +114,10 @@ mod tests {
"0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97", "0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97",
"0xbb9bc244d798123fde783fcc1c72d3bb8c189413", "0xbb9bc244d798123fde783fcc1c72d3bb8c189413",
"0x807640a13483f8ac783c557fcdf27be11ea4ac7a" "0x807640a13483f8ac783c557fcdf27be11ea4ac7a"
] ],
"difficultyHardforkTransition": "0x59d9",
"difficultyHardforkBoundDivisor": "0x0200",
"bombDefuseTransition": "0x42"
} }
}"#; }"#;

View File

@ -31,6 +31,9 @@ pub struct Params {
/// Network id. /// Network id.
#[serde(rename="networkID")] #[serde(rename="networkID")]
pub network_id: Uint, pub network_id: Uint,
/// Name of the main ("eth") subprotocol.
#[serde(rename="subprotocolName")]
pub subprotocol_name: Option<String>,
/// Minimum gas limit. /// Minimum gas limit.
#[serde(rename="minGasLimit")] #[serde(rename="minGasLimit")]
pub min_gas_limit: Uint, pub min_gas_limit: Uint,
@ -53,6 +56,7 @@ mod tests {
"frontierCompatibilityModeLimit": "0x118c30", "frontierCompatibilityModeLimit": "0x118c30",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"networkID" : "0x1", "networkID" : "0x1",
"subprotocolName" : "exp",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"accountStartNonce": "0x00" "accountStartNonce": "0x00"
}"#; }"#;

View File

@ -91,10 +91,10 @@ pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap(); let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap();
let with_color = if max_log_level() <= LogLevelFilter::Info { let with_color = if max_log_level() <= LogLevelFilter::Info {
format!("{}{}", Colour::Black.bold().paint(timestamp), record.args()) format!("{} {}", Colour::Black.bold().paint(timestamp), record.args())
} else { } else {
let name = thread::current().name().map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x))); let name = thread::current().name().map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x)));
format!("{}{} {} {} {}", Colour::Black.bold().paint(timestamp), name, record.level(), record.target(), record.args()) format!("{} {} {} {} {}", Colour::Black.bold().paint(timestamp), name, record.level(), record.target(), record.args())
}; };
let removed_color = kill_color(with_color.as_ref()); let removed_color = kill_color(with_color.as_ref());
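(Illustrative effect of the one-character format fix above, with a hypothetical message: "2016-09-30 12:44:52 UTCStarting sync." becomes "2016-09-30 12:44:52 UTC Starting sync."; the same missing space is added to the verbose format string as well.)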

View File

@ -31,8 +31,8 @@ Operating Options:
(default: {flag_mode_alarm}). (default: {flag_mode_alarm}).
--chain CHAIN Specify the blockchain type. CHAIN may be either a --chain CHAIN Specify the blockchain type. CHAIN may be either a
JSON chain specification file or olympic, frontier, JSON chain specification file or olympic, frontier,
homestead, mainnet, morden, classic or testnet homestead, mainnet, morden, classic, expanse or
(default: {flag_chain}). testnet (default: {flag_chain}).
-d --db-path PATH Specify the database & configuration directory path -d --db-path PATH Specify the database & configuration directory path
(default: {flag_db_path}). (default: {flag_db_path}).
--keys-path PATH Specify the path for JSON key files to be found --keys-path PATH Specify the path for JSON key files to be found

View File

@ -43,13 +43,15 @@ pub enum Error {
/// Returned when current version cannot be read or guessed. /// Returned when current version cannot be read or guessed.
UnknownDatabaseVersion, UnknownDatabaseVersion,
/// Migration does not support existing pruning algorithm. /// Migration does not support existing pruning algorithm.
UnsuportedPruningMethod, UnsupportedPruningMethod,
/// Existing DB is newer than the known one. /// Existing DB is newer than the known one.
FutureDBVersion, FutureDBVersion,
/// Migration is not possible. /// Migration is not possible.
MigrationImpossible, MigrationImpossible,
/// Migration unexpectedly failed. /// Migration unexpectedly failed.
MigrationFailed, MigrationFailed,
/// Internal migration error.
Internal(MigrationError),
/// Migration was completed successfully, /// Migration was completed successfully,
/// but there was a problem with io. /// but there was a problem with io.
Io(IoError), Io(IoError),
@ -59,10 +61,11 @@ impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
let out = match *self { let out = match *self {
Error::UnknownDatabaseVersion => "Current database version cannot be read".into(), Error::UnknownDatabaseVersion => "Current database version cannot be read".into(),
Error::UnsuportedPruningMethod => "Unsupported pruning method for database migration. Delete DB and resync.".into(), Error::UnsupportedPruningMethod => "Unsupported pruning method for database migration. Delete DB and resync.".into(),
Error::FutureDBVersion => "Database was created with newer client version. Upgrade your client or delete DB and resync.".into(), Error::FutureDBVersion => "Database was created with newer client version. Upgrade your client or delete DB and resync.".into(),
Error::MigrationImpossible => format!("Database migration to version {} is not possible.", CURRENT_VERSION), Error::MigrationImpossible => format!("Database migration to version {} is not possible.", CURRENT_VERSION),
Error::MigrationFailed => "Database migration unexpectedly failed".into(), Error::MigrationFailed => "Database migration unexpectedly failed".into(),
Error::Internal(ref err) => format!("{}", err),
Error::Io(ref err) => format!("Unexpected io error on DB migration: {}.", err), Error::Io(ref err) => format!("Unexpected io error on DB migration: {}.", err),
}; };
@ -80,7 +83,7 @@ impl From<MigrationError> for Error {
fn from(err: MigrationError) -> Self { fn from(err: MigrationError) -> Self {
match err { match err {
MigrationError::Io(e) => Error::Io(e), MigrationError::Io(e) => Error::Io(e),
_ => Error::MigrationFailed, _ => Error::Internal(err),
} }
} }
} }
@ -160,7 +163,7 @@ fn consolidate_database(
let config = default_migration_settings(compaction_profile); let config = default_migration_settings(compaction_profile);
let mut db_config = DatabaseConfig { let mut db_config = DatabaseConfig {
max_open_files: 64, max_open_files: 64,
cache_size: None, cache_sizes: Default::default(),
compaction: config.compaction_profile, compaction: config.compaction_profile,
columns: None, columns: None,
wal: true, wal: true,
@ -320,7 +323,7 @@ mod legacy {
let res = match pruning { let res = match pruning {
Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()), Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()), Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
_ => return Err(Error::UnsuportedPruningMethod), _ => return Err(Error::UnsupportedPruningMethod),
}; };
try!(res.map_err(|_| Error::MigrationImpossible)); try!(res.map_err(|_| Error::MigrationImpossible));

View File

@ -29,6 +29,7 @@ pub enum SpecType {
Testnet, Testnet,
Olympic, Olympic,
Classic, Classic,
Expanse,
Custom(String), Custom(String),
} }
@ -47,6 +48,7 @@ impl str::FromStr for SpecType {
"frontier-dogmatic" | "homestead-dogmatic" | "classic" => SpecType::Classic, "frontier-dogmatic" | "homestead-dogmatic" | "classic" => SpecType::Classic,
"morden" | "testnet" => SpecType::Testnet, "morden" | "testnet" => SpecType::Testnet,
"olympic" => SpecType::Olympic, "olympic" => SpecType::Olympic,
"expanse" => SpecType::Expanse,
other => SpecType::Custom(other.into()), other => SpecType::Custom(other.into()),
}; };
Ok(spec) Ok(spec)
@ -60,6 +62,7 @@ impl SpecType {
SpecType::Testnet => Ok(ethereum::new_morden()), SpecType::Testnet => Ok(ethereum::new_morden()),
SpecType::Olympic => Ok(ethereum::new_olympic()), SpecType::Olympic => Ok(ethereum::new_olympic()),
SpecType::Classic => Ok(ethereum::new_classic()), SpecType::Classic => Ok(ethereum::new_classic()),
SpecType::Expanse => Ok(ethereum::new_expanse()),
SpecType::Custom(ref filename) => { SpecType::Custom(ref filename) => {
let file = try!(fs::File::open(filename).map_err(|_| "Could not load specification file.")); let file = try!(fs::File::open(filename).map_err(|_| "Could not load specification file."));
Spec::load(file) Spec::load(file)

View File

@ -148,6 +148,11 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
Some(id) => id, Some(id) => id,
None => spec.network_id(), None => spec.network_id(),
}; };
if spec.subprotocol_name().len() != 3 {
warn!("Your chain specification's subprotocol length is not 3. Ignoring.");
} else {
sync_config.subprotocol_name.clone_from_slice(spec.subprotocol_name().as_bytes());
}
sync_config.fork_block = spec.fork_block(); sync_config.fork_block = spec.fork_block();
// prepare account provider // prepare account provider

View File

@ -25,6 +25,7 @@ ethsync = { path = "../sync" }
ethjson = { path = "../json" } ethjson = { path = "../json" }
ethcore-devtools = { path = "../devtools" } ethcore-devtools = { path = "../devtools" }
rlp = { path = "../util/rlp" } rlp = { path = "../util/rlp" }
fetch = { path = "../util/fetch" }
rustc-serialize = "0.3" rustc-serialize = "0.3"
transient-hashmap = "0.1" transient-hashmap = "0.1"
serde_macros = { version = "0.8.0", optional = true } serde_macros = { version = "0.8.0", optional = true }

View File

@ -36,6 +36,7 @@ extern crate json_ipc_server as ipc;
extern crate ethcore_ipc; extern crate ethcore_ipc;
extern crate time; extern crate time;
extern crate rlp; extern crate rlp;
extern crate fetch;
#[macro_use] #[macro_use]
extern crate log; extern crate log;

View File

@ -23,6 +23,7 @@ macro_rules! rpc_unimplemented {
use std::fmt; use std::fmt;
use ethcore::error::Error as EthcoreError; use ethcore::error::Error as EthcoreError;
use ethcore::account_provider::{Error as AccountError}; use ethcore::account_provider::{Error as AccountError};
use fetch::FetchError;
use jsonrpc_core::{Error, ErrorCode, Value}; use jsonrpc_core::{Error, ErrorCode, Value};
mod codes { mod codes {
@ -41,6 +42,7 @@ mod codes {
pub const REQUEST_REJECTED_LIMIT: i64 = -32041; pub const REQUEST_REJECTED_LIMIT: i64 = -32041;
pub const REQUEST_NOT_FOUND: i64 = -32042; pub const REQUEST_NOT_FOUND: i64 = -32042;
pub const COMPILATION_ERROR: i64 = -32050; pub const COMPILATION_ERROR: i64 = -32050;
pub const FETCH_ERROR: i64 = -32060;
} }
pub fn unimplemented() -> Error { pub fn unimplemented() -> Error {
@ -155,6 +157,14 @@ pub fn signer_disabled() -> Error {
} }
} }
pub fn from_fetch_error(error: FetchError) -> Error {
Error {
code: ErrorCode::ServerError(codes::FETCH_ERROR),
message: "Error while fetching content.".into(),
data: Some(Value::String(format!("{:?}", error))),
}
}
pub fn from_signing_error(error: AccountError) -> Error { pub fn from_signing_error(error: AccountError) -> Error {
Error { Error {
code: ErrorCode::ServerError(codes::ACCOUNT_LOCKED), code: ErrorCode::ServerError(codes::ACCOUNT_LOCKED),
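A sketch of the JSON-RPC error object that from_fetch_error produces, assuming the standard jsonrpc_core::Error serialization; the data value is illustrative (the Debug output of, e.g., FetchError::InvalidUrl):

    {
      "code": -32060,
      "message": "Error while fetching content.",
      "data": "InvalidUrl"
    }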

View File

@ -424,13 +424,9 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
fn transaction_by_hash(&self, hash: RpcH256) -> Result<Option<Transaction>, Error> { fn transaction_by_hash(&self, hash: RpcH256) -> Result<Option<Transaction>, Error> {
try!(self.active()); try!(self.active());
let miner = take_weak!(self.miner);
let hash: H256 = hash.into(); let hash: H256 = hash.into();
match miner.transaction(&hash) { let miner = take_weak!(self.miner);
Some(pending_tx) => Ok(Some(pending_tx.into())), Ok(try!(self.transaction(TransactionID::Hash(hash))).or_else(|| miner.transaction(&hash).map(Into::into)))
None => self.transaction(TransactionID::Hash(hash))
}
} }
fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result<Option<Transaction>, Error> { fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result<Option<Transaction>, Error> {

View File

@ -15,30 +15,33 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethcore-specific rpc implementation. //! Ethcore-specific rpc implementation.
use std::sync::{Arc, Weak}; use std::{fs, io};
use std::sync::{mpsc, Arc, Weak};
use std::str::FromStr; use std::str::FromStr;
use std::collections::{BTreeMap}; use std::collections::{BTreeMap};
use util::{RotatingLogger, Address}; use util::{RotatingLogger, Address, Mutex, sha3};
use util::misc::version_data; use util::misc::version_data;
use crypto::ecies; use crypto::ecies;
use fetch::{Client as FetchClient, Fetch};
use ethkey::{Brain, Generator}; use ethkey::{Brain, Generator};
use ethstore::random_phrase; use ethstore::random_phrase;
use ethsync::{SyncProvider, ManageNetwork}; use ethsync::{SyncProvider, ManageNetwork};
use ethcore::miner::MinerService; use ethcore::miner::MinerService;
use ethcore::client::{MiningBlockChainClient}; use ethcore::client::{MiningBlockChainClient};
use jsonrpc_core::*; use jsonrpc_core::{from_params, to_value, Value, Error, Params, Ready};
use v1::traits::Ethcore; use v1::traits::Ethcore;
use v1::types::{Bytes, U256, H160, H512, Peers, Transaction}; use v1::types::{Bytes, U256, H160, H256, H512, Peers, Transaction};
use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings};
use v1::helpers::params::expect_no_params; use v1::helpers::params::expect_no_params;
/// Ethcore implementation. /// Ethcore implementation.
pub struct EthcoreClient<C, M, S: ?Sized> where pub struct EthcoreClient<C, M, S: ?Sized, F=FetchClient> where
C: MiningBlockChainClient, C: MiningBlockChainClient,
M: MinerService, M: MinerService,
S: SyncProvider { S: SyncProvider,
F: Fetch {
client: Weak<C>, client: Weak<C>,
miner: Weak<M>, miner: Weak<M>,
@ -47,10 +50,14 @@ pub struct EthcoreClient<C, M, S: ?Sized> where
logger: Arc<RotatingLogger>, logger: Arc<RotatingLogger>,
settings: Arc<NetworkSettings>, settings: Arc<NetworkSettings>,
signer: Option<Arc<SignerService>>, signer: Option<Arc<SignerService>>,
fetch: Mutex<F>
} }
impl<C, M, S: ?Sized> EthcoreClient<C, M, S> where C: MiningBlockChainClient, M: MinerService, S: SyncProvider { impl<C, M, S: ?Sized> EthcoreClient<C, M, S> where
/// Creates new `EthcoreClient`. C: MiningBlockChainClient,
M: MinerService,
S: SyncProvider, {
/// Creates new `EthcoreClient` with default `Fetch`.
pub fn new( pub fn new(
client: &Arc<C>, client: &Arc<C>,
miner: &Arc<M>, miner: &Arc<M>,
@ -60,6 +67,26 @@ impl<C, M, S: ?Sized> EthcoreClient<C, M, S> where C: MiningBlockChainClient, M:
settings: Arc<NetworkSettings>, settings: Arc<NetworkSettings>,
signer: Option<Arc<SignerService>> signer: Option<Arc<SignerService>>
) -> Self { ) -> Self {
Self::with_fetch(client, miner, sync, net, logger, settings, signer)
}
}
impl<C, M, S: ?Sized, F> EthcoreClient<C, M, S, F> where
C: MiningBlockChainClient,
M: MinerService,
S: SyncProvider,
F: Fetch, {
/// Creates new `EthcoreClient` with customizable `Fetch`.
pub fn with_fetch(
client: &Arc<C>,
miner: &Arc<M>,
sync: &Arc<S>,
net: &Arc<ManageNetwork>,
logger: Arc<RotatingLogger>,
settings: Arc<NetworkSettings>,
signer: Option<Arc<SignerService>>
) -> Self {
EthcoreClient { EthcoreClient {
client: Arc::downgrade(client), client: Arc::downgrade(client),
miner: Arc::downgrade(miner), miner: Arc::downgrade(miner),
@ -68,6 +95,7 @@ impl<C, M, S: ?Sized> EthcoreClient<C, M, S> where C: MiningBlockChainClient, M:
logger: logger, logger: logger,
settings: settings, settings: settings,
signer: signer, signer: signer,
fetch: Mutex::new(F::default()),
} }
} }
@ -78,7 +106,11 @@ impl<C, M, S: ?Sized> EthcoreClient<C, M, S> where C: MiningBlockChainClient, M:
} }
} }
impl<C, M, S: ?Sized> Ethcore for EthcoreClient<C, M, S> where M: MinerService + 'static, C: MiningBlockChainClient + 'static, S: SyncProvider + 'static { impl<C, M, S: ?Sized, F> Ethcore for EthcoreClient<C, M, S, F> where
M: MinerService + 'static,
C: MiningBlockChainClient + 'static,
S: SyncProvider + 'static,
F: Fetch + 'static {
fn transactions_limit(&self, params: Params) -> Result<Value, Error> { fn transactions_limit(&self, params: Params) -> Result<Value, Error> {
try!(self.active()); try!(self.active());
@ -233,4 +265,42 @@ impl<C, M, S: ?Sized> Ethcore for EthcoreClient<C, M, S> where M: MinerService +
Ok(to_value(&take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::<Vec<Transaction>>())) Ok(to_value(&take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::<Vec<Transaction>>()))
} }
fn hash_content(&self, params: Params, ready: Ready) {
let res = self.active().and_then(|_| from_params::<(String,)>(params));
let hash_content = |result| {
let path = try!(result);
let mut file = io::BufReader::new(try!(fs::File::open(&path)));
// Try to hash
let result = sha3(&mut file);
// Remove file (always)
try!(fs::remove_file(&path));
// Return the result
Ok(try!(result))
};
match res {
Err(e) => ready.ready(Err(e)),
Ok((url, )) => {
let (tx, rx) = mpsc::channel();
let res = self.fetch.lock().request_async(&url, Default::default(), Box::new(move |result| {
let result = hash_content(result)
.map_err(errors::from_fetch_error)
.map(|hash| to_value(H256::from(hash)));
// Receive ready and invoke with result.
let ready: Ready = rx.try_recv().expect("When on_done is invoked ready object is always sent.");
ready.ready(result);
}));
// Either invoke ready right away or transfer it to the closure.
if let Err(e) = res {
ready.ready(Err(errors::from_fetch_error(e)));
} else {
tx.send(ready).expect("Rx end is sent to on_done closure.");
}
}
}
}
} }

View File

@ -0,0 +1,44 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Test implementation of fetch client.
use std::io::Write;
use std::{env, fs, thread};
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use fetch::{Fetch, FetchError, FetchResult};
/// Test implementation of fetcher. Will always return the same file.
#[derive(Default)]
pub struct TestFetch;
impl Fetch for TestFetch {
fn request_async(&mut self, _url: &str, _abort: Arc<AtomicBool>, on_done: Box<Fn(FetchResult) + Send>) -> Result<(), FetchError> {
thread::spawn(move || {
let mut path = env::temp_dir();
path.push(Self::random_filename());
let mut file = fs::File::create(&path).unwrap();
file.write_all(b"Some content").unwrap();
on_done(Ok(path));
});
Ok(())
}
}

View File

@ -18,6 +18,8 @@
mod sync_provider; mod sync_provider;
mod miner_service; mod miner_service;
mod fetch;
pub use self::sync_provider::{Config, TestSyncProvider}; pub use self::sync_provider::{Config, TestSyncProvider};
pub use self::miner_service::TestMinerService; pub use self::miner_service::TestMinerService;
pub use self::fetch::TestFetch;

View File

@ -23,7 +23,7 @@ use ethcore::client::{TestBlockChainClient};
use jsonrpc_core::IoHandler; use jsonrpc_core::IoHandler;
use v1::{Ethcore, EthcoreClient}; use v1::{Ethcore, EthcoreClient};
use v1::helpers::{SignerService, NetworkSettings}; use v1::helpers::{SignerService, NetworkSettings};
use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService}; use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService, TestFetch};
use super::manage_network::TestManageNetwork; use super::manage_network::TestManageNetwork;
fn miner_service() -> Arc<TestMinerService> { fn miner_service() -> Arc<TestMinerService> {
@ -60,12 +60,15 @@ fn network_service() -> Arc<ManageNetwork> {
Arc::new(TestManageNetwork) Arc::new(TestManageNetwork)
} }
type TestEthcoreClient = EthcoreClient<TestBlockChainClient, TestMinerService, TestSyncProvider, TestFetch>;
fn ethcore_client( fn ethcore_client(
client: &Arc<TestBlockChainClient>, client: &Arc<TestBlockChainClient>,
miner: &Arc<TestMinerService>, miner: &Arc<TestMinerService>,
sync: &Arc<TestSyncProvider>, sync: &Arc<TestSyncProvider>,
net: &Arc<ManageNetwork>) -> EthcoreClient<TestBlockChainClient, TestMinerService, TestSyncProvider> { net: &Arc<ManageNetwork>)
EthcoreClient::new(client, miner, sync, net, logger(), settings(), None) -> TestEthcoreClient {
EthcoreClient::with_fetch(client, miner, sync, net, logger(), settings(), None)
} }
#[test] #[test]
@ -140,9 +143,9 @@ fn rpc_ethcore_dev_logs() {
let logger = logger(); let logger = logger();
logger.append("a".to_owned()); logger.append("a".to_owned());
logger.append("b".to_owned()); logger.append("b".to_owned());
let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger.clone(), settings(), None).to_delegate(); let ethcore: TestEthcoreClient = EthcoreClient::with_fetch(&client, &miner, &sync, &net, logger.clone(), settings(), None);
let io = IoHandler::new(); let io = IoHandler::new();
io.add_delegate(ethcore); io.add_delegate(ethcore.to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_devLogs", "params":[], "id": 1}"#; let request = r#"{"jsonrpc": "2.0", "method": "ethcore_devLogs", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":["b","a"],"id":1}"#; let response = r#"{"jsonrpc":"2.0","result":["b","a"],"id":1}"#;
@ -263,8 +266,8 @@ fn rpc_ethcore_unsigned_transactions_count() {
let net = network_service(); let net = network_service();
let io = IoHandler::new(); let io = IoHandler::new();
let signer = Arc::new(SignerService::new_test()); let signer = Arc::new(SignerService::new_test());
let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger(), settings(), Some(signer)).to_delegate(); let ethcore: TestEthcoreClient = EthcoreClient::with_fetch(&client, &miner, &sync, &net, logger(), settings(), Some(signer));
io.add_delegate(ethcore); io.add_delegate(ethcore.to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_unsignedTransactionsCount", "params":[], "id": 1}"#; let request = r#"{"jsonrpc": "2.0", "method": "ethcore_unsignedTransactionsCount", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":0,"id":1}"#; let response = r#"{"jsonrpc":"2.0","result":0,"id":1}"#;
@ -287,6 +290,21 @@ fn rpc_ethcore_unsigned_transactions_count_when_signer_disabled() {
assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); assert_eq!(io.handle_request_sync(request), Some(response.to_owned()));
} }
#[test]
fn rpc_ethcore_hash_content() {
let miner = miner_service();
let client = client_service();
let sync = sync_provider();
let net = network_service();
let io = IoHandler::new();
io.add_delegate(ethcore_client(&client, &miner, &sync, &net).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "ethcore_hashContent", "params":["https://ethcore.io/assets/images/ethcore-black-horizontal.png"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e","id":1}"#;
assert_eq!(io.handle_request_sync(request), Some(response.to_owned()));
}
#[test] #[test]
fn rpc_ethcore_pending_transactions() { fn rpc_ethcore_pending_transactions() {
let miner = miner_service(); let miner = miner_service();

View File

@ -83,6 +83,9 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
/// Returns all pending (current) transactions from transaction queue. /// Returns all pending (current) transactions from transaction queue.
fn pending_transactions(&self, _: Params) -> Result<Value, Error>; fn pending_transactions(&self, _: Params) -> Result<Value, Error>;
/// Hash the content of a file under the given URL.
fn hash_content(&self, _: Params, _: Ready);
/// Should be used to convert object to io delegate. /// Should be used to convert object to io delegate.
fn to_delegate(self) -> IoDelegate<Self> { fn to_delegate(self) -> IoDelegate<Self> {
let mut delegate = IoDelegate::new(Arc::new(self)); let mut delegate = IoDelegate::new(Arc::new(self));
@ -107,6 +110,8 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
delegate.add_method("ethcore_registryAddress", Ethcore::registry_address); delegate.add_method("ethcore_registryAddress", Ethcore::registry_address);
delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message); delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message);
delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions); delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions);
delegate.add_async_method("ethcore_hashContent", Ethcore::hash_content);
delegate delegate
} }
} }

View File

@ -25,9 +25,10 @@ use util::{H64 as Eth64, H160 as Eth160, H256 as Eth256, H520 as Eth520, H512 as
macro_rules! impl_hash { macro_rules! impl_hash {
($name: ident, $other: ident, $size: expr) => { ($name: ident, $other: ident, $size: expr) => {
/// Hash serialization /// Hash serialization
#[derive(Eq)]
pub struct $name([u8; $size]); pub struct $name([u8; $size]);
impl Eq for $name { }
impl Default for $name { impl Default for $name {
fn default() -> Self { fn default() -> Self {
$name([0; $size]) $name([0; $size])

View File

@ -414,15 +414,15 @@ pub struct LocalizedTrace {
/// Result /// Result
result: Res, result: Res,
/// Trace address /// Trace address
trace_address: Vec<U256>, trace_address: Vec<usize>,
/// Subtraces /// Subtraces
subtraces: U256, subtraces: usize,
/// Transaction position /// Transaction position
transaction_position: U256, transaction_position: usize,
/// Transaction hash /// Transaction hash
transaction_hash: H256, transaction_hash: H256,
/// Block Number /// Block Number
block_number: U256, block_number: u64,
/// Block Hash /// Block Hash
block_hash: H256, block_hash: H256,
} }
@ -485,9 +485,9 @@ impl From<EthLocalizedTrace> for LocalizedTrace {
#[derive(Debug)] #[derive(Debug)]
pub struct Trace { pub struct Trace {
/// Trace address /// Trace address
trace_address: Vec<U256>, trace_address: Vec<usize>,
/// Subtraces /// Subtraces
subtraces: U256, subtraces: usize,
/// Action /// Action
action: Action, action: Action,
/// Result /// Result
@ -601,15 +601,15 @@ mod tests {
gas_used: 8.into(), gas_used: 8.into(),
output: vec![0x56, 0x78].into(), output: vec![0x56, 0x78].into(),
}), }),
trace_address: vec![10.into()], trace_address: vec![10],
subtraces: 1.into(), subtraces: 1,
transaction_position: 11.into(), transaction_position: 11,
transaction_hash: 12.into(), transaction_hash: 12.into(),
block_number: 13.into(), block_number: 13,
block_hash: 14.into(), block_hash: 14.into(),
}; };
let serialized = serde_json::to_string(&t).unwrap(); let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"result":{"gasUsed":"0x8","output":"0x5678"},"traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); assert_eq!(serialized, r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"result":{"gasUsed":"0x8","output":"0x5678"},"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#);
} }
#[test] #[test]
@ -624,15 +624,15 @@ mod tests {
call_type: CallType::Call, call_type: CallType::Call,
}), }),
result: Res::FailedCall(TraceError::OutOfGas), result: Res::FailedCall(TraceError::OutOfGas),
trace_address: vec![10.into()], trace_address: vec![10],
subtraces: 1.into(), subtraces: 1,
transaction_position: 11.into(), transaction_position: 11,
transaction_hash: 12.into(), transaction_hash: 12.into(),
block_number: 13.into(), block_number: 13,
block_hash: 14.into(), block_hash: 14.into(),
}; };
let serialized = serde_json::to_string(&t).unwrap(); let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"error":"Out of gas","traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); assert_eq!(serialized, r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"error":"Out of gas","traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#);
} }
#[test] #[test]
@ -649,15 +649,15 @@ mod tests {
code: vec![0x56, 0x78].into(), code: vec![0x56, 0x78].into(),
address: 0xff.into(), address: 0xff.into(),
}), }),
trace_address: vec![10.into()], trace_address: vec![10],
subtraces: 1.into(), subtraces: 1,
transaction_position: 11.into(), transaction_position: 11,
transaction_hash: 12.into(), transaction_hash: 12.into(),
block_number: 13.into(), block_number: 13,
block_hash: 14.into(), block_hash: 14.into(),
}; };
let serialized = serde_json::to_string(&t).unwrap(); let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"result":{"gasUsed":"0x8","code":"0x5678","address":"0x00000000000000000000000000000000000000ff"},"traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"result":{"gasUsed":"0x8","code":"0x5678","address":"0x00000000000000000000000000000000000000ff"},"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#);
} }
#[test] #[test]
@ -670,15 +670,15 @@ mod tests {
init: Bytes::new(vec![0x12, 0x34]), init: Bytes::new(vec![0x12, 0x34]),
}), }),
result: Res::FailedCreate(TraceError::OutOfGas), result: Res::FailedCreate(TraceError::OutOfGas),
trace_address: vec![10.into()], trace_address: vec![10],
subtraces: 1.into(), subtraces: 1,
transaction_position: 11.into(), transaction_position: 11,
transaction_hash: 12.into(), transaction_hash: 12.into(),
block_number: 13.into(), block_number: 13,
block_hash: 14.into(), block_hash: 14.into(),
}; };
let serialized = serde_json::to_string(&t).unwrap(); let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"error":"Out of gas","traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"error":"Out of gas","traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#);
} }
#[test] #[test]
@ -690,15 +690,15 @@ mod tests {
balance: 7.into(), balance: 7.into(),
}), }),
result: Res::None, result: Res::None,
trace_address: vec![10.into()], trace_address: vec![10],
subtraces: 1.into(), subtraces: 1,
transaction_position: 11.into(), transaction_position: 11,
transaction_hash: 12.into(), transaction_hash: 12.into(),
block_number: 13.into(), block_number: 13,
block_hash: 14.into(), block_hash: 14.into(),
}; };
let serialized = serde_json::to_string(&t).unwrap(); let serialized = serde_json::to_string(&t).unwrap();
assert_eq!(serialized, r#"{"type":"suicide","action":{"address":"0x0000000000000000000000000000000000000004","refundAddress":"0x0000000000000000000000000000000000000006","balance":"0x7"},"result":null,"traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); assert_eq!(serialized, r#"{"type":"suicide","action":{"address":"0x0000000000000000000000000000000000000004","refundAddress":"0x0000000000000000000000000000000000000006","balance":"0x7"},"result":null,"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#);
} }
#[test] #[test]

View File

@ -23,9 +23,11 @@ use util::{U256 as EthU256, Uint};
macro_rules! impl_uint { macro_rules! impl_uint {
($name: ident, $other: ident, $size: expr) => { ($name: ident, $other: ident, $size: expr) => {
/// Uint serialization. /// Uint serialization.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] #[derive(Debug, Default, Clone, Copy, PartialEq, Hash)]
pub struct $name($other); pub struct $name($other);
impl Eq for $name { }
impl<T> From<T> for $name where $other: From<T> { impl<T> From<T> for $name where $other: From<T> {
fn from(o: T) -> Self { fn from(o: T) -> Self {
$name($other::from(o)) $name($other::from(o))

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc; use std::sync::Arc;
use std::str;
use network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, use network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId,
NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, NetworkError}; NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, NetworkError};
use util::{U256, H256}; use util::{U256, H256};
@ -31,9 +32,9 @@ use std::str::FromStr;
use parking_lot::RwLock; use parking_lot::RwLock;
/// Ethereum sync protocol /// Ethereum sync protocol
pub const ETH_PROTOCOL: &'static str = "eth"; pub const ETH_PROTOCOL: [u8; 3] = *b"eth";
/// Infinity protocol /// Infinity protocol
pub const INF_PROTOCOL: &'static str = "inf"; pub const INF_PROTOCOL: [u8; 3] = *b"inf";
/// Sync configuration /// Sync configuration
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
@ -42,6 +43,8 @@ pub struct SyncConfig {
pub max_download_ahead_blocks: usize, pub max_download_ahead_blocks: usize,
/// Network ID /// Network ID
pub network_id: U256, pub network_id: U256,
/// Main "eth" subprotocol name.
pub subprotocol_name: [u8; 3],
/// Fork block to check /// Fork block to check
pub fork_block: Option<(BlockNumber, H256)>, pub fork_block: Option<(BlockNumber, H256)>,
} }
@ -51,6 +54,7 @@ impl Default for SyncConfig {
SyncConfig { SyncConfig {
max_download_ahead_blocks: 20000, max_download_ahead_blocks: 20000,
network_id: U256::from(1), network_id: U256::from(1),
subprotocol_name: ETH_PROTOCOL,
fork_block: None, fork_block: None,
} }
} }
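A minimal sketch (SyncConfig assumed in scope) of overriding the new subprotocol_name field the way the run.rs hunk above does; clone_from_slice panics on a length mismatch, hence the three-byte check:

    let mut config = SyncConfig::default();    // subprotocol_name starts as *b"eth"
    let name = "exp";                          // hypothetical value from a chain spec
    if name.len() == 3 {
        config.subprotocol_name.clone_from_slice(name.as_bytes());
    }
    assert_eq!(&config.subprotocol_name, b"exp");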
@ -73,6 +77,8 @@ pub struct EthSync {
eth_handler: Arc<SyncProtocolHandler>, eth_handler: Arc<SyncProtocolHandler>,
/// Infinity Protocol handler /// Infinity Protocol handler
inf_handler: Arc<InfProtocolHandler>, inf_handler: Arc<InfProtocolHandler>,
/// The main subprotocol name
subprotocol_name: [u8; 3],
} }
impl EthSync { impl EthSync {
@ -85,6 +91,7 @@ impl EthSync {
network: service, network: service,
eth_handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain.clone(), snapshot_service: snapshot_service.clone() }), eth_handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain.clone(), snapshot_service: snapshot_service.clone() }),
inf_handler: Arc::new(InfProtocolHandler { sync: RwLock::new(inf_sync), chain: chain, snapshot_service: snapshot_service }), inf_handler: Arc::new(InfProtocolHandler { sync: RwLock::new(inf_sync), chain: chain, snapshot_service: snapshot_service }),
subprotocol_name: config.subprotocol_name,
}); });
Ok(sync) Ok(sync)
@ -171,7 +178,7 @@ impl ChainNotify for EthSync {
sealed: Vec<H256>, sealed: Vec<H256>,
_duration: u64) _duration: u64)
{ {
self.network.with_context(ETH_PROTOCOL, |context| { self.network.with_context(self.subprotocol_name, |context| {
let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service); let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service);
self.eth_handler.sync.write().chain_new_blocks( self.eth_handler.sync.write().chain_new_blocks(
&mut sync_io, &mut sync_io,
@ -185,7 +192,7 @@ impl ChainNotify for EthSync {
fn start(&self) { fn start(&self) {
self.network.start().unwrap_or_else(|e| warn!("Error starting network: {:?}", e)); self.network.start().unwrap_or_else(|e| warn!("Error starting network: {:?}", e));
self.network.register_protocol(self.eth_handler.clone(), ETH_PROTOCOL, &[62u8, 63u8, 64u8]) self.network.register_protocol(self.eth_handler.clone(), self.subprotocol_name, &[62u8, 63u8, 64u8])
.unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e)); .unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e));
self.network.register_protocol(self.inf_handler.clone(), INF_PROTOCOL, &[1u8]) self.network.register_protocol(self.inf_handler.clone(), INF_PROTOCOL, &[1u8])
.unwrap_or_else(|e| warn!("Error registering infinity protocol: {:?}", e)); .unwrap_or_else(|e| warn!("Error registering infinity protocol: {:?}", e));
@ -249,7 +256,7 @@ impl ManageNetwork for EthSync {
} }
fn stop_network(&self) { fn stop_network(&self) {
self.network.with_context(ETH_PROTOCOL, |context| { self.network.with_context(self.subprotocol_name, |context| {
let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service); let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service);
self.eth_handler.sync.write().abort(&mut sync_io); self.eth_handler.sync.write().abort(&mut sync_io);
}); });

View File

@ -19,7 +19,7 @@ use rlp::*;
use network::NetworkError; use network::NetworkError;
use ethcore::header::{ Header as BlockHeader}; use ethcore::header::{ Header as BlockHeader};
known_heap_size!(0, HeaderId, SyncBlock); known_heap_size!(0, HeaderId);
/// Block data with optional body. /// Block data with optional body.
struct SyncBlock { struct SyncBlock {
@ -27,6 +27,12 @@ struct SyncBlock {
body: Option<Bytes>, body: Option<Bytes>,
} }
impl HeapSizeOf for SyncBlock {
fn heap_size_of_children(&self) -> usize {
self.header.heap_size_of_children() + self.body.heap_size_of_children()
}
}
/// Used to identify header by transactions and uncles hashes /// Used to identify header by transactions and uncles hashes
#[derive(Eq, PartialEq, Hash)] #[derive(Eq, PartialEq, Hash)]
struct HeaderId { struct HeaderId {
@ -219,10 +225,14 @@ impl BlockCollection {
self.blocks.contains_key(hash) self.blocks.contains_key(hash)
} }
/// Return heap size. /// Return used heap size.
pub fn heap_size(&self) -> usize { pub fn heap_size(&self) -> usize {
//TODO: other collections self.heads.heap_size_of_children()
self.blocks.heap_size_of_children() + self.blocks.heap_size_of_children()
+ self.parents.heap_size_of_children()
+ self.header_ids.heap_size_of_children()
+ self.downloading_headers.heap_size_of_children()
+ self.downloading_bodies.heap_size_of_children()
} }
/// Check if given block hash is marked as being downloaded. /// Check if given block hash is marked as being downloaded.

View File

@ -17,7 +17,6 @@
use network::{NetworkContext, PeerId, PacketId, NetworkError}; use network::{NetworkContext, PeerId, PacketId, NetworkError};
use ethcore::client::BlockChainClient; use ethcore::client::BlockChainClient;
use ethcore::snapshot::SnapshotService; use ethcore::snapshot::SnapshotService;
use api::ETH_PROTOCOL;
/// IO interface for the syncing handler. /// IO interface for the syncing handler.
/// Provides peer connection management and an interface to the blockchain client. /// Provides peer connection management and an interface to the blockchain client.
@ -101,7 +100,7 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> {
} }
fn eth_protocol_version(&self, peer_id: PeerId) -> u8 { fn eth_protocol_version(&self, peer_id: PeerId) -> u8 {
self.network.protocol_version(peer_id, ETH_PROTOCOL).unwrap_or(0) self.network.protocol_version(peer_id, self.network.subprotocol_name()).unwrap_or(0)
} }
} }

View File

@ -64,11 +64,11 @@ pub fn clean_0x(s: &str) -> &str {
macro_rules! impl_hash { macro_rules! impl_hash {
($from: ident, $size: expr) => { ($from: ident, $size: expr) => {
#[derive(Eq)]
#[repr(C)] #[repr(C)]
/// Unformatted binary data of fixed length. /// Unformatted binary data of fixed length.
pub struct $from (pub [u8; $size]); pub struct $from (pub [u8; $size]);
impl From<[u8; $size]> for $from { impl From<[u8; $size]> for $from {
fn from(bytes: [u8; $size]) -> Self { fn from(bytes: [u8; $size]) -> Self {
$from(bytes) $from(bytes)
@ -210,6 +210,8 @@ macro_rules! impl_hash {
} }
} }
impl Eq for $from {}
impl PartialEq for $from { impl PartialEq for $from {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
for i in 0..$size { for i in 0..$size {

util/fetch/Cargo.toml Normal file
View File

@ -0,0 +1,18 @@
[package]
description = "HTTP/HTTPS fetching library"
homepage = "http://ethcore.io"
license = "GPL-3.0"
name = "fetch"
version = "0.1.0"
authors = ["Ethcore <admin@ethcore.io>"]
[dependencies]
log = "0.3"
rand = "0.3"
hyper = { default-features = false, git = "https://github.com/ethcore/hyper" }
https-fetch = { path = "../https-fetch" }
clippy = { version = "0.0.90", optional = true}
[features]
default = []
dev = ["clippy"]

util/fetch/src/client.rs Normal file
View File

@ -0,0 +1,146 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Fetching
use std::{env, io};
use std::sync::{mpsc, Arc};
use std::sync::atomic::AtomicBool;
use std::path::PathBuf;
use hyper;
use https_fetch as https;
use fetch_file::{FetchHandler, Error as HttpFetchError};
pub type FetchResult = Result<PathBuf, FetchError>;
#[derive(Debug)]
pub enum FetchError {
InvalidUrl,
Http(HttpFetchError),
Https(https::FetchError),
Io(io::Error),
Other(String),
}
impl From<HttpFetchError> for FetchError {
fn from(e: HttpFetchError) -> Self {
FetchError::Http(e)
}
}
impl From<io::Error> for FetchError {
fn from(e: io::Error) -> Self {
FetchError::Io(e)
}
}
pub trait Fetch: Default + Send {
/// Fetch URL and get the result in a callback.
fn request_async(&mut self, url: &str, abort: Arc<AtomicBool>, on_done: Box<Fn(FetchResult) + Send>) -> Result<(), FetchError>;
/// Fetch URL and get a result Receiver. You will be notified when the receiver is ready via the `on_done` callback.
fn request(&mut self, url: &str, abort: Arc<AtomicBool>, on_done: Box<Fn() + Send>) -> Result<mpsc::Receiver<FetchResult>, FetchError> {
let (tx, rx) = mpsc::channel();
try!(self.request_async(url, abort, Box::new(move |result| {
let res = tx.send(result);
if let Err(_) = res {
warn!("Fetch finished, but no one was listening");
}
on_done();
})));
Ok(rx)
}
/// Closes this client
fn close(self) {}
/// Returns a random filename
fn random_filename() -> String {
use ::rand::Rng;
let mut rng = ::rand::OsRng::new().unwrap();
rng.gen_ascii_chars().take(12).collect()
}
}
pub struct Client {
http_client: hyper::Client<FetchHandler>,
https_client: https::Client,
limit: Option<usize>,
}
impl Default for Client {
fn default() -> Self {
// Max 15MB will be downloaded.
Client::with_limit(Some(15*1024*1024))
}
}
impl Client {
fn with_limit(limit: Option<usize>) -> Self {
Client {
http_client: hyper::Client::new().expect("Unable to initialize http client."),
https_client: https::Client::with_limit(limit).expect("Unable to initialize https client."),
limit: limit,
}
}
fn convert_url(url: hyper::Url) -> Result<https::Url, FetchError> {
let host = format!("{}", try!(url.host().ok_or(FetchError::InvalidUrl)));
let port = try!(url.port_or_known_default().ok_or(FetchError::InvalidUrl));
https::Url::new(&host, port, url.path()).map_err(|_| FetchError::InvalidUrl)
}
fn temp_path() -> PathBuf {
let mut dir = env::temp_dir();
dir.push(Self::random_filename());
dir
}
}
impl Fetch for Client {
fn close(self) {
self.http_client.close();
self.https_client.close();
}
fn request_async(&mut self, url: &str, abort: Arc<AtomicBool>, on_done: Box<Fn(FetchResult) + Send>) -> Result<(), FetchError> {
let is_https = url.starts_with("https://");
let url = try!(url.parse().map_err(|_| FetchError::InvalidUrl));
let temp_path = Self::temp_path();
trace!(target: "fetch", "Fetching from: {:?}", url);
if is_https {
let url = try!(Self::convert_url(url));
try!(self.https_client.fetch_to_file(
url,
temp_path.clone(),
abort,
move |result| on_done(result.map(|_| temp_path).map_err(FetchError::Https)),
).map_err(|e| FetchError::Other(format!("{:?}", e))));
} else {
try!(self.http_client.request(
url,
FetchHandler::new(temp_path, abort, Box::new(move |result| on_done(result)), self.limit.map(|v| v as u64).clone()),
).map_err(|e| FetchError::Other(format!("{:?}", e))));
}
Ok(())
}
}
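A hypothetical caller of the new client (URL and error handling are illustrative, and the Fetch trait must be in scope); the provided request method wraps request_async and hands back an mpsc::Receiver<FetchResult>:

    use std::sync::Arc;
    use std::sync::atomic::AtomicBool;

    fn fetch_example(client: &mut Client) {
        let abort = Arc::new(AtomicBool::new(false));
        // Fails fast on an invalid URL; otherwise the download runs in the background.
        let rx = client.request("https://ethcore.io/", abort, Box::new(|| {})).unwrap();
        // Blocks until the fetch finishes; Ok carries the temporary file path.
        match rx.recv().unwrap() {
            Ok(path) => println!("downloaded to {:?}", path),
            Err(err) => println!("fetch failed: {:?}", err),
        }
    }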

View File

@ -16,12 +16,11 @@
//! Hyper Client Handler to Fetch File //! Hyper Client Handler to Fetch File
use std::{env, io, fs, fmt}; use std::{io, fs, fmt};
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::{mpsc, Arc}; use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration; use std::time::Duration;
use random_filename;
use hyper::status::StatusCode; use hyper::status::StatusCode;
use hyper::client::{Request, Response, DefaultTransport as HttpStream}; use hyper::client::{Request, Response, DefaultTransport as HttpStream};
@ -34,30 +33,31 @@ use super::FetchError;
pub enum Error { pub enum Error {
Aborted, Aborted,
NotStarted, NotStarted,
SizeLimit,
UnexpectedStatus(StatusCode), UnexpectedStatus(StatusCode),
IoError(io::Error), IoError(io::Error),
HyperError(hyper::Error), HyperError(hyper::Error),
} }
pub type FetchResult = Result<PathBuf, FetchError>; pub type FetchResult = Result<PathBuf, FetchError>;
pub type OnDone = Box<Fn() + Send>; pub type OnDone = Box<Fn(FetchResult) + Send>;
pub struct Fetch { pub struct FetchHandler {
path: PathBuf, path: PathBuf,
abort: Arc<AtomicBool>, abort: Arc<AtomicBool>,
file: Option<fs::File>, file: Option<fs::File>,
result: Option<FetchResult>, result: Option<FetchResult>,
sender: mpsc::Sender<FetchResult>,
on_done: Option<OnDone>, on_done: Option<OnDone>,
size_limit: Option<u64>,
} }
impl fmt::Debug for Fetch { impl fmt::Debug for FetchHandler {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "Fetch {{ path: {:?}, file: {:?}, result: {:?} }}", self.path, self.file, self.result) write!(f, "Fetch {{ path: {:?}, file: {:?}, result: {:?} }}", self.path, self.file, self.result)
} }
} }
impl Drop for Fetch { impl Drop for FetchHandler {
fn drop(&mut self) { fn drop(&mut self) {
let res = self.result.take().unwrap_or(Err(Error::NotStarted.into())); let res = self.result.take().unwrap_or(Err(Error::NotStarted.into()));
// Remove file if there was an error // Remove file if there was an error
@ -69,40 +69,35 @@ impl Drop for Fetch {
} }
} }
// send result // send result
let _ = self.sender.send(res);
if let Some(f) = self.on_done.take() { if let Some(f) = self.on_done.take() {
f(); f(res);
} }
} }
} }
impl Fetch { impl FetchHandler {
pub fn new(sender: mpsc::Sender<FetchResult>, abort: Arc<AtomicBool>, on_done: OnDone) -> Self { pub fn new(path: PathBuf, abort: Arc<AtomicBool>, on_done: OnDone, size_limit: Option<u64>) -> Self {
let mut dir = env::temp_dir(); FetchHandler {
dir.push(random_filename()); path: path,
Fetch {
path: dir,
abort: abort, abort: abort,
file: None, file: None,
result: None, result: None,
sender: sender,
on_done: Some(on_done), on_done: Some(on_done),
size_limit: size_limit,
} }
} }
}
impl Fetch {
fn is_aborted(&self) -> bool { fn is_aborted(&self) -> bool {
self.abort.load(Ordering::SeqCst) self.abort.load(Ordering::SeqCst)
} }
fn mark_aborted(&mut self) -> Next { fn mark_aborted(&mut self) -> Next {
self.result = Some(Err(Error::Aborted.into())); self.result = Some(Err(Error::Aborted.into()));
Next::end() Next::end()
} }
} }
impl hyper::client::Handler<HttpStream> for Fetch { impl hyper::client::Handler<HttpStream> for FetchHandler {
fn on_request(&mut self, req: &mut Request) -> Next { fn on_request(&mut self, req: &mut Request) -> Next {
if self.is_aborted() { if self.is_aborted() {
return self.mark_aborted(); return self.mark_aborted();
@ -147,7 +142,19 @@ impl hyper::client::Handler<HttpStream> for Fetch {
} }
match io::copy(decoder, self.file.as_mut().expect("File is there because on_response has created it.")) { match io::copy(decoder, self.file.as_mut().expect("File is there because on_response has created it.")) {
Ok(0) => Next::end(), Ok(0) => Next::end(),
Ok(_) => read(), Ok(bytes_read) => match self.size_limit {
None => read(),
// Check limit
Some(limit) if limit > bytes_read => {
self.size_limit = Some(limit - bytes_read);
read()
},
// Size limit reached
_ => {
self.result = Some(Err(Error::SizeLimit.into()));
Next::end()
},
},
Err(e) => match e.kind() { Err(e) => match e.kind() {
io::ErrorKind::WouldBlock => Next::read(), io::ErrorKind::WouldBlock => Next::read(),
_ => { _ => {
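An illustrative trace of the limit bookkeeping above: with size_limit = Some(10), a 4-byte chunk passes (10 > 4, the limit becomes Some(6)), a second 4-byte chunk leaves Some(2), and the next chunk fails the limit > bytes_read test, so the handler records Error::SizeLimit and ends the request; the partially written temp file is then removed by the Drop impl above, which deletes the file whenever the result is an error.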

View File

@ -14,21 +14,16 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Block queue info types //! A service to fetch any HTTP / HTTPS content.
/// Block queue status #[macro_use]
#[derive(Debug, Binary)] extern crate log;
pub struct BlockQueueInfo { extern crate hyper;
/// Number of queued blocks pending verification extern crate https_fetch;
pub unverified_queue_size: usize, extern crate rand;
/// Number of verified queued blocks pending import
pub verified_queue_size: usize,
/// Number of blocks being verified pub mod client;
pub verifying_queue_size: usize, pub mod fetch_file;
/// Configured maximum number of blocks in the queue
pub max_queue_size: usize, pub use self::client::{Client, Fetch, FetchError, FetchResult};
/// Configured maximum number of bytes to use
pub max_mem_use: usize,
/// Heap memory used in bytes
pub mem_used: usize,
}

View File

@@ -78,6 +78,10 @@ impl Drop for Client {
 impl Client {
 	pub fn new() -> Result<Self, FetchError> {
+		Self::with_limit(None)
+	}
+
+	pub fn with_limit(size_limit: Option<usize>) -> Result<Self, FetchError> {
 		let mut event_loop = try!(mio::EventLoop::new());
 		let channel = event_loop.channel();

@@ -85,6 +89,7 @@ impl Client {
 			let mut client = ClientLoop {
 				next_token: 0,
 				sessions: HashMap::new(),
+				size_limit: size_limit,
 			};
 			event_loop.run(&mut client).unwrap();
 		});

@@ -128,6 +133,7 @@ impl Client {
 pub struct ClientLoop {
 	next_token: usize,
 	sessions: HashMap<usize, TlsClient>,
+	size_limit: Option<usize>,
 }

 impl mio::Handler for ClientLoop {

@@ -154,7 +160,7 @@ impl mio::Handler for ClientLoop {
 		let token = self.next_token;
 		self.next_token += 1;
-		if let Ok(mut tlsclient) = TlsClient::new(mio::Token(token), &url, writer, abort, callback) {
+		if let Ok(mut tlsclient) = TlsClient::new(mio::Token(token), &url, writer, abort, callback, self.size_limit.clone()) {
 			let httpreq = format!(
 				"GET {} HTTP/1.1\r\nHost: {}\r\nConnection: close\r\nAccept-Encoding: identity\r\n\r\n",
 				url.path(),
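With this change `Client::new()` is just sugar for `with_limit(None)`, and the optional cap is cloned into every `TlsClient` the event loop spawns. A hedged usage sketch, assuming only the `Client` / `FetchError` names shown above:

```rust
fn make_clients() -> Result<(Client, Client), FetchError> {
	// Unlimited, as before:
	let client = try!(Client::new());
	// Refuse bodies larger than 64 KiB; the limit rides into each session:
	let capped = try!(Client::with_limit(Some(64 * 1024)));
	Ok((client, capped))
}
```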


@@ -35,18 +35,20 @@ pub struct HttpProcessor {
 	status: Option<String>,
 	headers: Vec<String>,
 	body_writer: io::BufWriter<Box<io::Write>>,
+	size_limit: Option<usize>,
 }

 const BREAK_LEN: usize = 2;

 impl HttpProcessor {
-	pub fn new(body_writer: Box<io::Write>) -> Self {
+	pub fn new(body_writer: Box<io::Write>, size_limit: Option<usize>) -> Self {
 		HttpProcessor {
 			state: State::WaitingForStatus,
 			buffer: Cursor::new(Vec::new()),
 			status: None,
 			headers: Vec::new(),
-			body_writer: io::BufWriter::new(body_writer)
+			body_writer: io::BufWriter::new(body_writer),
+			size_limit: size_limit,
 		}
 	}

@@ -140,6 +142,15 @@ impl HttpProcessor {
 				},
 				State::WritingBody => {
 					let len = self.buffer.get_ref().len();
+					match self.size_limit {
+						None => {},
+						Some(limit) if limit > len => {},
+						_ => {
+							warn!("Finishing file fetching because limit was reached.");
+							self.set_state(State::Finished);
+							continue;
+						}
+					}
 					try!(self.body_writer.write_all(self.buffer.get_ref()));
 					self.buffer_consume(len);
 					return Ok(());

@@ -167,6 +178,17 @@ impl HttpProcessor {
 				},
 				// Buffers the data until we have a full chunk
 				State::WritingChunk(left) if self.buffer.get_ref().len() >= left => {
+					match self.size_limit {
+						None => {},
+						Some(limit) if limit > left => {
+							self.size_limit = Some(limit - left);
+						},
+						_ => {
+							warn!("Finishing file fetching because limit was reached.");
+							self.set_state(State::Finished);
+							continue;
+						}
+					}
 					try!(self.body_writer.write_all(&self.buffer.get_ref()[0..left]));
 					self.buffer_consume(left + BREAK_LEN);

@@ -230,7 +252,7 @@ mod tests {
 	#[test]
 	fn should_be_able_to_process_status_line() {
 		// given
-		let mut http = HttpProcessor::new(Box::new(Cursor::new(Vec::new())));
+		let mut http = HttpProcessor::new(Box::new(Cursor::new(Vec::new())), None);

 		// when
 		let out =

@@ -249,7 +271,7 @@ mod tests {
 	#[test]
 	fn should_be_able_to_process_headers() {
 		// given
-		let mut http = HttpProcessor::new(Box::new(Cursor::new(Vec::new())));
+		let mut http = HttpProcessor::new(Box::new(Cursor::new(Vec::new())), None);

 		// when
 		let out =

@@ -274,7 +296,7 @@ mod tests {
 	fn should_be_able_to_consume_body() {
 		// given
 		let (writer, data) = Writer::new();
-		let mut http = HttpProcessor::new(Box::new(writer));
+		let mut http = HttpProcessor::new(Box::new(writer), None);

 		// when
 		let out =

@@ -301,7 +323,7 @@ mod tests {
 	fn should_correctly_handle_chunked_content() {
 		// given
 		let (writer, data) = Writer::new();
-		let mut http = HttpProcessor::new(Box::new(writer));
+		let mut http = HttpProcessor::new(Box::new(writer), None);

 		// when
 		let out =

@@ -331,4 +353,40 @@ mod tests {
 		assert_eq!(data.borrow().get_ref()[..], b"Parity in\r\n\r\nchunks."[..]);
 		assert_eq!(http.state(), State::Finished);
 	}
+
+	#[test]
+	fn should_stop_fetching_when_limit_is_reached() {
+		// given
+		let (writer, data) = Writer::new();
+		let mut http = HttpProcessor::new(Box::new(writer), Some(5));
+
+		// when
+		let out =
+			"\
+			HTTP/1.1 200 OK\r\n\
+			Host: 127.0.0.1:8080\r\n\
+			Transfer-Encoding: chunked\r\n\
+			Connection: close\r\n\
+			\r\n\
+			4\r\n\
+			Pari\r\n\
+			3\r\n\
+			ty \r\n\
+			D\r\n\
+			in\r\n\
+			\r\n\
+			chunks.\r\n\
+			0\r\n\
+			\r\n\
+			";
+		http.write_all(out.as_bytes()).unwrap();
+		http.flush().unwrap();
+
+		// then
+		assert_eq!(http.status().unwrap(), "HTTP/1.1 200 OK");
+		assert_eq!(http.headers().len(), 3);
+		assert_eq!(data.borrow().get_ref()[..], b"Pari"[..]);
+		assert_eq!(http.state(), State::Finished);
+	}
 }
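Walking the new fixture through the `WritingChunk` guard above explains the `b"Pari"` assertion: the first chunk is 4 bytes and `5 > 4` holds, so it is written and the budget drops to 1; the next chunk is 3 bytes and `1 > 3` fails, so the processor flips to `Finished` before writing anything more. The guard in isolation (`chunk_fits` is an illustrative name, not in the diff):

```rust
/// Same per-chunk decision HttpProcessor makes: `None` = no limit,
/// otherwise a chunk only passes while it fits strictly under the budget.
fn chunk_fits(budget: &mut Option<usize>, chunk_len: usize) -> bool {
	match *budget {
		None => true,
		Some(limit) if limit > chunk_len => {
			*budget = Some(limit - chunk_len);
			true
		},
		_ => false,
	}
}

#[test]
fn limit_of_five_keeps_only_the_first_chunk() {
	let mut budget = Some(5);
	assert!(chunk_fits(&mut budget, 4));  // "Pari" is written, budget = 1
	assert!(!chunk_fits(&mut budget, 3)); // "ty " trips the limit
}
```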


@@ -87,6 +87,7 @@ impl TlsClient {
 		writer: Box<io::Write + Send>,
 		abort: Arc<AtomicBool>,
 		mut callback: Box<FnMut(FetchResult) + Send>,
+		size_limit: Option<usize>,
 	) -> Result<Self, FetchError> {
 		let res = TlsClient::make_config().and_then(|cfg| {
 			TcpStream::connect(url.address()).map(|sock| {

@@ -98,7 +99,7 @@ impl TlsClient {
 			Ok((cfg, sock)) => Ok(TlsClient {
 				abort: abort,
 				token: token,
-				writer: HttpProcessor::new(writer),
+				writer: HttpProcessor::new(writer, size_limit),
 				socket: sock,
 				closing: false,
 				error: None,


@@ -137,7 +137,7 @@ const SYS_TIMER: usize = LAST_SESSION + 1;
 /// Protocol handler level packet id
 pub type PacketId = u8;
 /// Protocol / handler id
-pub type ProtocolId = &'static str;
+pub type ProtocolId = [u8; 3];

 /// Messages used to communitate with the event loop from other threads.
 #[derive(Clone)]

@@ -185,7 +185,7 @@ pub struct CapabilityInfo {
 impl Encodable for CapabilityInfo {
 	fn rlp_append(&self, s: &mut RlpStream) {
 		s.begin_list(2);
-		s.append(&self.protocol);
+		s.append(&&self.protocol[..]);
 		s.append(&self.version);
 	}
 }

@@ -284,10 +284,13 @@ impl<'s> NetworkContext<'s> {
 	}

 	/// Returns max version for a given protocol.
-	pub fn protocol_version(&self, peer: PeerId, protocol: &str) -> Option<u8> {
+	pub fn protocol_version(&self, peer: PeerId, protocol: ProtocolId) -> Option<u8> {
 		let session = self.resolve_session(peer);
 		session.and_then(|s| s.lock().capability_version(protocol))
 	}
+
+	/// Returns this object's subprotocol name.
+	pub fn subprotocol_name(&self) -> ProtocolId { self.protocol }
 }

 /// Shared host information

@@ -801,8 +804,8 @@ impl Host {
 					}
 				}
 				for (p, _) in self.handlers.read().iter() {
-					if s.have_capability(p) {
-						ready_data.push(p);
+					if s.have_capability(*p) {
+						ready_data.push(*p);
 					}
 				}
 			},

@@ -811,7 +814,7 @@ impl Host {
 				protocol,
 				packet_id,
 			}) => {
-				match self.handlers.read().get(protocol) {
+				match self.handlers.read().get(&protocol) {
 					None => { warn!(target: "network", "No handler found for protocol: {:?}", protocol) },
 					Some(_) => packet_data.push((protocol, packet_id, data)),
 				}

@@ -826,13 +829,13 @@ impl Host {
 		}
 		let handlers = self.handlers.read();
 		for p in ready_data {
-			let h = handlers.get(p).unwrap().clone();
+			let h = handlers.get(&p).unwrap().clone();
 			self.stats.inc_sessions();
 			let reserved = self.reserved_nodes.read();
 			h.connected(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token);
 		}
 		for (p, packet_id, data) in packet_data {
-			let h = handlers.get(p).unwrap().clone();
+			let h = handlers.get(&p).unwrap().clone();
 			let reserved = self.reserved_nodes.read();
 			h.read(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token, packet_id, &data[1..]);
 		}

@@ -857,8 +860,8 @@ impl Host {
 			if s.is_ready() {
 				self.num_sessions.fetch_sub(1, AtomicOrdering::SeqCst);
 				for (p, _) in self.handlers.read().iter() {
-					if s.have_capability(p) {
-						to_disconnect.push(p);
+					if s.have_capability(*p) {
+						to_disconnect.push(*p);
 					}
 				}
 			}

@@ -874,7 +877,7 @@ impl Host {
 			}
 		}
 		for p in to_disconnect {
-			let h = self.handlers.read().get(p).unwrap().clone();
+			let h = self.handlers.read().get(&p).unwrap().clone();
 			let reserved = self.reserved_nodes.read();
 			h.disconnected(&NetworkContext::new(io, p, expired_session.clone(), self.sessions.clone(), &reserved), &token);
 		}

@@ -980,7 +983,7 @@ impl IoHandler<NetworkIoMessage> for Host {
 				self.nodes.write().clear_useless();
 			},
 			_ => match self.timers.read().get(&token).cloned() {
-				Some(timer) => match self.handlers.read().get(timer.protocol).cloned() {
+				Some(timer) => match self.handlers.read().get(&timer.protocol).cloned() {
 					None => { warn!(target: "network", "No handler found for protocol: {:?}", timer.protocol) },
 					Some(h) => {
 						let reserved = self.reserved_nodes.read();

@@ -1004,11 +1007,11 @@ impl IoHandler<NetworkIoMessage> for Host {
 			} => {
 				let h = handler.clone();
 				let reserved = self.reserved_nodes.read();
-				h.initialize(&NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved));
-				self.handlers.write().insert(protocol, h);
+				h.initialize(&NetworkContext::new(io, *protocol, None, self.sessions.clone(), &reserved));
+				self.handlers.write().insert(*protocol, h);
 				let mut info = self.info.write();
 				for v in versions {
-					info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 });
+					info.capabilities.push(CapabilityInfo { protocol: *protocol, version: *v, packet_count:0 });
 				}
 			},
 			NetworkIoMessage::AddTimer {

@@ -1023,7 +1026,7 @@ impl IoHandler<NetworkIoMessage> for Host {
 					*counter += 1;
 					handler_token
 				};
-				self.timers.write().insert(handler_token, ProtocolTimer { protocol: protocol, token: *token });
+				self.timers.write().insert(handler_token, ProtocolTimer { protocol: *protocol, token: *token });
 				io.register_timer(handler_token, *delay).unwrap_or_else(|e| debug!("Error registering timer {}: {:?}", token, e));
 			},
 			NetworkIoMessage::Disconnect(ref peer) => {


@@ -45,7 +45,7 @@
 //!
 //! fn main () {
 //! 	let mut service = NetworkService::new(NetworkConfiguration::new_local()).expect("Error creating network service");
-//! 	service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]);
+//! 	service.register_protocol(Arc::new(MyHandler), *b"myp", &[1u8]);
 //! 	service.start().expect("Error starting service");
 //!
 //! 	// Wait for quit condition
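The doc example changes because `ProtocolId` is now a fixed `[u8; 3]` instead of `&'static str`: a byte-string literal like `b"myp"` has type `&[u8; 3]`, and the leading `*` copies the array out of the reference. A tiny sketch of what does and does not compile under the new type:

```rust
type ProtocolId = [u8; 3];

fn main() {
	let id: ProtocolId = *b"myp"; // b"myp" is a &[u8; 3], dereferenced into the array
	assert_eq!(&id[..], b"myp");  // compare as a slice
	// `*b"myproto"` would be a `[u8; 7]` and fail to type-check, which is
	// why the longer names are shortened to three bytes throughout this commit.
}
```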


@@ -14,8 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

+use std::{str, io};
 use std::net::SocketAddr;
-use std::io;
 use std::sync::*;
 use mio::*;
 use mio::tcp::*;

@@ -63,7 +63,7 @@ pub enum SessionData {
 		/// Packet data
 		data: Vec<u8>,
 		/// Packet protocol ID
-		protocol: &'static str,
+		protocol: [u8; 3],
 		/// Zero based packet ID
 		packet_id: u8,
 	},

@@ -89,15 +89,21 @@ pub struct SessionInfo {
 #[derive(Debug, PartialEq, Eq)]
 pub struct PeerCapabilityInfo {
-	pub protocol: String,
+	pub protocol: ProtocolId,
 	pub version: u8,
 }

 impl Decodable for PeerCapabilityInfo {
 	fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
 		let c = decoder.as_rlp();
+		let p: Vec<u8> = try!(c.val_at(0));
+		if p.len() != 3 {
+			return Err(DecoderError::Custom("Invalid subprotocol string length. Should be 3"));
+		}
+		let mut p2: ProtocolId = [0u8; 3];
+		p2.clone_from_slice(&p);
 		Ok(PeerCapabilityInfo {
-			protocol: try!(c.val_at(0)),
+			protocol: p2,
 			version: try!(c.val_at(1))
 		})
 	}

@@ -105,7 +111,7 @@ impl Decodable for PeerCapabilityInfo {
 #[derive(Debug)]
 struct SessionCapabilityInfo {
-	pub protocol: &'static str,
+	pub protocol: [u8; 3],
 	pub version: u8,
 	pub packet_count: u8,
 	pub id_offset: u8,

@@ -239,12 +245,12 @@ impl Session {
 	}

 	/// Checks if peer supports given capability
-	pub fn have_capability(&self, protocol: &str) -> bool {
+	pub fn have_capability(&self, protocol: [u8; 3]) -> bool {
 		self.info.capabilities.iter().any(|c| c.protocol == protocol)
 	}

 	/// Checks if peer supports given capability
-	pub fn capability_version(&self, protocol: &str) -> Option<u8> {
+	pub fn capability_version(&self, protocol: [u8; 3]) -> Option<u8> {
 		self.info.capabilities.iter().filter_map(|c| if c.protocol == protocol { Some(c.version) } else { None }).max()
 	}

@@ -270,10 +276,10 @@ impl Session {
 	}

 	/// Send a protocol packet to peer.
-	pub fn send_packet<Message>(&mut self, io: &IoContext<Message>, protocol: &str, packet_id: u8, data: &[u8]) -> Result<(), NetworkError>
+	pub fn send_packet<Message>(&mut self, io: &IoContext<Message>, protocol: [u8; 3], packet_id: u8, data: &[u8]) -> Result<(), NetworkError>
 		where Message: Send + Sync + Clone {
 		if self.info.capabilities.is_empty() || !self.had_hello {
-			debug!(target: "network", "Sending to unconfirmed session {}, protocol: {}, packet: {}", self.token(), protocol, packet_id);
+			debug!(target: "network", "Sending to unconfirmed session {}, protocol: {}, packet: {}", self.token(), str::from_utf8(&protocol[..]).unwrap_or("??"), packet_id);
 			return Err(From::from(NetworkError::BadProtocol));
 		}
 		if self.expired() {
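The decoder above now pulls the capability name out of RLP as a `Vec<u8>`, rejects anything that is not exactly three bytes, and copies it into the fixed-size array. The same conversion in isolation, as a hedged sketch (`to_protocol_id` is not a function in the diff):

```rust
fn to_protocol_id(raw: &[u8]) -> Result<[u8; 3], &'static str> {
	if raw.len() != 3 {
		return Err("Invalid subprotocol string length. Should be 3");
	}
	let mut id = [0u8; 3];
	// clone_from_slice panics on a length mismatch, hence the explicit check above.
	id.clone_from_slice(raw);
	Ok(id)
}
```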


@@ -41,7 +41,7 @@ impl TestProtocol {
 	/// Creates and register protocol with the network service
 	pub fn register(service: &mut NetworkService, drop_session: bool) -> Arc<TestProtocol> {
 		let handler = Arc::new(TestProtocol::new(drop_session));
-		service.register_protocol(handler.clone(), "test", &[42u8, 43u8]).expect("Error registering test protocol handler");
+		service.register_protocol(handler.clone(), *b"tst", &[42u8, 43u8]).expect("Error registering test protocol handler");
 		handler
 	}

@@ -93,7 +93,7 @@ impl NetworkProtocolHandler for TestProtocol {
 fn net_service() {
 	let service = NetworkService::new(NetworkConfiguration::new_local()).expect("Error creating network service");
 	service.start().unwrap();
-	service.register_protocol(Arc::new(TestProtocol::new(false)), "myproto", &[1u8]).unwrap();
+	service.register_protocol(Arc::new(TestProtocol::new(false)), *b"myp", &[1u8]).unwrap();
 }

 #[test]


@@ -143,12 +143,12 @@ impl CompactionProfile {
 }

 /// Database configuration
-#[derive(Clone, Copy)]
+#[derive(Clone)]
 pub struct DatabaseConfig {
 	/// Max number of open files.
 	pub max_open_files: i32,
-	/// Cache-size
-	pub cache_size: Option<usize>,
+	/// Cache sizes (in MiB) for specific columns.
+	pub cache_sizes: HashMap<Option<u32>, usize>,
 	/// Compaction profile
 	pub compaction: CompactionProfile,
 	/// Set number of columns

@@ -159,17 +159,23 @@ pub struct DatabaseConfig {
 impl DatabaseConfig {
 	/// Create new `DatabaseConfig` with default parameters and specified set of columns.
+	/// Note that cache sizes must be explicitly set.
 	pub fn with_columns(columns: Option<u32>) -> Self {
 		let mut config = Self::default();
 		config.columns = columns;
 		config
 	}
+
+	/// Set the column cache size in MiB.
+	pub fn set_cache(&mut self, col: Option<u32>, size: usize) {
+		self.cache_sizes.insert(col, size);
+	}
 }

 impl Default for DatabaseConfig {
 	fn default() -> DatabaseConfig {
 		DatabaseConfig {
-			cache_size: None,
+			cache_sizes: HashMap::new(),
 			max_open_files: 512,
 			compaction: CompactionProfile::default(),
 			columns: None,

@@ -213,6 +219,9 @@ impl Database {
 	/// Open database file. Creates if it does not exist.
 	pub fn open(config: &DatabaseConfig, path: &str) -> Result<Database, String> {
+		// default cache size for columns not specified.
+		const DEFAULT_CACHE: usize = 2;
+
 		let mut opts = Options::new();
 		if let Some(rate_limit) = config.compaction.write_rate_limit {
 			try!(opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit)));

@@ -232,17 +241,22 @@ impl Database {
 		let mut cf_options = Vec::with_capacity(config.columns.unwrap_or(0) as usize);

-		for _ in 0 .. config.columns.unwrap_or(0) {
+		for col in 0 .. config.columns.unwrap_or(0) {
 			let mut opts = Options::new();
 			opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
 			opts.set_target_file_size_base(config.compaction.initial_file_size);
 			opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);
-			if let Some(cache_size) = config.cache_size {
+
+			let col_opt = config.columns.map(|_| col);
+			{
+				let cache_size = config.cache_sizes.get(&col_opt).cloned().unwrap_or(DEFAULT_CACHE);
 				let mut block_opts = BlockBasedOptions::new();
-				// all goes to read cache
+				// all goes to read cache.
 				block_opts.set_cache(Cache::new(cache_size * 1024 * 1024));
 				opts.set_block_based_table_factory(&block_opts);
 			}
+
 			cf_options.push(opts);
 		}
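`Copy` has to go because `HashMap` is not `Copy`; in exchange, the read cache can be sized per column, with unspecified columns falling back to the 2 MiB `DEFAULT_CACHE` at open time. A hedged usage sketch against the API shown above (the `::kvdb` import path mirrors how the migration module imports it):

```rust
use ::kvdb::{Database, DatabaseConfig};

fn open_with_caches(path: &str) -> Result<Database, String> {
	let mut config = DatabaseConfig::with_columns(Some(3));
	config.set_cache(Some(0), 64); // 64 MiB of read cache for column 0
	config.set_cache(Some(1), 8);  // 8 MiB for column 1
	// Column 2 gets DEFAULT_CACHE (2 MiB) since nothing was set for it.
	Database::open(&config, path)
}
```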


@@ -20,6 +20,7 @@ mod tests;

 use std::collections::BTreeMap;
 use std::fs;
+use std::fmt;
 use std::path::{Path, PathBuf};

 use ::kvdb::{CompactionProfile, Database, DatabaseConfig, DBTransaction};

@@ -96,6 +97,17 @@ pub enum Error {
 	Custom(String),
 }

+impl fmt::Display for Error {
+	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+		match *self {
+			Error::CannotAddMigration => write!(f, "Cannot add migration"),
+			Error::MigrationImpossible => write!(f, "Migration impossible"),
+			Error::Io(ref err) => write!(f, "{}", err),
+			Error::Custom(ref err) => write!(f, "{}", err),
+		}
+	}
+}
+
 impl From<::std::io::Error> for Error {
 	fn from(e: ::std::io::Error) -> Self {
 		Error::Io(e)

@@ -104,6 +116,8 @@ impl From<::std::io::Error> for Error {

 /// A generalized migration from the given db to a destination db.
 pub trait Migration: 'static {
+	/// Number of columns in the database before the migration.
+	fn pre_columns(&self) -> Option<u32> { self.columns() }
 	/// Number of columns in database after the migration.
 	fn columns(&self) -> Option<u32>;
 	/// Version of the database after the migration.

@@ -195,6 +209,7 @@ impl Manager {
 			Some(last) => migration.version() > last.version(),
 			None => true,
 		};
+
 		match is_new {
 			true => Ok(self.migrations.push(Box::new(migration))),
 			false => Err(Error::CannotAddMigration),

@@ -205,12 +220,14 @@ impl Manager {
 	/// and producing a path where the final migration lives.
 	pub fn execute(&mut self, old_path: &Path, version: u32) -> Result<PathBuf, Error> {
 		let config = self.config.clone();
-		let columns = self.no_of_columns_at(version);
 		let migrations = self.migrations_from(version);
 		if migrations.is_empty() { return Err(Error::MigrationImpossible) };
+
+		let columns = migrations.iter().find(|m| m.version() == version).and_then(|m| m.pre_columns());
+
 		let mut db_config = DatabaseConfig {
 			max_open_files: 64,
-			cache_size: None,
+			cache_sizes: Default::default(),
 			compaction: config.compaction_profile,
 			columns: columns,
 			wal: true,

@@ -267,14 +284,6 @@ impl Manager {
 	fn migrations_from(&mut self, version: u32) -> Vec<&mut Box<Migration>> {
 		self.migrations.iter_mut().filter(|m| m.version() > version).collect()
 	}
-
-	fn no_of_columns_at(&self, version: u32) -> Option<u32> {
-		let migration = self.migrations.iter().find(|m| m.version() == version);
-		match migration {
-			Some(m) => m.columns(),
-			None => None
-		}
-	}
 }

 /// Prints a dot every `max` ticks
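`pre_columns()` defaults to `columns()`, so migrations that leave the column count untouched need no changes; only a migration that reshapes the database overrides it, and `execute` now uses it to open the source database with the pre-migration column count. A sketch of the trait-default interplay, where `MigrationShape` is an illustrative stand-in for the relevant slice of the real `Migration` trait:

```rust
trait MigrationShape {
	fn columns(&self) -> Option<u32>;
	// Default: same shape before and after, matching existing migrations.
	fn pre_columns(&self) -> Option<u32> { self.columns() }
}

struct Unchanged; // two columns in, two columns out
impl MigrationShape for Unchanged {
	fn columns(&self) -> Option<u32> { Some(2) }
}

struct AddsOne; // columnless database in, one column out (like AddsColumn below)
impl MigrationShape for AddsOne {
	fn columns(&self) -> Option<u32> { Some(1) }
	fn pre_columns(&self) -> Option<u32> { None }
}
```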


@@ -19,7 +19,7 @@
 //! are performed in temp sub-directories.

 use common::*;
-use migration::{Config, SimpleMigration, Manager};
+use migration::{Batch, Config, Error, SimpleMigration, Migration, Manager};
 use kvdb::Database;
 use devtools::RandomTempPath;

@@ -62,11 +62,10 @@ impl SimpleMigration for Migration0 {
 	fn version(&self) -> u32 { 1 }

-	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
-		let mut key = key;
+	fn simple_migrate(&mut self, mut key: Vec<u8>, mut value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
 		key.push(0x11);
-		let mut value = value;
 		value.push(0x22);
+
 		Some((key, value))
 	}
 }

@@ -83,6 +82,31 @@ impl SimpleMigration for Migration1 {
 	}
 }

+struct AddsColumn;
+
+impl Migration for AddsColumn {
+	fn pre_columns(&self) -> Option<u32> { None }
+
+	fn columns(&self) -> Option<u32> { Some(1) }
+
+	fn version(&self) -> u32 { 1 }
+
+	fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
+		let mut batch = Batch::new(config, col);
+
+		for (key, value) in source.iter(col) {
+			try!(batch.insert(key.to_vec(), value.to_vec(), dest));
+		}
+
+		if col == Some(1) {
+			try!(batch.insert(vec![1, 2, 3], vec![4, 5, 6], dest));
+		}
+
+		batch.commit(dest)
+	}
+}
+
 #[test]
 fn one_simple_migration() {
 	let dir = RandomTempPath::create_dir();

@@ -189,3 +213,16 @@ fn is_migration_needed() {
 	assert!(manager.is_needed(1));
 	assert!(!manager.is_needed(2));
 }
+
+#[test]
+fn pre_columns() {
+	let mut manager = Manager::new(Config::default());
+	manager.add_migration(AddsColumn).unwrap();
+
+	let dir = RandomTempPath::create_dir();
+	let db_path = db_path(dir.as_path());
+
+	// this shouldn't fail to open the database even though it's one column
+	// short of the one before it.
+	manager.execute(&db_path, 0).unwrap();
+}