Backports for beta (#1888)
* Sync to peers with confirmed fork block only (#1863)
* Fixing gas conversion
* Validating u256->usize conversion
* Update cache usage on committing block info (#1871)
* Use UntrustedRlp for block verification (#1872)
* take snapshot at specified block and slightly better informants (#1873)
* prettier informant for snapshot creation
* allow taking snapshot at a given block
* minor tweaks
* elaborate on cli
* Send new block hashes to all peers (#1875)
* Reduce max open files (#1876)
* ws-rs update
* Fixing test
* Fixing miner deadlock (#1885)
This commit is contained in:
parent 6336bdecf3
commit 52ac5a00f5
Cargo.lock (generated) — lockfile changes accompanying the ws-rs update and related dependency bumps:

* ws moves from 0.5.0 (git+https://github.com/ethcore/ws-rs.git, branch stable) to 0.5.2 (branch mio-upstream-stable); its dependencies change from mio 0.5.1 / sha1 0.1.1 / url 1.1.1 to bytes 0.4.0-dev (git+https://github.com/carllerche/bytes), mio 0.6.0-dev (git+https://github.com/carllerche/mio, rev 62ec763c9cc34d8a452ed0392c575c50ddd5fc8d), sha1 0.2.0, slab 0.2.0 (git+https://github.com/carllerche/slab, rev 5476efcafb) and url 1.2.0. Dependent entries are updated to match.
* New package entries: byteorder 0.5.2, bytes 0.4.0-dev (git, depends on stable-heap 0.1.0), mio 0.6.0-dev (git), nix 0.6.0, slab 0.2.0 (git), stable-heap 0.1.0 (git+https://github.com/carllerche/stable-heap, rev 3c5cd1ca47).
* Version bumps in existing entries: url 1.1.1 -> 1.2.0 (cookie, hyper and other dependents), miow 0.1.2 -> 0.1.3, sha1 0.1.1 -> 0.2.0 (now depending on byteorder 0.5.2).
* Checksum entries are added or updated for the packages above.
@@ -775,12 +775,20 @@ impl BlockChain {

/// Apply pending insertion updates
pub fn commit(&self) {
let mut best_block = self.best_block.write();
let mut write_hashes = self.block_hashes.write();
let mut write_txs = self.transaction_addresses.write();
let mut pending_best_block = self.pending_best_block.write();
let mut pending_write_hashes = self.pending_block_hashes.write();
let mut pending_write_txs = self.pending_transaction_addresses.write();

for n in pending_write_hashes.keys() {
self.note_used(CacheID::BlockHashes(*n));
}
for hash in pending_write_txs.keys() {
self.note_used(CacheID::TransactionAddresses(hash.clone()));
}

let mut best_block = self.best_block.write();
let mut write_hashes = self.block_hashes.write();
let mut write_txs = self.transaction_addresses.write();
// update best block
if let Some(block) = pending_best_block.take() {
*best_block = block;
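The reworked commit() records cache usage for the pending entries before the live caches are updated, then drains the pending maps under the write locks. A minimal, self-contained sketch of that pattern, using a generic HashMap/RwLock pair instead of the real BlockChain fields (illustrative names only):

use std::collections::HashMap;
use std::sync::RwLock;

struct Cache {
    live: RwLock<HashMap<u64, String>>,
    pending: RwLock<HashMap<u64, String>>,
}

impl Cache {
    fn note_used(&self, _key: u64) {
        // Hypothetical LRU bookkeeping; the real code calls self.note_used(CacheID::...).
    }

    fn commit(&self) {
        let mut pending = self.pending.write().unwrap();

        // Record usage for everything that is about to land in the live cache...
        for key in pending.keys() {
            self.note_used(*key);
        }

        // ...then take the live-cache lock and move the pending entries over.
        let mut live = self.live.write().unwrap();
        live.extend(pending.drain());
    }
}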
@@ -594,19 +594,35 @@ impl Client {
}
}

/// Take a snapshot.
pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W) -> Result<(), ::error::Error> {
/// Take a snapshot at the given block.
/// If the ID given is "latest", this will default to 1000 blocks behind.
pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), ::error::Error> {
let db = self.state_db.lock().boxed_clone();
let best_block_number = self.chain_info().best_block_number;
let start_block_number = if best_block_number > 1000 {
best_block_number - 1000
} else {
0
};
let start_hash = self.block_hash(BlockID::Number(start_block_number))
.expect("blocks within HISTORY are always stored.");
let block_number = try!(self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at)));

try!(snapshot::take_snapshot(&self.chain, start_hash, db.as_hashdb(), writer));
if best_block_number > HISTORY + block_number && db.is_pruned() {
return Err(snapshot::Error::OldBlockPrunedDB.into());
}

let start_hash = match at {
BlockID::Latest => {
let start_num = if best_block_number > 1000 {
best_block_number - 1000
} else {
0
};

self.block_hash(BlockID::Number(start_num))
.expect("blocks within HISTORY are always stored.")
}
_ => match self.block_hash(at) {
Some(hash) => hash,
None => return Err(snapshot::Error::InvalidStartingBlock(at).into()),
},
};

try!(snapshot::take_snapshot(&self.chain, start_hash, db.as_hashdb(), writer, p));

Ok(())
}
@@ -151,10 +151,14 @@ impl CostType for usize {
}

fn from_u256(val: U256) -> Result<Self> {
if U256::from(val.low_u64()) != val {
let res = val.low_u64() as usize;

// validate if value fits into usize
if U256::from(res) != val {
return Err(Error::OutOfGas);
}
Ok(val.low_u64() as usize)

Ok(res)
}

fn as_usize(&self) -> usize {

@@ -191,6 +195,7 @@ pub trait Evm {


#[test]
#[cfg(test)]
fn should_calculate_overflow_mul_shr_without_overflow() {
// given
let num = 1048576;

@@ -207,6 +212,7 @@ fn should_calculate_overflow_mul_shr_without_overflow() {
}

#[test]
#[cfg(test)]
fn should_calculate_overflow_mul_shr_with_overflow() {
// given
let max = ::std::u64::MAX;

@@ -225,3 +231,15 @@ fn should_calculate_overflow_mul_shr_with_overflow() {
assert!(o1);
}

#[test]
#[cfg(test)]
fn should_validate_u256_to_usize_conversion() {
// given
let v = U256::from(::std::usize::MAX) + U256::from(1);

// when
let res = usize::from_u256(v);

// then
assert!(res.is_err());
}
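The guarded conversion above avoids silently truncating a U256 gas value. The same round-trip check, sketched stand-alone with u128 standing in for U256 (illustrative names, not the ethcore types):

fn checked_to_usize(val: u128) -> Result<usize, &'static str> {
    // Narrow first, then widen back and compare; any loss means the value
    // does not fit, so reject it instead of truncating (the EVM maps this to OutOfGas).
    let res = val as usize;
    if res as u128 != val {
        return Err("value does not fit into usize");
    }
    Ok(res)
}

fn main() {
    assert_eq!(checked_to_usize(1_048_576), Ok(1_048_576));
    // Mirrors the new test: usize::MAX + 1 must be rejected.
    assert!(checked_to_usize(usize::MAX as u128 + 1).is_err());
}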
@@ -515,11 +515,11 @@ impl<Cost: CostType> Interpreter<Cost> {
Ok(InstructionResult::Ok)
}

fn copy_data_to_memory(&mut self, stack: &mut Stack<U256>, data: &[u8]) {
fn copy_data_to_memory(&mut self, stack: &mut Stack<U256>, source: &[u8]) {
let dest_offset = stack.pop_back();
let source_offset = stack.pop_back();
let size = stack.pop_back();
let source_size = U256::from(data.len());
let source_size = U256::from(source.len());

let output_end = match source_offset > source_size || size > source_size || source_offset + size > source_size {
true => {

@@ -531,14 +531,14 @@ impl<Cost: CostType> Interpreter<Cost> {
for i in zero_slice.iter_mut() {
*i = 0;
}
data.len()
source.len()
},
false => (size.low_u64() + source_offset.low_u64()) as usize
};

if source_offset < source_size {
let output_begin = source_offset.low_u64() as usize;
self.mem.write_slice(dest_offset, &data[output_begin..output_end]);
self.mem.write_slice(dest_offset, &source[output_begin..output_end]);
}
}
@@ -168,12 +168,11 @@ pub struct Miner {
// NOTE [ToDr] When locking always lock in this order!
transaction_queue: Arc<Mutex<TransactionQueue>>,
sealing_work: Mutex<SealingWork>,

next_allowed_reseal: Mutex<Instant>,
sealing_block_last_request: Mutex<u64>,
// for sealing...
options: MinerOptions,

next_allowed_reseal: Mutex<Instant>,
sealing_block_last_request: Mutex<u64>,
gas_range_target: RwLock<(U256, U256)>,
author: RwLock<Address>,
extra_data: RwLock<Bytes>,

@@ -736,11 +735,11 @@ impl MinerService for Miner {
fn update_sealing(&self, chain: &MiningBlockChainClient) {
trace!(target: "miner", "update_sealing");
let requires_reseal = {
let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions();
let mut sealing_work = self.sealing_work.lock();
if sealing_work.enabled {
trace!(target: "miner", "update_sealing: sealing enabled");
let current_no = chain.chain_info().best_block_number;
let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions();
let last_request = *self.sealing_block_last_request.lock();
let should_disable_sealing = !self.forced_sealing()
&& !has_local_transactions
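The deadlock fixed here (#1885) appears to have come from taking the transaction_queue lock while already holding sealing_work, against the lock-order note at the top of the struct; the change moves the transaction_queue access before the sealing_work lock. A minimal sketch of that rule, with two toy locks standing in for the real fields (assumed simplification, not the actual Miner):

use std::sync::Mutex;

struct Miner {
    // NOTE: when locking, always lock in this order!
    transaction_queue: Mutex<Vec<String>>,
    sealing_work: Mutex<Vec<String>>,
}

impl Miner {
    fn update_sealing(&self) {
        // Read what is needed from the queue *before* touching sealing_work, so the
        // two locks are never held in the opposite order on any code path.
        let has_local_transactions = !self.transaction_queue.lock().unwrap().is_empty();

        let sealing_work = self.sealing_work.lock().unwrap();
        if !sealing_work.is_empty() && has_local_transactions {
            // ... decide whether to reseal ...
        }
    }
}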
@@ -18,6 +18,8 @@

use std::fmt;

use ids::BlockID;

use util::H256;
use util::trie::TrieError;
use util::rlp::DecoderError;

@@ -26,9 +28,13 @@ use util::rlp::DecoderError;
#[derive(Debug)]
pub enum Error {
/// Invalid starting block for snapshot.
InvalidStartingBlock(H256),
InvalidStartingBlock(BlockID),
/// Block not found.
BlockNotFound(H256),
/// Incomplete chain.
IncompleteChain,
/// Old starting block in a pruned database.
OldBlockPrunedDB,
/// Trie error.
Trie(TrieError),
/// Decoder error.

@@ -40,8 +46,11 @@ pub enum Error {
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::InvalidStartingBlock(ref hash) => write!(f, "Invalid starting block hash: {}", hash),
Error::InvalidStartingBlock(ref id) => write!(f, "Invalid starting block: {:?}", id),
Error::BlockNotFound(ref hash) => write!(f, "Block not found in chain: {}", hash),
Error::IncompleteChain => write!(f, "Cannot create snapshot due to incomplete chain."),
Error::OldBlockPrunedDB => write!(f, "Attempted to create a snapshot at an old block while using \
a pruned database. Please re-run with the --pruning archive flag."),
Error::Io(ref err) => err.fmt(f),
Error::Decoder(ref err) => err.fmt(f),
Error::Trie(ref err) => err.fmt(f),
@@ -18,10 +18,12 @@

use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

use account_db::{AccountDB, AccountDBMut};
use blockchain::{BlockChain, BlockProvider};
use engines::Engine;
use ids::BlockID;
use views::BlockView;

use util::{Bytes, Hashable, HashDB, snappy, TrieDB, TrieDBMut, TrieMut};

@@ -58,9 +60,49 @@ const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;
// How many blocks to include in a snapshot, starting from the head of the chain.
const SNAPSHOT_BLOCKS: u64 = 30000;

/// A progress indicator for snapshots.
#[derive(Debug)]
pub struct Progress {
accounts: AtomicUsize,
blocks: AtomicUsize,
size: AtomicUsize, // Todo [rob] use Atomicu64 when it stabilizes.
done: AtomicBool,
}

impl Progress {
/// Create a new progress indicator.
pub fn new() -> Self {
Progress {
accounts: AtomicUsize::new(0),
blocks: AtomicUsize::new(0),
size: AtomicUsize::new(0),
done: AtomicBool::new(false),
}
}

/// Get the number of accounts snapshotted thus far.
pub fn accounts(&self) -> usize { self.accounts.load(Ordering::Relaxed) }

/// Get the number of blocks snapshotted thus far.
pub fn blocks(&self) -> usize { self.blocks.load(Ordering::Relaxed) }

/// Get the written size of the snapshot in bytes.
pub fn size(&self) -> usize { self.size.load(Ordering::Relaxed) }

/// Whether the snapshot is complete.
pub fn done(&self) -> bool { self.done.load(Ordering::SeqCst) }

}
/// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer.
pub fn take_snapshot<W: SnapshotWriter + Send>(chain: &BlockChain, start_block_hash: H256, state_db: &HashDB, writer: W) -> Result<(), Error> {
let start_header = try!(chain.block_header(&start_block_hash).ok_or(Error::InvalidStartingBlock(start_block_hash)));
pub fn take_snapshot<W: SnapshotWriter + Send>(
chain: &BlockChain,
block_at: H256,
state_db: &HashDB,
writer: W,
p: &Progress
) -> Result<(), Error> {
let start_header = try!(chain.block_header(&block_at)
.ok_or(Error::InvalidStartingBlock(BlockID::Hash(block_at))));
let state_root = start_header.state_root();
let number = start_header.number();

@@ -68,8 +110,8 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(chain: &BlockChain, start_block_h

let writer = Mutex::new(writer);
let (state_hashes, block_hashes) = try!(scope(|scope| {
let block_guard = scope.spawn(|| chunk_blocks(chain, (number, start_block_hash), &writer));
let state_res = chunk_state(state_db, state_root, &writer);
let block_guard = scope.spawn(|| chunk_blocks(chain, (number, block_at), &writer, p));
let state_res = chunk_state(state_db, state_root, &writer, p);

state_res.and_then(|state_hashes| {
block_guard.join().map(|block_hashes| (state_hashes, block_hashes))

@@ -83,11 +125,13 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(chain: &BlockChain, start_block_h
block_hashes: block_hashes,
state_root: *state_root,
block_number: number,
block_hash: start_block_hash,
block_hash: block_at,
};

try!(writer.into_inner().finish(manifest_data));

p.done.store(true, Ordering::SeqCst);

Ok(())
}

@@ -100,6 +144,7 @@ struct BlockChunker<'a> {
hashes: Vec<H256>,
snappy_buffer: Vec<u8>,
writer: &'a Mutex<SnapshotWriter + 'a>,
progress: &'a Progress,
}

impl<'a> BlockChunker<'a> {

@@ -162,7 +207,8 @@ impl<'a> BlockChunker<'a> {

let parent_total_difficulty = parent_details.total_difficulty;

let mut rlp_stream = RlpStream::new_list(3 + self.rlps.len());
let num_entries = self.rlps.len();
let mut rlp_stream = RlpStream::new_list(3 + num_entries);
rlp_stream.append(&parent_number).append(&parent_hash).append(&parent_total_difficulty);

for pair in self.rlps.drain(..) {

@@ -178,6 +224,9 @@ impl<'a> BlockChunker<'a> {
try!(self.writer.lock().write_block_chunk(hash, compressed));
trace!(target: "snapshot", "wrote block chunk. hash: {}, size: {}, uncompressed size: {}", hash.hex(), size, raw_data.len());

self.progress.size.fetch_add(size, Ordering::SeqCst);
self.progress.blocks.fetch_add(num_entries, Ordering::SeqCst);

self.hashes.push(hash);
Ok(())
}

@@ -189,7 +238,7 @@ impl<'a> BlockChunker<'a> {
/// The path parameter is the directory to store the block chunks in.
/// This function assumes the directory exists already.
/// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis.
pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_block_info: (u64, H256), writer: &Mutex<SnapshotWriter + 'a>) -> Result<Vec<H256>, Error> {
pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_block_info: (u64, H256), writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
let (start_number, start_hash) = start_block_info;

let first_hash = if start_number < SNAPSHOT_BLOCKS {

@@ -197,8 +246,7 @@ pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_block_info: (u64, H256), wr
chain.genesis_hash()
} else {
let first_num = start_number - SNAPSHOT_BLOCKS;
chain.block_hash(first_num)
.expect("number before best block number; whole chain is stored; qed")
try!(chain.block_hash(first_num).ok_or(Error::IncompleteChain))
};

let mut chunker = BlockChunker {

@@ -208,6 +256,7 @@ pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_block_info: (u64, H256), wr
hashes: Vec::new(),
snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
writer: writer,
progress: progress,
};

try!(chunker.chunk_all(first_hash));

@@ -222,6 +271,7 @@ struct StateChunker<'a> {
cur_size: usize,
snappy_buffer: Vec<u8>,
writer: &'a Mutex<SnapshotWriter + 'a>,
progress: &'a Progress,
}

impl<'a> StateChunker<'a> {

@@ -249,7 +299,8 @@ impl<'a> StateChunker<'a> {
// Write out the buffer to disk, pushing the created chunk's hash to
// the list.
fn write_chunk(&mut self) -> Result<(), Error> {
let mut stream = RlpStream::new_list(self.rlps.len());
let num_entries = self.rlps.len();
let mut stream = RlpStream::new_list(num_entries);
for rlp in self.rlps.drain(..) {
stream.append_raw(&rlp, 1);
}

@@ -263,6 +314,9 @@ impl<'a> StateChunker<'a> {
try!(self.writer.lock().write_state_chunk(hash, compressed));
trace!(target: "snapshot", "wrote state chunk. size: {}, uncompressed size: {}", compressed_size, raw_data.len());

self.progress.accounts.fetch_add(num_entries, Ordering::SeqCst);
self.progress.size.fetch_add(compressed_size, Ordering::SeqCst);

self.hashes.push(hash);
self.cur_size = 0;

@@ -275,7 +329,7 @@ impl<'a> StateChunker<'a> {
///
/// Returns a list of hashes of chunks created, or any error it may
/// have encountered.
pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter + 'a>) -> Result<Vec<H256>, Error> {
pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
let account_trie = try!(TrieDB::new(db, &root));

let mut chunker = StateChunker {

@@ -284,10 +338,9 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter +
cur_size: 0,
snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
writer: writer,
progress: progress,
};

trace!(target: "snapshot", "beginning state chunking");

// account_key here is the address' hash.
for (account_key, account_data) in account_trie.iter() {
let account = Account::from_thin_rlp(account_data);

@@ -383,6 +436,7 @@ impl StateRebuilder {
let chunk_size = account_fat_rlps.len() / ::num_cpus::get() + 1;

// build account tries in parallel.
// Todo [rob] keep a thread pool around so we don't do this per-chunk.
try!(scope(|scope| {
let mut handles = Vec::new();
for (account_chunk, out_pairs_chunk) in account_fat_rlps.chunks(chunk_size).zip(pairs.chunks_mut(chunk_size)) {
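Progress is shared between the chunking threads and an informant: the writer side bumps the counters with fetch_add while the reader only loads them, so no lock is needed. A small self-contained sketch of that pattern with plain AtomicUsize/AtomicBool (illustrative, not the snapshot types):

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::thread;
use std::time::Duration;

fn main() {
    let size = Arc::new(AtomicUsize::new(0));
    let done = Arc::new(AtomicBool::new(false));

    let (w_size, w_done) = (size.clone(), done.clone());
    let worker = thread::spawn(move || {
        for chunk in 0..5 {
            // Each written chunk adds its byte count, as write_chunk does above.
            w_size.fetch_add(1024 * (chunk + 1), Ordering::SeqCst);
            thread::sleep(Duration::from_millis(10));
        }
        w_done.store(true, Ordering::SeqCst);
    });

    // The informant side only ever loads the counters.
    while !done.load(Ordering::SeqCst) {
        println!("written so far: {} bytes", size.load(Ordering::SeqCst));
        thread::sleep(Duration::from_millis(10));
    }
    worker.join().unwrap();
}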
@@ -20,7 +20,7 @@ use devtools::RandomTempPath;

use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
use blockchain::BlockChain;
use snapshot::{chunk_blocks, BlockRebuilder};
use snapshot::{chunk_blocks, BlockRebuilder, Progress};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};

use util::{Mutex, snappy};

@@ -55,7 +55,7 @@ fn chunk_and_restore(amount: u64) {

// snapshot it.
let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap());
let block_hashes = chunk_blocks(&bc, (amount, best_hash), &writer).unwrap();
let block_hashes = chunk_blocks(&bc, (amount, best_hash), &writer, &Progress::new()).unwrap();
writer.into_inner().finish(::snapshot::ManifestData {
state_hashes: Vec::new(),
block_hashes: block_hashes,

@@ -16,7 +16,7 @@

//! State snapshotting tests.

use snapshot::{chunk_state, StateRebuilder};
use snapshot::{chunk_state, Progress, StateRebuilder};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
use super::helpers::{compare_dbs, StateProducer};

@@ -48,7 +48,7 @@ fn snap_and_restore() {
let state_root = producer.state_root();
let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap());

let state_hashes = chunk_state(&old_db, &state_root, &writer).unwrap();
let state_hashes = chunk_state(&old_db, &state_root, &writer, &Progress::new()).unwrap();

writer.into_inner().finish(::snapshot::ManifestData {
state_hashes: state_hashes,
@@ -40,7 +40,8 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res
try!(verify_header(&header, engine));
try!(verify_block_integrity(bytes, &header.transactions_root, &header.uncles_hash));
try!(engine.verify_block_basic(&header, Some(bytes)));
for u in Rlp::new(bytes).at(2).iter().map(|rlp| rlp.as_val::<Header>()) {
for u in try!(UntrustedRlp::new(bytes).at(2)).iter().map(|rlp| rlp.as_val::<Header>()) {
let u = try!(u);
try!(verify_header(&u, engine));
try!(engine.verify_block_basic(&u, None));
}

@@ -58,8 +59,8 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res
/// Returns a `PreverifiedBlock` structure populated with transactions
pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result<PreverifiedBlock, Error> {
try!(engine.verify_block_unordered(&header, Some(&bytes)));
for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::<Header>()) {
try!(engine.verify_block_unordered(&u, None));
for u in try!(UntrustedRlp::new(&bytes).at(2)).iter().map(|rlp| rlp.as_val::<Header>()) {
try!(engine.verify_block_unordered(&try!(u), None));
}
// Verify transactions.
let mut transactions = Vec::new();

@@ -84,7 +85,7 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &
try!(verify_parent(&header, &parent));
try!(engine.verify_block_family(&header, &parent, Some(bytes)));

let num_uncles = Rlp::new(bytes).at(2).item_count();
let num_uncles = try!(UntrustedRlp::new(bytes).at(2)).item_count();
if num_uncles != 0 {
if num_uncles > engine.maximum_uncle_count() {
return Err(From::from(BlockError::TooManyUncles(OutOfBounds { min: None, max: Some(engine.maximum_uncle_count()), found: num_uncles })));

@@ -106,7 +107,8 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &
}
}

for uncle in Rlp::new(bytes).at(2).iter().map(|rlp| rlp.as_val::<Header>()) {
for uncle in try!(UntrustedRlp::new(bytes).at(2)).iter().map(|rlp| rlp.as_val::<Header>()) {
let uncle = try!(uncle);
if excluded.contains(&uncle.hash()) {
return Err(From::from(BlockError::UncleInChain(uncle.hash())))
}

@@ -210,13 +212,13 @@ fn verify_parent(header: &Header, parent: &Header) -> Result<(), Error> {

/// Verify block data against header: transactions root and uncles hash.
fn verify_block_integrity(block: &[u8], transactions_root: &H256, uncles_hash: &H256) -> Result<(), Error> {
let block = Rlp::new(block);
let tx = block.at(1);
let block = UntrustedRlp::new(block);
let tx = try!(block.at(1));
let expected_root = &ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec()).collect()); //TODO: get rid of vectors here
if expected_root != transactions_root {
return Err(From::from(BlockError::InvalidTransactionsRoot(Mismatch { expected: expected_root.clone(), found: transactions_root.clone() })))
}
let expected_uncles = &block.at(2).as_raw().sha3();
let expected_uncles = &try!(block.at(2)).as_raw().sha3();
if expected_uncles != uncles_hash {
return Err(From::from(BlockError::InvalidUnclesHash(Mismatch { expected: expected_uncles.clone(), found: uncles_hash.clone() })))
}
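The switch from Rlp to UntrustedRlp matters because block bodies arrive from the network: in this codebase the trusted Rlp accessors panic on malformed input, while UntrustedRlp returns a decoder error that try!() can turn into a verification failure. A hedged sketch of that shape with toy Result-returning accessors (not the ethcore-util API):

#[derive(Debug)]
struct DecoderError;

struct UntrustedItem<'a> {
    fields: Vec<&'a [u8]>,
}

impl<'a> UntrustedItem<'a> {
    // `at` returns a Result instead of panicking when the index is missing.
    fn at(&self, index: usize) -> Result<&'a [u8], DecoderError> {
        self.fields.get(index).copied().ok_or(DecoderError)
    }
}

fn verify(item: &UntrustedItem) -> Result<(), DecoderError> {
    // A missing field becomes an error the caller can report as a bad block,
    // instead of a panic inside the verifier.
    let _uncles = item.at(2)?;
    Ok(())
}

fn main() {
    let item = UntrustedItem { fields: vec![&b"header"[..], &b"transactions"[..]] };
    assert!(verify(&item).is_err()); // index 2 is absent: rejected, not panicked
}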
@@ -48,10 +48,10 @@ macro_rules! impl_hash {
0 => $inner::from(0),
2 if value == "0x" => $inner::from(0),
_ if value.starts_with("0x") => try!($inner::from_str(&value[2..]).map_err(|_| {
Error::custom(format!("Invalid hex value {}.", value).as_ref())
Error::custom(format!("Invalid hex value {}.", value).as_str())
})),
_ => try!($inner::from_str(value).map_err(|_| {
Error::custom(format!("Invalid hex value {}.", value).as_ref())
Error::custom(format!("Invalid hex value {}.", value).as_str())
}))
};

@@ -70,10 +70,10 @@ impl Visitor for UintVisitor {
0 => U256::from(0),
2 if value.starts_with("0x") => U256::from(0),
_ if value.starts_with("0x") => try!(U256::from_str(&value[2..]).map_err(|_| {
Error::custom(format!("Invalid hex value {}.", value).as_ref())
Error::custom(format!("Invalid hex value {}.", value).as_str())
})),
_ => try!(U256::from_dec_str(value).map_err(|_| {
Error::custom(format!("Invalid decimal value {}.", value).as_ref())
Error::custom(format!("Invalid decimal value {}.", value).as_str())
}))
};
@@ -231,6 +231,12 @@ Import/Export Options:
--format FORMAT For import/export in given format. FORMAT must be
one of 'hex' and 'binary'.

Snapshot Options:
--at BLOCK Take a snapshot at the given block, which may be an
index, hash, or 'latest'. Note that taking snapshots at
non-recent blocks will only work with --pruning archive
[default: latest]

Virtual Machine Options:
--jitvm Enable the JIT VM.
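With the new flag, a snapshot at a specific height can be requested, for example (hypothetical invocation; the exact argument layout is whatever the usage string above defines): parity snapshot /path/to/snapshot.file --at 1200000. Omitting --at keeps the default of 'latest'.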
@@ -365,6 +371,7 @@ pub struct Args {
pub flag_version: bool,
pub flag_from: String,
pub flag_to: String,
pub flag_at: String,
pub flag_format: Option<String>,
pub flag_jitvm: bool,
pub flag_log_file: Option<String>,

@@ -171,6 +171,7 @@ impl Configuration {
file_path: self.args.arg_file.clone(),
wal: wal,
kind: snapshot::Kind::Take,
block_at: try!(to_block_id(&self.args.flag_at)),
};
Cmd::Snapshot(snapshot_cmd)
} else if self.args.cmd_restore {

@@ -186,6 +187,7 @@ impl Configuration {
file_path: self.args.arg_file.clone(),
wal: wal,
kind: snapshot::Kind::Restore,
block_at: try!(to_block_id("latest")), // unimportant.
};
Cmd::Snapshot(restore_cmd)
} else {
@@ -19,12 +19,15 @@
use std::time::Duration;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use ethcore_logger::{setup_log, Config as LogConfig};
use ethcore::snapshot::{RestorationStatus, SnapshotService};
use ethcore::snapshot::{Progress, RestorationStatus, SnapshotService};
use ethcore::snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
use ethcore::service::ClientService;
use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType};
use ethcore::miner::Miner;
use ethcore::ids::BlockID;

use cache::CacheConfig;
use params::{SpecType, Pruning};
use helpers::{to_client_config, execute_upgrades};

@@ -56,6 +59,7 @@ pub struct SnapshotCommand {
pub file_path: Option<String>,
pub wal: bool,
pub kind: Kind,
pub block_at: BlockID,
}

impl SnapshotCommand {

@@ -168,6 +172,7 @@ impl SnapshotCommand {
pub fn take_snapshot(self) -> Result<(), String> {
let file_path = try!(self.file_path.clone().ok_or("No file path provided.".to_owned()));
let file_path: PathBuf = file_path.into();
let block_at = self.block_at.clone();
let (service, _panic_handler) = try!(self.start_service());

warn!("Snapshots are currently experimental. File formats may be subject to change.");

@@ -175,11 +180,35 @@ impl SnapshotCommand {
let writer = try!(PackedWriter::new(&file_path)
.map_err(|e| format!("Failed to open snapshot writer: {}", e)));

if let Err(e) = service.client().take_snapshot(writer) {
let progress = Arc::new(Progress::new());
let p = progress.clone();
let informant_handle = ::std::thread::spawn(move || {
::std::thread::sleep(Duration::from_secs(5));

let mut last_size = 0;
while !p.done() {
let cur_size = p.size();
if cur_size != last_size {
last_size = cur_size;
info!("Snapshot: {} accounts {} blocks {} bytes", p.accounts(), p.blocks(), p.size());
} else {
info!("Snapshot: No progress since last update.");
}

::std::thread::sleep(Duration::from_secs(5));
}
});

if let Err(e) = service.client().take_snapshot(writer, block_at, &*progress) {
let _ = ::std::fs::remove_file(&file_path);
return Err(format!("Encountered fatal error while creating snapshot: {}", e));
}

info!("snapshot creation complete");

assert!(progress.done());
try!(informant_handle.join().map_err(|_| "failed to join logger thread"));

Ok(())
}
}
@@ -15,7 +15,7 @@ rand = "0.3.14"
jsonrpc-core = "2.0"
log = "0.3"
env_logger = "0.3"
ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "stable" }
ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "mio-upstream-stable" }
ethcore-util = { path = "../util" }
ethcore-io = { path = "../util/io" }
ethcore-rpc = { path = "../rpc" }
@@ -46,7 +46,7 @@ impl TimeProvider for DefaultTimeProvider {
}

/// No of seconds the hash is valid
const TIME_THRESHOLD: u64 = 2;
const TIME_THRESHOLD: u64 = 7;
const TOKEN_LENGTH: usize = 16;

/// Manages authorization codes for `SignerUIs`

@@ -102,7 +102,7 @@ impl<T: TimeProvider> AuthCodes<T> {
let now = self.now.now();
// check time
if time >= now + TIME_THRESHOLD || time <= now - TIME_THRESHOLD {
warn!(target: "signer", "Received old authentication request.");
warn!(target: "signer", "Received old authentication request. ({} vs {})", now, time);
return false;
}

@@ -169,8 +169,8 @@ mod tests {
fn should_return_false_if_hash_is_valid_but_time_is_invalid() {
// given
let code = "23521352asdfasdfadf";
let time = 105;
let time2 = 95;
let time = 107;
let time2 = 93;
let codes = AuthCodes::new(vec![code.into()], || 100);

// when
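The widened TIME_THRESHOLD gives a symmetric seven-second window around the server clock; the test values change from 105/95 to 107/93 because those are the first timestamps that the new threshold still rejects when now = 100. A tiny worked sketch of the same check:

fn is_time_valid(time: u64, now: u64, threshold: u64) -> bool {
    // Mirrors the guard above: values at or beyond the threshold are rejected.
    // (Assumes now >= threshold, as with real clock values.)
    !(time >= now + threshold || time <= now - threshold)
}

fn main() {
    assert!(is_time_valid(104, 100, 7));  // inside the window
    assert!(!is_time_valid(107, 100, 7)); // exactly on the upper bound: rejected
    assert!(!is_time_valid(93, 100, 7));  // exactly on the lower bound: rejected
}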
@@ -146,7 +146,9 @@ impl ws::Handler for Session {
fn on_message(&mut self, msg: ws::Message) -> ws::Result<()> {
let req = try!(msg.as_text());
match self.handler.handle_request(req) {
Some(res) => self.out.send(res),
Some(res) => {
self.out.send(res)
},
None => Ok(()),
}
}
@@ -199,6 +199,16 @@ enum PeerAsking {
Heads,
}

#[derive(Clone, Eq, PartialEq)]
enum ForkConfirmation {
/// Fork block confirmation pending.
Unconfirmed,
/// Peer's chain is too short to confirm the fork.
TooShort,
/// Fork is confirmed.
Confirmed,
}

#[derive(Clone)]
/// Syncing peer information
struct PeerInfo {

@@ -224,13 +234,17 @@ struct PeerInfo {
ask_time: f64,
/// Pending request is expired and result should be ignored
expired: bool,
/// Peer fork confirmed
confirmed: bool,
/// Peer fork confirmation status
confirmation: ForkConfirmation,
}

impl PeerInfo {
fn is_available(&self) -> bool {
self.confirmed && !self.expired
fn can_sync(&self) -> bool {
self.confirmation == ForkConfirmation::Confirmed && !self.expired
}

fn is_allowed(&self) -> bool {
self.confirmation != ForkConfirmation::Unconfirmed && !self.expired
}
}
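The three-state ForkConfirmation replaces the old boolean so that a peer whose chain is simply too short can still be counted and receive broadcasts (is_allowed), while block and transaction downloads keep requiring a positive confirmation (can_sync). A compact stand-alone sketch of the same gating (illustrative, trimmed-down Peer type):

#[derive(Clone, Copy, PartialEq, Eq)]
enum ForkConfirmation {
    Unconfirmed,
    TooShort,
    Confirmed,
}

struct Peer {
    confirmation: ForkConfirmation,
    expired: bool,
}

impl Peer {
    // Strict gate: only fully confirmed peers are used for syncing.
    fn can_sync(&self) -> bool {
        self.confirmation == ForkConfirmation::Confirmed && !self.expired
    }

    // Looser gate: anything except a pending confirmation may be counted and propagated to.
    fn is_allowed(&self) -> bool {
        self.confirmation != ForkConfirmation::Unconfirmed && !self.expired
    }
}

fn main() {
    let too_short = Peer { confirmation: ForkConfirmation::TooShort, expired: false };
    assert!(too_short.is_allowed() && !too_short.can_sync());
}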
@ -307,8 +321,8 @@ impl ChainSync {
|
||||
highest_block_number: self.highest_block.map(|n| max(n, self.last_imported_block)),
|
||||
blocks_received: if self.last_imported_block > self.starting_block { self.last_imported_block - self.starting_block } else { 0 },
|
||||
blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 },
|
||||
num_peers: self.peers.values().filter(|p| p.confirmed).count(),
|
||||
num_active_peers: self.peers.values().filter(|p| p.confirmed && p.asking != PeerAsking::Nothing).count(),
|
||||
num_peers: self.peers.values().filter(|p| p.is_allowed()).count(),
|
||||
num_active_peers: self.peers.values().filter(|p| p.is_allowed() && p.asking != PeerAsking::Nothing).count(),
|
||||
mem_used:
|
||||
self.blocks.heap_size()
|
||||
+ self.peers.heap_size_of_children()
|
||||
@ -330,7 +344,7 @@ impl ChainSync {
|
||||
p.asking_blocks.clear();
|
||||
p.asking_hash = None;
|
||||
// mark any pending requests as expired
|
||||
if p.asking != PeerAsking::Nothing && p.confirmed {
|
||||
if p.asking != PeerAsking::Nothing && p.is_allowed() {
|
||||
p.expired = true;
|
||||
}
|
||||
}
|
||||
@ -384,7 +398,7 @@ impl ChainSync {
|
||||
asking_hash: None,
|
||||
ask_time: 0f64,
|
||||
expired: false,
|
||||
confirmed: self.fork_block.is_none(),
|
||||
confirmation: if self.fork_block.is_none() { ForkConfirmation::Confirmed } else { ForkConfirmation::Unconfirmed },
|
||||
};
|
||||
|
||||
trace!(target: "sync", "New peer {} (protocol: {}, network: {:?}, difficulty: {:?}, latest:{}, genesis:{})", peer_id, peer.protocol_version, peer.network_id, peer.difficulty, peer.latest_hash, peer.genesis);
|
||||
@ -427,14 +441,19 @@ impl ChainSync {
|
||||
Some(ref mut peer) if peer.asking == PeerAsking::ForkHeader => {
|
||||
let item_count = r.item_count();
|
||||
if item_count == 0 || (item_count == 1 && try!(r.at(0)).as_raw().sha3() == self.fork_block.unwrap().1) {
|
||||
trace!(target: "sync", "{}: Confirmed peer", peer_id);
|
||||
peer.asking = PeerAsking::Nothing;
|
||||
peer.confirmed = true;
|
||||
if item_count == 0 {
|
||||
trace!(target: "sync", "{}: Chain is too short to confirm the block", peer_id);
|
||||
peer.confirmation = ForkConfirmation::TooShort;
|
||||
} else {
|
||||
trace!(target: "sync", "{}: Confirmed peer", peer_id);
|
||||
peer.confirmation = ForkConfirmation::Confirmed;
|
||||
}
|
||||
true
|
||||
} else {
|
||||
trace!(target: "sync", "{}: Fork mismatch", peer_id);
|
||||
io.disconnect_peer(peer_id);
|
||||
false
|
||||
return Ok(());
|
||||
}
|
||||
},
|
||||
_ => false,
|
||||
@ -586,7 +605,7 @@ impl ChainSync {
|
||||
/// Called by peer once it has new block bodies
|
||||
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
|
||||
fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
|
||||
if !self.peers.get(&peer_id).map_or(false, |p| p.confirmed) {
|
||||
if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
|
||||
trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id);
|
||||
return Ok(());
|
||||
}
|
||||
@ -654,7 +673,7 @@ impl ChainSync {
|
||||
|
||||
/// Handles `NewHashes` packet. Initiates headers download for any unknown hashes.
|
||||
fn on_peer_new_hashes(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
|
||||
if !self.peers.get(&peer_id).map_or(false, |p| p.confirmed) {
|
||||
if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
|
||||
trace!(target: "sync", "Ignoring new hashes from unconfirmed peer {}", peer_id);
|
||||
return Ok(());
|
||||
}
|
||||
@@ -741,7 +760,7 @@ impl ChainSync {
 	/// Resume downloading
 	fn continue_sync(&mut self, io: &mut SyncIo) {
 		let mut peers: Vec<(PeerId, U256)> = self.peers.iter().filter_map(|(k, p)|
-			if p.is_available() { Some((*k, p.difficulty.unwrap_or_else(U256::zero))) } else { None }).collect();
+			if p.can_sync() { Some((*k, p.difficulty.unwrap_or_else(U256::zero))) } else { None }).collect();
 		thread_rng().shuffle(&mut peers); //TODO: sort by rating
 		trace!(target: "sync", "Syncing with {}/{} peers", self.active_peers.len(), peers.len());
 		for (p, _) in peers {
@@ -749,7 +768,7 @@ impl ChainSync {
 				self.sync_peer(io, p, false);
 			}
 		}
-		if self.state != SyncState::Waiting && !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.is_available()) {
+		if self.state != SyncState::Waiting && !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.can_sync()) {
 			self.complete_sync();
 		}
 	}
@@ -775,7 +794,7 @@ impl ChainSync {
 		}
 		let (peer_latest, peer_difficulty) = {
 			let peer = self.peers.get_mut(&peer_id).unwrap();
-			if peer.asking != PeerAsking::Nothing || !peer.is_available() {
+			if peer.asking != PeerAsking::Nothing || !peer.can_sync() {
 				return;
 			}
 			if self.state == SyncState::Waiting {
@@ -1037,7 +1056,7 @@ impl ChainSync {
 		if !io.is_chain_queue_empty() {
 			return Ok(());
 		}
-		if !self.peers.get(&peer_id).map_or(false, |p| p.confirmed) {
+		if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
 			trace!(target: "sync", "{} Ignoring transactions from unconfirmed/unknown peer", peer_id);
 		}

@@ -1357,27 +1376,23 @@ impl ChainSync {
 			.collect::<Vec<_>>()
 	}

-	fn select_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo) -> Vec<(PeerId, BlockNumber)> {
+	fn select_random_lagging_peers(&mut self, peers: &[(PeerId, BlockNumber)]) -> Vec<(PeerId, BlockNumber)> {
 		use rand::Rng;
-		let mut lagging_peers = self.get_lagging_peers(chain_info, io);
 		// take sqrt(x) peers
+		let mut peers = peers.to_vec();
 		let mut count = (self.peers.len() as f64).powf(0.5).round() as usize;
 		count = min(count, MAX_PEERS_PROPAGATION);
 		count = max(count, MIN_PEERS_PROPAGATION);
-		::rand::thread_rng().shuffle(&mut lagging_peers);
-		lagging_peers.into_iter().take(count).collect::<Vec<_>>()
+		::rand::thread_rng().shuffle(&mut peers);
+		peers.truncate(count);
+		peers
 	}

 	/// propagates latest block to lagging peers
-	fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, sealed: &[H256]) -> usize {
-		let lucky_peers: Vec<_> = if sealed.is_empty() {
-			self.select_lagging_peers(chain_info, io).iter().map(|&(id, _)| id).collect()
-		} else {
-			self.peers.keys().cloned().collect()
-		};
-		trace!(target: "sync", "Sending NewBlocks to {:?}", lucky_peers);
+	fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, sealed: &[H256], peers: &[(PeerId, BlockNumber)]) -> usize {
+		trace!(target: "sync", "Sending NewBlocks to {:?}", peers);
 		let mut sent = 0;
-		for peer_id in lucky_peers {
+		for &(peer_id, _) in peers {
 			if sealed.is_empty() {
 				let rlp = ChainSync::create_latest_block_rlp(io.chain());
 				self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp);
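
`select_random_lagging_peers` keeps the existing square-root fan-out but now works on a caller-supplied slice instead of querying the chain itself. A standalone sketch of just the sampling rule (the constants here are illustrative stand-ins for MIN_PEERS_PROPAGATION / MAX_PEERS_PROPAGATION, and the shuffle that the real code performs with the rand crate is elided):

// Illustrative bounds; the real MIN/MAX_PEERS_PROPAGATION constants are
// defined elsewhere in sync/src/chain.rs and may have different values.
const MIN_PEERS_PROPAGATION: usize = 4;
const MAX_PEERS_PROPAGATION: usize = 128;

/// Take roughly sqrt(total_peers) entries, clamped to the propagation bounds.
/// The real code shuffles `peers` with ::rand::thread_rng() before truncating;
/// that randomization is left out to keep the sketch dependency-free.
fn propagation_sample<T: Clone>(peers: &[T], total_peers: usize) -> Vec<T> {
	let mut count = (total_peers as f64).sqrt().round() as usize;
	count = count.max(MIN_PEERS_PROPAGATION).min(MAX_PEERS_PROPAGATION);
	let mut sample = peers.to_vec();
	sample.truncate(count);
	sample
}

fn main() {
	let lagging: Vec<u32> = (0..100).collect();
	// with 100 connected peers, full blocks go to about 10 of the lagging ones
	assert_eq!(propagation_sample(&lagging, lagging.len()).len(), 10);
}
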
@@ -1395,12 +1410,11 @@ impl ChainSync {
 	}

 	/// propagates new known hashes to all peers
-	fn propagate_new_hashes(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo) -> usize {
-		let lucky_peers = self.select_lagging_peers(chain_info, io);
-		trace!(target: "sync", "Sending NewHashes to {:?}", lucky_peers);
+	fn propagate_new_hashes(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[(PeerId, BlockNumber)]) -> usize {
+		trace!(target: "sync", "Sending NewHashes to {:?}", peers);
 		let mut sent = 0;
 		let last_parent = HeaderView::new(&io.chain().block_header(BlockID::Hash(chain_info.best_block_hash.clone())).unwrap()).parent_hash();
-		for (peer_id, peer_number) in lucky_peers {
+		for &(peer_id, peer_number) in peers {
 			let peer_best = if chain_info.best_block_number - peer_number > MAX_PEER_LAG_PROPAGATION as BlockNumber {
 				// If we think peer is too far behind just send one latest hash
 				last_parent.clone()
@@ -1466,11 +1480,19 @@ impl ChainSync {
 	fn propagate_latest_blocks(&mut self, io: &mut SyncIo, sealed: &[H256]) {
 		let chain_info = io.chain().chain_info();
 		if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
-			let hashes = self.propagate_new_hashes(&chain_info, io);
-			let blocks = self.propagate_blocks(&chain_info, io, sealed);
-			if blocks != 0 || hashes != 0 {
-				trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
-			}
+			let mut peers = self.get_lagging_peers(&chain_info, io);
+			if sealed.is_empty() {
+				let hashes = self.propagate_new_hashes(&chain_info, io, &peers);
+				peers = self.select_random_lagging_peers(&peers);
+				let blocks = self.propagate_blocks(&chain_info, io, sealed, &peers);
+				if blocks != 0 || hashes != 0 {
+					trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
+				}
+			} else {
+				self.propagate_blocks(&chain_info, io, sealed, &peers);
+				self.propagate_new_hashes(&chain_info, io, &peers);
+				trace!(target: "sync", "Sent sealed block to all peers");
+			};
 		}
 		self.propagate_new_transactions(io);
 		self.last_sent_block_number = chain_info.best_block_number;
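
The rewritten `propagate_latest_blocks` distinguishes blocks we sealed ourselves from blocks merely imported during sync: imported blocks are announced by hash to every lagging peer but sent in full only to the random sqrt-sized subset, whereas a locally sealed block is pushed in full (and announced) to all lagging peers. A stand-alone sketch of that policy, with stand-in functions rather than the real ChainSync/SyncIo API:

// Stand-ins purely for illustration; the real code sends packets such as
// NEW_BLOCK_PACKET through SyncIo instead of printing.
type PeerId = usize;

fn send_block_to(peer: PeerId) { println!("NewBlock       -> peer {}", peer); }
fn announce_hash_to(peer: PeerId) { println!("NewBlockHashes -> peer {}", peer); }

/// `sealed_locally` is true when the block was produced by our own miner.
/// `lagging` are peers behind our best block; `lucky` models the random
/// sqrt-sized subset chosen by select_random_lagging_peers.
fn propagate(sealed_locally: bool, lagging: &[PeerId], lucky: &[PeerId]) {
	if sealed_locally {
		// our own block: push the full block and its hash to every lagging peer
		for &p in lagging {
			send_block_to(p);
			announce_hash_to(p);
		}
	} else {
		// imported block: announce the hash widely, send the body sparingly
		for &p in lagging {
			announce_hash_to(p);
		}
		for &p in lucky {
			send_block_to(p);
		}
	}
}

fn main() {
	let lagging = [0, 1, 2, 3];
	let lucky = [2];
	propagate(false, &lagging, &lucky);
	propagate(true, &lagging, &lucky);
}
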
@@ -1693,7 +1715,7 @@ mod tests {
 				asking_hash: None,
 				ask_time: 0f64,
 				expired: false,
-				confirmed: true,
+				confirmation: super::ForkConfirmation::Confirmed,
 			});
 		sync
 	}
@@ -1738,7 +1760,8 @@ mod tests {
 		let chain_info = client.chain_info();
 		let mut io = TestIo::new(&mut client, &mut queue, None);

-		let peer_count = sync.propagate_new_hashes(&chain_info, &mut io);
+		let peers = sync.get_lagging_peers(&chain_info, &mut io);
+		let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers);

 		// 1 message should be sent
 		assert_eq!(1, io.queue.len());
@@ -1756,7 +1779,8 @@ mod tests {
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 		let chain_info = client.chain_info();
 		let mut io = TestIo::new(&mut client, &mut queue, None);
-		let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[]);
+		let peers = sync.get_lagging_peers(&chain_info, &mut io);
+		let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers);

 		// 1 message should be sent
 		assert_eq!(1, io.queue.len());
@@ -1775,7 +1799,8 @@ mod tests {
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 		let chain_info = client.chain_info();
 		let mut io = TestIo::new(&mut client, &mut queue, None);
-		let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()]);
+		let peers = sync.get_lagging_peers(&chain_info, &mut io);
+		let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers);

 		// 1 message should be sent
 		assert_eq!(1, io.queue.len());
@@ -1881,7 +1906,8 @@ mod tests {
 		let chain_info = client.chain_info();
 		let mut io = TestIo::new(&mut client, &mut queue, None);

-		sync.propagate_new_hashes(&chain_info, &mut io);
+		let peers = sync.get_lagging_peers(&chain_info, &mut io);
+		sync.propagate_new_hashes(&chain_info, &mut io, &peers);

 		let data = &io.queue[0].data.clone();
 		let result = sync.on_peer_new_hashes(&mut io, 0, &UntrustedRlp::new(data));
@@ -1899,7 +1925,8 @@ mod tests {
 		let chain_info = client.chain_info();
 		let mut io = TestIo::new(&mut client, &mut queue, None);

-		sync.propagate_blocks(&chain_info, &mut io, &[]);
+		let peers = sync.get_lagging_peers(&chain_info, &mut io);
+		sync.propagate_blocks(&chain_info, &mut io, &[], &peers);

 		let data = &io.queue[0].data.clone();
 		let result = sync.on_peer_new_block(&mut io, 0, &UntrustedRlp::new(data));
@@ -161,11 +161,11 @@ fn propagate_hashes() {
 	net.trigger_chain_new_blocks(0); //first event just sets the marker
 	net.trigger_chain_new_blocks(0);

-	// 5 peers to sync
-	assert_eq!(5, net.peer(0).queue.len());
+	// 5 peers with NewHashes, 4 with blocks
+	assert_eq!(9, net.peer(0).queue.len());
 	let mut hashes = 0;
 	let mut blocks = 0;
-	for i in 0..5 {
+	for i in 0..net.peer(0).queue.len() {
 		if net.peer(0).queue[i].packet_id == 0x1 {
 			hashes += 1;
 		}
@@ -173,7 +173,8 @@ fn propagate_hashes() {
 			blocks += 1;
 		}
 	}
-	assert!(blocks + hashes == 5);
+	assert_eq!(blocks, 4);
+	assert_eq!(hashes, 5);
 }

 #[test]
@@ -172,7 +172,7 @@ impl Default for DatabaseConfig {
 	fn default() -> DatabaseConfig {
 		DatabaseConfig {
 			cache_size: None,
-			max_open_files: 1024,
+			max_open_files: 512,
 			compaction: CompactionProfile::default(),
 			columns: None,
 			wal: true,