diff --git a/Cargo.lock b/Cargo.lock index 627fbfa69..8e3a4b168 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,7 +2,7 @@ name = "parity" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)", "daemonize 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)", @@ -31,7 +31,7 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "nodrop 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -43,14 +43,14 @@ name = "aster" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bigint" version = "0.1.0" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -65,7 +65,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bitflags" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -85,7 +85,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "chrono" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", @@ -94,7 +94,7 @@ dependencies = [ [[package]] name = "clippy" -version = "0.0.44" +version = "0.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -117,7 +117,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -136,7 +136,7 @@ version = "1.1.1" source = "git+https://github.com/tomusdrw/rust-ctrlc.git#f4927770f89eca80ec250911eea3adcbf579ac48" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -145,7 +145,7 @@ name = "daemonize" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -161,7 +161,7 @@ name = "docopt" version = "0.6.78" source = "registry+https://github.com/rust-lang/crates.io-index" 
dependencies = [ - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "strsim 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -177,7 +177,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -185,7 +185,7 @@ name = "eth-secp256k1" version = "0.5.4" source = "git+https://github.com/ethcore/rust-secp256k1#283a0677d8327536be58a87e0494d7e0e7b1d1d8" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -207,7 +207,7 @@ dependencies = [ name = "ethcore" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", @@ -233,7 +233,7 @@ dependencies = [ name = "ethcore-rpc" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", "ethcore 0.9.99", "ethcore-util 0.9.99", @@ -253,10 +253,10 @@ dependencies = [ name = "ethcore-util" version = "0.9.99" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", - "chrono 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -264,14 +264,14 @@ dependencies = [ "ethcore-devtools 0.9.99", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "igd 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "json-tests 0.1.0", "lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rocksdb 0.4.1 
(git+https://github.com/arkpar/rust-rocksdb.git)", + "rocksdb 0.4.2 (git+https://github.com/arkpar/rust-rocksdb.git)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -288,7 +288,7 @@ dependencies = [ name = "ethsync" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 0.9.99", "ethcore-util 0.9.99", @@ -304,7 +304,7 @@ dependencies = [ name = "fdlimit" version = "0.1.0" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -314,7 +314,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "glob" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -328,7 +328,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -381,7 +381,7 @@ dependencies = [ "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -391,21 +391,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", "xml-rs 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "xmltree 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itertools" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "json-tests" version = "0.1.0" dependencies = [ - "glob 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -461,16 +461,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "librocksdb-sys" -version = "0.2.1" -source = "git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.2.2" +source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.7 
(registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -478,7 +477,7 @@ name = "log" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -491,7 +490,7 @@ name = "memchr" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -537,7 +536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -561,7 +560,7 @@ dependencies = [ [[package]] name = "nom" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -578,7 +577,7 @@ name = "num_cpus" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -594,11 +593,6 @@ name = "odds" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "pkg-config" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "primal" version = "0.2.3" @@ -645,7 +639,7 @@ name = "quasi" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -655,7 +649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aster 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -663,7 +657,7 @@ name = "rand" version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -678,7 +672,7 @@ dependencies = [ [[package]] name = "regex" -version = "0.1.54" +version = "0.1.55" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aho-corasick 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -694,11 +688,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rocksdb" -version = "0.4.1" -source = 
"git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.4.2" +source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "librocksdb-sys 0.2.1 (git+https://github.com/arkpar/rust-rocksdb.git)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "librocksdb-sys 0.2.2 (git+https://github.com/arkpar/rust-rocksdb.git)", ] [[package]] @@ -707,7 +701,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -747,7 +741,7 @@ name = "semver" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "nom 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "nom 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -772,7 +766,7 @@ dependencies = [ "quasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "quasi_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -815,16 +809,16 @@ name = "syntex" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "syntex_syntax" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -850,7 +844,7 @@ name = "termios" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -859,7 +853,7 @@ version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -924,7 +918,7 @@ dependencies = [ [[package]] name = "url" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ 
"matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -981,7 +975,7 @@ name = "xml-rs" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 196807a04..77d1e57ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" number_prefix = "0.2" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } ethsync = { path = "sync" } diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index c3a3d32dc..5ef83842f 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -17,7 +17,7 @@ ethcore-util = { path = "../util" } evmjit = { path = "../evmjit", optional = true } ethash = { path = "../ethash" } num_cpus = "0.2" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } crossbeam = "0.1.5" lazy_static = "0.1" ethcore-devtools = { path = "../devtools" } diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index e7f1b2bad..026e813f5 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -13,17 +13,14 @@ pub struct AccountDB<'db> { #[inline] fn combine_key<'a>(address: &'a H256, key: &'a H256) -> H256 { - let mut addr_hash = address.sha3(); - // preserve 96 bits of original key for db lookup - addr_hash[0..12].clone_from_slice(&[0u8; 12]); - &addr_hash ^ key + address ^ key } impl<'db> AccountDB<'db> { pub fn new(db: &'db HashDB, address: &Address) -> AccountDB<'db> { AccountDB { db: db, - address: x!(address.clone()), + address: x!(address), } } } @@ -70,7 +67,7 @@ impl<'db> AccountDBMut<'db> { pub fn new(db: &'db mut HashDB, address: &Address) -> AccountDBMut<'db> { AccountDBMut { db: db, - address: x!(address.clone()), + address: x!(address), } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 4642a0103..95ee90f7d 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -116,7 +116,7 @@ pub struct Client where V: Verifier { } const HISTORY: u64 = 1200; -const CLIENT_DB_VER_STR: &'static str = "5.1"; +const CLIENT_DB_VER_STR: &'static str = "5.3"; impl Client { /// Create a new client with given spec and DB path. 
diff --git a/ethcore/src/extras.rs b/ethcore/src/extras.rs index f4759b040..a7c82c37c 100644 --- a/ethcore/src/extras.rs +++ b/ethcore/src/extras.rs @@ -35,13 +35,13 @@ pub enum ExtrasIndex { BlocksBlooms = 4, /// Block receipts index BlockReceipts = 5, -} +} /// trait used to write Extras data to db pub trait ExtrasWritable { /// Write extra data to db fn put_extras<K, T>(&self, hash: &K, value: &T) where - T: ExtrasIndexable + Encodable, + T: ExtrasIndexable + Encodable, K: ExtrasSliceConvertable; } @@ -60,9 +60,9 @@ pub trait ExtrasReadable { impl ExtrasWritable for DBTransaction { fn put_extras<K, T>(&self, hash: &K, value: &T) where - T: ExtrasIndexable + Encodable, + T: ExtrasIndexable + Encodable, K: ExtrasSliceConvertable { - + self.put(&hash.to_extras_slice(T::extras_index()), &encode(value)).unwrap() } } @@ -215,6 +215,12 @@ pub struct BlocksBlooms { pub blooms: [H2048; 16], } +impl Default for BlocksBlooms { + fn default() -> Self { + BlocksBlooms::new() + } +} + impl BlocksBlooms { pub fn new() -> Self { BlocksBlooms { blooms: unsafe { ::std::mem::zeroed() }} diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 458a94476..572cda2fa 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -25,6 +25,8 @@ #![cfg_attr(feature="dev", allow(match_bool))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(feature="dev", allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(feature="dev", allow(if_not_else))] //! Ethcore library //! diff --git a/ethcore/src/substate.rs b/ethcore/src/substate.rs index 374397ca7..57e35ad2e 100644 --- a/ethcore/src/substate.rs +++ b/ethcore/src/substate.rs @@ -31,6 +31,12 @@ pub struct Substate { pub contracts_created: Vec<Address>
} +impl Default for Substate { + fn default() -> Self { + Substate::new() + } +} + impl Substate { /// Creates new substate. pub fn new() -> Self { @@ -67,8 +73,8 @@ mod tests { let mut sub_state = Substate::new(); sub_state.contracts_created.push(address_from_u64(1u64)); sub_state.logs.push(LogEntry { - address: address_from_u64(1u64), - topics: vec![], + address: address_from_u64(1u64), + topics: vec![], data: vec![] }); sub_state.sstore_clears_count = x!(5); @@ -77,8 +83,8 @@ mod tests { let mut sub_state_2 = Substate::new(); sub_state_2.contracts_created.push(address_from_u64(2u64)); sub_state_2.logs.push(LogEntry { - address: address_from_u64(1u64), - topics: vec![], + address: address_from_u64(1u64), + topics: vec![], data: vec![] }); sub_state_2.sstore_clears_count = x!(7); diff --git a/parity/main.rs b/parity/main.rs index e927e179e..bbcb17bae 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -81,10 +81,9 @@ Protocol Options: --testnet Equivalent to --chain testnet (geth-compatible). --networkid INDEX Override the network identifier from the chain we are on. --pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of: archive, - light (experimental) [default: archive]. + light (experimental), fast (experimental) [default: archive]. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity] - --pruning Client should prune the state/storage trie. --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. @@ -104,7 +103,8 @@ API and Console Options: --jsonrpc-port PORT Specify the port portion of the JSONRPC API server [default: 8545]. --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited - list of API name. Possible names are web3, eth and net. [default: web3,eth,net]. + list of API names. Possible names are web3, eth and net. [default: web3,eth,net,personal]. + --rpc Equivalent to --jsonrpc (geth-compatible). --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible). --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible). @@ -231,6 +231,7 @@ fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, secret_store: Arc<AccountService>, + "personal" => server.add_delegate(PersonalClient::new(&secret_store).to_delegate()), _ => { die!("{}: Invalid API name to be enabled.", api); } @@ -423,7 +424,7 @@ impl Configuration { "" => journaldb::Algorithm::Archive, "archive" => journaldb::Algorithm::Archive, "pruned" => journaldb::Algorithm::EarlyMerge, -// "fast" => journaldb::Algorithm::OverlayRecent, // TODO: @arkpar uncomment this once option 2 is merged. + "fast" => journaldb::Algorithm::OverlayRecent, // "slow" => journaldb::Algorithm::RefCounted, // TODO: @gavofyork uncomment this once ref-count algo is merged.
_ => { die!("Invalid pruning method given."); } }; diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index f324aba10..a1f154ca8 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -18,7 +18,7 @@ ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } ethsync = { path = "../sync" } -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.7.0", optional = true } diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 7b79ceae7..ce200244c 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -22,20 +22,20 @@ use util::keys::store::*; use util::Address; /// Account management (personal) rpc implementation. -pub struct PersonalClient { - accounts: Weak, +pub struct PersonalClient where A: AccountProvider { + accounts: Weak, } -impl PersonalClient { +impl PersonalClient where A: AccountProvider { /// Creates new PersonalClient - pub fn new(store: &Arc) -> Self { + pub fn new(store: &Arc) -> Self { PersonalClient { accounts: Arc::downgrade(store), } } } -impl Personal for PersonalClient { +impl Personal for PersonalClient where A: AccountProvider + 'static { fn accounts(&self, _: Params) -> Result { let store = take_weak!(self.accounts); match store.accounts() { diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index 5ef74987c..3a38ced15 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -17,4 +17,5 @@ //!TODO: load custom blockchain state and test mod net; +mod web3; mod helpers; diff --git a/rpc/src/v1/tests/net.rs b/rpc/src/v1/tests/net.rs index 792e469d8..e24045ca6 100644 --- a/rpc/src/v1/tests/net.rs +++ b/rpc/src/v1/tests/net.rs @@ -36,7 +36,7 @@ fn rpc_net_version() { let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_string())); + assert_eq!(io.handle_request(request), Some(response.to_owned())); } #[test] @@ -49,7 +49,7 @@ fn rpc_net_peer_count() { let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_string())); + assert_eq!(io.handle_request(request), Some(response.to_owned())); } #[test] @@ -62,5 +62,5 @@ fn rpc_net_listening() { let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_string())); + assert_eq!(io.handle_request(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/web3.rs b/rpc/src/v1/tests/web3.rs new file mode 100644 index 000000000..c717d361a --- /dev/null +++ b/rpc/src/v1/tests/web3.rs @@ -0,0 +1,33 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +use jsonrpc_core::IoHandler; +use util::version; +use v1::{Web3, Web3Client}; + +#[test] +fn rpc_web3_version() { + let web3 = Web3Client::new().to_delegate(); + let io = IoHandler::new(); + io.add_delegate(web3); + + let v = version().to_owned().replace("Parity/", "Parity//"); + + let request = r#"{"jsonrpc": "2.0", "method": "web3_clientVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"VER","id":1}"#.to_owned().replace("VER", v.as_ref()); + + assert_eq!(io.handle_request(request), Some(response)); +} diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 0097cd47e..cf0027368 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -10,7 +10,7 @@ authors = ["Ethcore <admin@ethcore.io>"] -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } diff --git a/sync/src/lib.rs b/sync/src/lib.rs impl NetworkProtocolHandler<SyncMessage> for EthSync { self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); } + #[allow(single_match)] fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) { match *message { SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 618eb6a0b..0f9ef19c8 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -79,6 +79,7 @@ //! - When it's removed from `current` - all transactions from this sender (`current` & `future`) are recalculated. //! +use std::default::Default; use std::cmp::{Ordering}; use std::collections::{HashMap, BTreeSet}; use util::numbers::{Uint, U256}; @@ -102,6 +103,7 @@ struct TransactionOrder { hash: H256, } + impl TransactionOrder { fn for_transaction(tx: &VerifiedTransaction, base_nonce: U256) -> Self { TransactionOrder { @@ -253,6 +255,12 @@ pub struct TransactionQueue { last_nonces: HashMap<Address, U256>, } +impl Default for TransactionQueue { + fn default() -> Self { + TransactionQueue::new() + } +} + impl TransactionQueue { /// Creates new instance of this Queue pub fn new() -> Self { diff --git a/util/Cargo.toml b/util/Cargo.toml index 74e4d7226..ce40bb052 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -27,7 +27,7 @@ crossbeam = "0.2" slab = "0.1" sha3 = { path = "sha3" } serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } json-tests = { path = "json-tests" } rustc_version = "0.1.0" igd = "0.4.2" diff --git a/util/bigint/Cargo.toml b/util/bigint/Cargo.toml index 377391eeb..1bd2b994e 100644 --- a/util/bigint/Cargo.toml +++ b/util/bigint/Cargo.toml @@ -15,7 +15,6 @@ rustc-serialize = "0.3" arrayvec = "0.3" rand = "0.3.12" serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } heapsize = "0.3" [features] diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index c8c29e765..2e4e966c1 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -305,7 +305,6 @@ mod tests { assert!(jdb.exists(&foo)); } - #[test] fn reopen() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); } } + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let (foo, bar, baz) = { + let (foo, _, _) = { let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); @@ -406,6 +406,5 @@ let state = jdb.state(&key); assert!(state.is_some()); } - } } diff --git a/util/src/journaldb/earlymergedb.rs
b/util/src/journaldb/earlymergedb.rs new file mode 100644 index 000000000..83be31fb4 --- /dev/null +++ b/util/src/journaldb/earlymergedb.rs @@ -0,0 +1,1031 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +//! Disk-backed HashDB implementation. + +use common::*; +use rlp::*; +use hashdb::*; +use memorydb::*; +use super::traits::JournalDB; +use kvdb::{Database, DBTransaction, DatabaseConfig}; +#[cfg(test)] +use std::env; + +#[derive(Clone, PartialEq, Eq)] +struct RefInfo { + queue_refs: usize, + in_archive: bool, +} + +impl HeapSizeOf for RefInfo { + fn heap_size_of_children(&self) -> usize { 0 } +} + +impl fmt::Display for RefInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}+{}", self.queue_refs, if self.in_archive {1} else {0}) + } +} + +impl fmt::Debug for RefInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}+{}", self.queue_refs, if self.in_archive {1} else {0}) + } +} + +#[derive(Clone, PartialEq, Eq)] +enum RemoveFrom { + Queue, + Archive, +} + +/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// and latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before +/// the removals actually take effect. +pub struct EarlyMergeDB { + overlay: MemoryDB, + backing: Arc<Database>, + refs: Option<Arc<RwLock<HashMap<H256, RefInfo>>>>, +} + +// all keys must be at least 12 bytes +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_VERSION : u32 = 3; +const PADDING : [u8; 10] = [ 0u8; 10 ]; + +impl EarlyMergeDB { + /// Create a new instance from file + pub fn new(path: &str) -> EarlyMergeDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + if !backing.is_empty() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) { + Ok(Some(DB_VERSION)) => {}, + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + + let refs = Some(Arc::new(RwLock::new(EarlyMergeDB::read_refs(&backing)))); + EarlyMergeDB { + overlay: MemoryDB::new(), + backing: Arc::new(backing), + refs: refs, + } + } + + /// Create a new instance with an anonymous temporary database.
+ #[cfg(test)] + fn new_temp() -> EarlyMergeDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(dir.to_str().unwrap()) + } + + fn morph_key(key: &H256, index: u8) -> Bytes { + let mut ret = key.bytes().to_owned(); + ret.push(index); + ret + } + + // The next three are valid only as long as there is an insert operation of `key` in the journal. + fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); } + fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); } + fn is_already_in(backing: &Database, key: &H256) -> bool { + backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() + } + + fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, trace: bool) { + for &(ref h, ref d) in inserts { + if let Some(c) = refs.get_mut(h) { + // already counting. increment. + c.queue_refs += 1; + if trace { + trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, c.queue_refs); + } + continue; + } + + // this is the first entry for this node in the journal. + if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() { + // already in the backing DB. start counting, and remember it was already in. + Self::set_already_in(batch, &h); + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: true}); + if trace { + trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h); + } + continue; + } + + // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. + //Self::reset_already_in(&h); + assert!(!Self::is_already_in(backing, &h)); + batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?"); + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: false}); + if trace { + trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h); + } + } + } + + fn replay_keys(inserts: &[H256], backing: &Database, refs: &mut HashMap<H256, RefInfo>) { + trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs); + for h in inserts { + if let Some(c) = refs.get_mut(h) { + // already counting. increment. + c.queue_refs += 1; + continue; + } + + // this is the first entry for this node in the journal. + // it is initialised to 1 if it was already in. + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: Self::is_already_in(backing, h)}); + } + trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); + } + + fn kill_keys(deletes: &Vec<H256>, refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, from: RemoveFrom, trace: bool) { + // with a kill on {queue_refs: 1, in_archive: true}, we have two options: + // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) + // - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue) + // (the latter option would then mean removing the RefInfo, since it would no longer be counted in the queue.) + // both are valid, but we switch between them depending on context. + // All inserts in queue (i.e. those which may yet be reverted) have an entry in refs.
+ for h in deletes.iter() { + let mut n: Option<RefInfo> = None; + if let Some(c) = refs.get_mut(h) { + if c.in_archive && from == RemoveFrom::Archive { + c.in_archive = false; + Self::reset_already_in(batch, h); + if trace { + trace!(target: "jdb.fine", " kill({}): In archive, 1 in queue: Reducing to queue only and recording", h); + } + continue; + } else if c.queue_refs > 1 { + c.queue_refs -= 1; + if trace { + trace!(target: "jdb.fine", " kill({}): In queue > 1 refs: Decrementing ref count to {}", h, c.queue_refs); + } + continue; + } else { + n = Some(c.clone()); + } + } + match n { + Some(RefInfo{queue_refs: 1, in_archive: true}) => { + refs.remove(h); + Self::reset_already_in(batch, h); + if trace { + trace!(target: "jdb.fine", " kill({}): In archive, 1 in queue: Removing from queue and leaving in archive", h); + } + } + Some(RefInfo{queue_refs: 1, in_archive: false}) => { + refs.remove(h); + batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); + if trace { + trace!(target: "jdb.fine", " kill({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h); + } + } + None => { + // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. + //assert!(!Self::is_already_in(db, &h)); + batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); + if trace { + trace!(target: "jdb.fine", " kill({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h); + } + } + _ => panic!("Invalid value in refs: {:?}", n), + } + } + } + + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_refs(&self.backing); + let refs = self.refs.as_ref().unwrap().write().unwrap(); + if *refs != reconstructed { + let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) {None} else {Some((k.clone(), v.clone()))}).collect::<HashMap<_, _>>(); + let clean_recon = reconstructed.into_iter().filter_map(|(k, v)| if refs.get(&k) == Some(&v) {None} else {Some((k.clone(), v.clone()))}).collect::<HashMap<_, _>>(); + warn!(target: "jdb", "mem: {:?} != log: {:?}", clean_refs, clean_recon); + false + } else { + true + } + } + + fn payload(&self, key: &H256) -> Option<Bytes> { + self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) + }
Some issue with your hard disk?").map(|v| v.to_vec()) + } + + fn read_refs(db: &Database) -> HashMap { + let mut refs = HashMap::new(); + if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { + let mut era = decode::(&val); + loop { + let mut index = 0usize; + while let Some(rlp_data) = db.get({ + let mut r = RlpStream::new_list(3); + r.append(&era); + r.append(&index); + r.append(&&PADDING[..]); + &r.drain() + }).expect("Low-level database error.") { + let rlp = Rlp::new(&rlp_data); + let inserts: Vec = rlp.val_at(1); + Self::replay_keys(&inserts, db, &mut refs); + index += 1; + }; + if index == 0 || era == 0 { + break; + } + era -= 1; + } + } + refs + } + } + +impl HashDB for EarlyMergeDB { + fn keys(&self) -> HashMap { + let mut ret: HashMap = HashMap::new(); + for (key, _) in self.backing.iter() { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + if let Some(x) = self.payload(key) { + Some(&self.overlay.denote(key, x).0) + } + else { + None + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: Bytes) { + self.overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); + } +} + +impl JournalDB for EarlyMergeDB { + fn spawn(&self) -> Box { + Box::new(EarlyMergeDB { + overlay: MemoryDB::new(), + backing: self.backing.clone(), + refs: self.refs.clone(), + }) + } + + fn is_empty(&self) -> bool { + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn mem_used(&self) -> usize { + self.overlay.mem_used() + match self.refs { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } + } + + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + // journal format: + // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, n] => [ ... ] + + // TODO: store reclaim_period. + + // When we make a new commit, we make a journal of all blocks in the recent history and record + // all keys that were inserted and deleted. The journal is ordered by era; multiple commits can + // share the same era. This forms a data structure similar to a queue but whose items are tuples. + // By the time comes to remove a tuple from the queue (i.e. then the era passes from recent history + // into ancient history) then only one commit from the tuple is considered canonical. This commit + // is kept in the main backing database, whereas any others from the same era are reverted. + // + // It is possible that a key, properly available in the backing database be deleted and re-inserted + // in the recent history queue, yet have both operations in commits that are eventually non-canonical. + // To avoid the original, and still required, key from being deleted, we maintain a reference count + // which includes an original key, if any. 
+ // + // The semantics of the `counter` are: + // insert key k: + // counter already contains k: count += 1 + // counter doesn't contain k: + // backing db contains k: count = 1 + // backing db doesn't contain k: insert into backing db, count = 0 + // delete key k: + // counter contains k (count is asserted to be non-zero): + // count > 1: counter -= 1 + // count == 1: remove counter + // count == 0: remove key from backing db + // counter doesn't contain k: remove key from backing db + // + // Practically, this means that for each commit block turning from recent to ancient we do the + // following: + // is_canonical: + // inserts: Ignored (left alone in the backing database). + // deletes: Enacted; however, recent history queue is checked for ongoing references. This is + // reduced as a preference to deletion from the backing database. + // !is_canonical: + // inserts: Reverted; however, recent history queue is checked for ongoing references. This is + // reduced as a preference to deletion from the backing database. + // deletes: Ignored (they were never inserted). + // + + // record new commit's details. + let mut refs = self.refs.as_ref().unwrap().write().unwrap(); + let batch = DBTransaction::new(); + let trace = false; + { + let mut index = 0usize; + let mut last; + + while try!(self.backing.get({ + let mut r = RlpStream::new_list(3); + r.append(&now); + r.append(&index); + r.append(&&PADDING[..]); + last = r.drain(); + &last + })).is_some() { + index += 1; + } + + let drained = self.overlay.drain(); + + if trace { + trace!(target: "jdb", "commit: #{} ({}), end era: {:?}", now, id, end); + } + + let removes: Vec<H256> = drained + .iter() + .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None}) + .collect(); + let inserts: Vec<(H256, Bytes)> = drained + .into_iter() + .filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None }) + .collect(); + + + // TODO: check all removes are in the db. + + let mut r = RlpStream::new_list(3); + r.append(id); + + // Process the new inserts. + // We use the inserts for three things. For each: + // - we place into the backing DB or increment the counter if already in; + // - we note in the backing db that it was already in; + // - we write the key into our journal for this block; + + r.begin_list(inserts.len()); + inserts.iter().foreach(|&(k, _)| {r.append(&k);}); + r.append(&removes); + Self::insert_keys(&inserts, &self.backing, &mut refs, &batch, trace); + if trace { + let ins = inserts.iter().map(|&(k, _)| k).collect::<Vec<_>>(); + trace!(target: "jdb.ops", " Inserts: {:?}", ins); + trace!(target: "jdb.ops", " Deletes: {:?}", removes); + } + try!(batch.put(&last, r.as_raw())); + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + } + + // apply old commits' details + if let Some((end_era, canon_id)) = end { + let mut index = 0usize; + let mut last; + while let Some(rlp_data) = try!(self.backing.get({ + let mut r = RlpStream::new_list(3); + r.append(&end_era); + r.append(&index); + r.append(&&PADDING[..]); + last = r.drain(); + &last + })) { + let rlp = Rlp::new(&rlp_data); + let inserts: Vec<H256> = rlp.val_at(1); + + if canon_id == rlp.val_at(0) { + // Collect keys to be removed. Canon block - remove the (enacted) deletes.
+ let deletes: Vec<H256> = rlp.val_at(2); + if trace { + trace!(target: "jdb.ops", " Expunging: {:?}", deletes); + } + Self::kill_keys(&deletes, &mut refs, &batch, RemoveFrom::Archive, trace); + + if trace { + trace!(target: "jdb.ops", " Finalising: {:?}", inserts); + } + for k in inserts.iter() { + match refs.get(k).cloned() { + None => { + // [in archive] -> SHIFT remove -> SHIFT insert None->Some{queue_refs: 1, in_archive: true} -> TAKE remove Some{queue_refs: 1, in_archive: true}->None -> TAKE insert + // already expunged from the queue (which is allowed since the key is in the archive). + // leave well alone. + } + Some( RefInfo{queue_refs: 1, in_archive: false} ) => { + // just delete the refs entry. + refs.remove(k); + } + Some( RefInfo{queue_refs: x, in_archive: false} ) => { + // must set already in; , + Self::set_already_in(&batch, k); + refs.insert(k.clone(), RefInfo{ queue_refs: x - 1, in_archive: true }); + } + Some( RefInfo{queue_refs: _, in_archive: true} ) => { + // Invalid! Reinserted the same key twice. + warn!("Key {} inserted twice into same fork.", k); + } + } + } + } else { + // Collect keys to be removed. Non-canon block - remove the (reverted) inserts. + if trace { + trace!(target: "jdb.ops", " Reverting: {:?}", inserts); + } + Self::kill_keys(&inserts, &mut refs, &batch, RemoveFrom::Queue, trace); + } + + try!(batch.delete(&last)); + index += 1; + } + if trace { + trace!(target: "jdb", "EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + } + } + + try!(self.backing.write(batch)); + + // Comment out for now. TODO: automatically enable in tests. + + if trace { + trace!(target: "jdb", "OK: {:?}", refs.clone()); + } + + Ok(0) + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use super::super::traits::JournalDB; + use hashdb::*; + use log::init_log; + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = EarlyMergeDB::new_temp(); + + let x = jdb.insert(b"X"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&x); + jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + let x = jdb.insert(b"X"); + jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&x)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = EarlyMergeDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1,
b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = EarlyMergeDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = EarlyMergeDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = EarlyMergeDB::new_temp(); + + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + assert!(jdb.exists(&foo)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key_one() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + 
jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key_other() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + foo + }; + + { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + 
assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + + #[test] + fn reopen_test() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
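+ // Re-inserting it below journals a fresh insertion for a key that is already on disk.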
+ + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + init_log(); + + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = b"foo".sha3(); + + { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 1 + jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + (foo, bar, baz) + }; + + { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } +} diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index 0cee7dd8d..cf5278368 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -21,7 +21,8 @@ use common::*; /// Export the journaldb module. pub mod traits; mod archivedb; -mod optiononedb; +mod earlymergedb; +mod overlayrecentdb; /// Export the JournalDB trait. 
pub use self::traits::JournalDB; @@ -72,7 +73,8 @@ impl fmt::Display for Algorithm { pub fn new(path: &str, algorithm: Algorithm) -> Box<JournalDB> { match algorithm { Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), - Algorithm::EarlyMerge => Box::new(optiononedb::OptionOneDB::new(path)), + Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)), + Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)), _ => unimplemented!(), } } diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/optiononedb.rs deleted file mode 100644 index dfa7c8ec1..000000000 --- a/util/src/journaldb/optiononedb.rs +++ /dev/null @@ -1,618 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see <http://www.gnu.org/licenses/>. - -//! Disk-backed HashDB implementation. - -use common::*; -use rlp::*; -use hashdb::*; -use memorydb::*; -use super::traits::JournalDB; -use kvdb::{Database, DBTransaction, DatabaseConfig}; -#[cfg(test)] -use std::env; - -/// Implementation of the HashDB trait for a disk-backed database with a memory overlay -/// and latent-removal semantics. -/// -/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to -/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect -/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before -/// the removals actually take effect. -pub struct OptionOneDB { - overlay: MemoryDB, - backing: Arc<Database>, - counters: Option<Arc<RwLock<HashMap<H256, i32>>>>, -} - -// all keys must be at least 12 bytes -const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; -const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; -const DB_VERSION : u32 = 3; -const PADDING : [u8; 10] = [ 0u8; 10 ]; - -impl OptionOneDB { - /// Create a new instance from file - pub fn new(path: &str) -> OptionOneDB { - let opts = DatabaseConfig { - prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix - }; - let backing = Database::open(&opts, path).unwrap_or_else(|e| { - panic!("Error opening state db: {}", e); - }); - if !backing.is_empty() { - match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) { - Ok(Some(DB_VERSION)) => {}, - v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) - } - } else { - backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); - } - - let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))); - OptionOneDB { - overlay: MemoryDB::new(), - backing: Arc::new(backing), - counters: counters, - } - } - - /// Create a new instance with an anonymous temporary database.
- #[cfg(test)] - fn new_temp() -> OptionOneDB { - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap()) - } - - fn morph_key(key: &H256, index: u8) -> Bytes { - let mut ret = key.bytes().to_owned(); - ret.push(index); - ret - } - - // The next three are valid only as long as there is an insert operation of `key` in the journal. - fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); } - fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); } - fn is_already_in(backing: &Database, key: &H256) -> bool { - backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() - } - - fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) { - for &(ref h, ref d) in inserts { - if let Some(c) = counters.get_mut(h) { - // already counting. increment. - *c += 1; - continue; - } - - // this is the first entry for this node in the journal. - if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() { - // already in the backing DB. start counting, and remember it was already in. - Self::set_already_in(batch, &h); - counters.insert(h.clone(), 1); - continue; - } - - // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. - //Self::reset_already_in(&h); - assert!(!Self::is_already_in(backing, &h)); - batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?"); - } - } - - fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap<H256, i32>) { - trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); - for h in inserts { - if let Some(c) = counters.get_mut(h) { - // already counting. increment. - *c += 1; - continue; - } - - // this is the first entry for this node in the journal. - // it is initialised to 1 if it was already in. - if Self::is_already_in(backing, h) { - trace!("replace_keys: Key {} was already in!", h); - counters.insert(h.clone(), 1); - } - } - trace!("replay_keys: (end) counters={:?}", counters); - } - - fn kill_keys(deletes: Vec<H256>, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) { - for h in deletes.into_iter() { - let mut n: Option<i32> = None; - if let Some(c) = counters.get_mut(&h) { - if *c > 1 { - *c -= 1; - continue; - } else { - n = Some(*c); - } - } - match n { - Some(i) if i == 1 => { - counters.remove(&h); - Self::reset_already_in(batch, &h); - } - None => { - // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. - //assert!(!Self::is_already_in(db, &h)); - batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); - } - _ => panic!("Invalid value in counters: {:?}", n), - } - } - } - - fn payload(&self, key: &H256) -> Option<Bytes> { - self.backing.get(&key.bytes()).expect("Low-level database error.
Some issue with your hard disk?").map(|v| v.to_vec()) - } - - fn read_counters(db: &Database) -> HashMap<H256, i32> { - let mut counters = HashMap::new(); - if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::<u64>(&val); - loop { - let mut index = 0usize; - while let Some(rlp_data) = db.get({ - let mut r = RlpStream::new_list(3); - r.append(&era); - r.append(&index); - r.append(&&PADDING[..]); - &r.drain() - }).expect("Low-level database error.") { - trace!("read_counters: era={}, index={}", era, index); - let rlp = Rlp::new(&rlp_data); - let inserts: Vec<H256> = rlp.val_at(1); - Self::replay_keys(&inserts, db, &mut counters); - index += 1; - }; - if index == 0 || era == 0 { - break; - } - era -= 1; - } - } - trace!("Recovered {} counters", counters.len()); - counters - } -} - -impl HashDB for OptionOneDB { - fn keys(&self) -> HashMap<H256, i32> { - let mut ret: HashMap<H256, i32> = HashMap::new(); - for (key, _) in self.backing.iter() { - let h = H256::from_slice(key.deref()); - ret.insert(h, 1); - } - - for (key, refs) in self.overlay.keys().into_iter() { - let refs = *ret.get(&key).unwrap_or(&0) + refs; - ret.insert(key, refs); - } - ret - } - - fn lookup(&self, key: &H256) -> Option<&[u8]> { - let k = self.overlay.raw(key); - match k { - Some(&(ref d, rc)) if rc > 0 => Some(d), - _ => { - if let Some(x) = self.payload(key) { - Some(&self.overlay.denote(key, x).0) - } - else { - None - } - } - } - } - - fn exists(&self, key: &H256) -> bool { - self.lookup(key).is_some() - } - - fn insert(&mut self, value: &[u8]) -> H256 { - self.overlay.insert(value) - } - fn emplace(&mut self, key: H256, value: Bytes) { - self.overlay.emplace(key, value); - } - fn kill(&mut self, key: &H256) { - self.overlay.kill(key); - } -} - -impl JournalDB for OptionOneDB { - fn spawn(&self) -> Box<JournalDB> { - Box::new(OptionOneDB { - overlay: MemoryDB::new(), - backing: self.backing.clone(), - counters: self.counters.clone(), - }) - } - - fn mem_used(&self) -> usize { - self.overlay.mem_used() + match self.counters { - Some(ref c) => c.read().unwrap().heap_size_of_children(), - None => 0 - } - } - - fn is_empty(&self) -> bool { - self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() - } - - fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { - // journal format: - // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, n] => [ ... ] - - // TODO: store reclaim_period. - - // When we make a new commit, we make a journal of all blocks in the recent history and record - // all keys that were inserted and deleted. The journal is ordered by era; multiple commits can - // share the same era. This forms a data structure similar to a queue but whose items are tuples. - // By the time comes to remove a tuple from the queue (i.e. then the era passes from recent history - // into ancient history) then only one commit from the tuple is considered canonical. This commit - // is kept in the main backing database, whereas any others from the same era are reverted. - // - // It is possible that a key, properly available in the backing database be deleted and re-inserted - // in the recent history queue, yet have both operations in commits that are eventually non-canonical. - // To avoid the original, and still required, key from being deleted, we maintain a reference count - // which includes an original key, if any.
- // - // The semantics of the `counter` are: - // insert key k: - // counter already contains k: count += 1 - // counter doesn't contain k: - // backing db contains k: count = 1 - // backing db doesn't contain k: insert into backing db, count = 0 - // delete key k: - // counter contains k (count is asserted to be non-zero): - // count > 1: counter -= 1 - // count == 1: remove counter - // count == 0: remove key from backing db - // counter doesn't contain k: remove key from backing db - // - // Practically, this means that for each commit block turning from recent to ancient we do the - // following: - // is_canonical: - // inserts: Ignored (left alone in the backing database). - // deletes: Enacted; however, recent history queue is checked for ongoing references. This is - // reduced as a preference to deletion from the backing database. - // !is_canonical: - // inserts: Reverted; however, recent history queue is checked for ongoing references. This is - // reduced as a preference to deletion from the backing database. - // deletes: Ignored (they were never inserted). - // - - // record new commit's details. - trace!("commit: #{} ({}), end era: {:?}", now, id, end); - let mut counters = self.counters.as_ref().unwrap().write().unwrap(); - let batch = DBTransaction::new(); - { - let mut index = 0usize; - let mut last; - - while try!(self.backing.get({ - let mut r = RlpStream::new_list(3); - r.append(&now); - r.append(&index); - r.append(&&PADDING[..]); - last = r.drain(); - &last - })).is_some() { - index += 1; - } - - let drained = self.overlay.drain(); - let removes: Vec<H256> = drained - .iter() - .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None}) - .collect(); - let inserts: Vec<(H256, Bytes)> = drained - .into_iter() - .filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None }) - .collect(); - - let mut r = RlpStream::new_list(3); - r.append(id); - - // Process the new inserts. - // We use the inserts for three things. For each: - // - we place into the backing DB or increment the counter if already in; - // - we note in the backing db that it was already in; - // - we write the key into our journal for this block; - - r.begin_list(inserts.len()); - inserts.iter().foreach(|&(k, _)| {r.append(&k);}); - r.append(&removes); - Self::insert_keys(&inserts, &self.backing, &mut counters, &batch); - try!(batch.put(&last, r.as_raw())); - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); - } - - // apply old commits' details - if let Some((end_era, canon_id)) = end { - let mut index = 0usize; - let mut last; - while let Some(rlp_data) = try!(self.backing.get({ - let mut r = RlpStream::new_list(3); - r.append(&end_era); - r.append(&index); - r.append(&&PADDING[..]); - last = r.drain(); - &last - })) { - let rlp = Rlp::new(&rlp_data); - let inserts: Vec<H256> = rlp.val_at(1); - let deletes: Vec<H256> = rlp.val_at(2); - // Collect keys to be removed.
These are removed keys for canonical block, inserted for non-canonical - Self::kill_keys(if canon_id == rlp.val_at(0) {deletes} else {inserts}, &mut counters, &batch); - try!(batch.delete(&last)); - index += 1; - } - trace!("OptionOneDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); - } - - try!(self.backing.write(batch)); -// trace!("OptionOneDB::commit() deleted {} nodes", deletes); - Ok(0) - } -} - -#[cfg(test)] -mod tests { - use common::*; - use super::*; - use hashdb::*; - use journaldb::traits::JournalDB; - - #[test] - fn insert_same_in_fork() { - // history is 1 - let mut jdb = OptionOneDB::new_temp(); - - let x = jdb.insert(b"X"); - jdb.commit(1, &b"1".sha3(), None).unwrap(); - jdb.commit(2, &b"2".sha3(), None).unwrap(); - jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); - jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); - - jdb.remove(&x); - jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); - let x = jdb.insert(b"X"); - jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); - - jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); - jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); - - assert!(jdb.exists(&x)); - } - - #[test] - fn long_history() { - // history is 3 - let mut jdb = OptionOneDB::new_temp(); - let h = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - assert!(jdb.exists(&h)); - jdb.remove(&h); - jdb.commit(1, &b"1".sha3(), None).unwrap(); - assert!(jdb.exists(&h)); - jdb.commit(2, &b"2".sha3(), None).unwrap(); - assert!(jdb.exists(&h)); - jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); - assert!(jdb.exists(&h)); - jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(!jdb.exists(&h)); - } - - #[test] - fn complex() { - // history is 1 - let mut jdb = OptionOneDB::new_temp(); - - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - - jdb.remove(&foo); - jdb.remove(&bar); - let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - assert!(jdb.exists(&baz)); - - let foo = jdb.insert(b"foo"); - jdb.remove(&baz); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(!jdb.exists(&bar)); - assert!(jdb.exists(&baz)); - - jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(!jdb.exists(&bar)); - assert!(!jdb.exists(&baz)); - - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - assert!(!jdb.exists(&foo)); - assert!(!jdb.exists(&bar)); - assert!(!jdb.exists(&baz)); - } - - #[test] - fn fork() { - // history is 1 - let mut jdb = OptionOneDB::new_temp(); - - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); - - jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); - - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - assert!(jdb.exists(&baz)); - - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(!jdb.exists(&baz)); - assert!(!jdb.exists(&bar)); - } - - 
#[test] - fn overwrite() { - // history is 1 - let mut jdb = OptionOneDB::new_temp(); - - let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - assert!(jdb.exists(&foo)); - - jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); - jdb.insert(b"foo"); - assert!(jdb.exists(&foo)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - } - - #[test] - fn fork_same_key() { - // history is 1 - let mut jdb = OptionOneDB::new_temp(); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - - let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); - - jdb.insert(b"foo"); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - - jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - } - - - #[test] - fn reopen() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - let bar = H256::random(); - - let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.emplace(bar.clone(), b"bar".to_vec()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - foo - }; - - { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); - jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); - } - - { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(!jdb.exists(&foo)); - } - } - - #[test] - fn reopen_remove() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - - let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); - - // foo is ancient history. 
- - jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - foo - }; - - { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); - jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); - assert!(!jdb.exists(&foo)); - } - } - #[test] - fn reopen_fork() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - let (foo, bar, baz) = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); - - jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); - (foo, bar, baz) - }; - - { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(!jdb.exists(&baz)); - assert!(!jdb.exists(&bar)); - } - } -} diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs new file mode 100644 index 000000000..8dd4d1752 --- /dev/null +++ b/util/src/journaldb/overlayrecentdb.rs @@ -0,0 +1,873 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +//! JournalDB over in-memory overlay + +use common::*; +use rlp::*; +use hashdb::*; +use memorydb::*; +use kvdb::{Database, DBTransaction, DatabaseConfig}; +#[cfg(test)] +use std::env; +use super::JournalDB; + +/// Implementation of the JournalDB trait for a disk-backed database with a memory overlay +/// and, possibly, latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before +/// the removals actually take effect. +/// +/// There are two memory overlays: +/// - The transaction overlay contains current transaction data. It is merged with the history +/// overlay on each `commit()`. +/// - The history overlay contains all data inserted during the history period. When a node +/// in the overlay becomes ancient it is written to disk on `commit()`. +/// +/// There is also a journal, maintained both in memory and on disk, which lists the insertions +/// and removals for each commit during the history period. This is used to track +/// data nodes that go out of history scope and must be written to disk. +/// +/// Commit workflow: +/// 1. Create a new journal record from the transaction overlay. +/// 2.
Insert each node from the transaction overlay into the history overlay, incrementing the reference +/// count if it is already there. Note that the reference counting is managed by `MemoryDB`. +/// 3. Clear the transaction overlay. +/// 4. For a canonical journal record that becomes ancient, insert its insertions into the disk DB. +/// 5. For each journal record that goes out of history scope (becomes ancient), remove its +/// insertions from the history overlay, decrementing the reference counter and removing the entry +/// if it reaches zero. +/// 6. For a canonical journal record that becomes ancient, delete its removals from the disk only if +/// the removed key is not present in the history overlay. +/// 7. Delete the ancient record from memory and disk. +/// +pub struct OverlayRecentDB { + transaction_overlay: MemoryDB, + backing: Arc<Database>, + journal_overlay: Arc<RwLock<JournalOverlay>>, +} + +#[derive(PartialEq)] +struct JournalOverlay { + backing_overlay: MemoryDB, + journal: HashMap<u64, Vec<JournalEntry>>, + latest_era: u64, +} + +#[derive(PartialEq)] +struct JournalEntry { + id: H256, + insertions: Vec<H256>, + deletions: Vec<H256>, +} + +impl HeapSizeOf for JournalEntry { + fn heap_size_of_children(&self) -> usize { + self.insertions.heap_size_of_children() + self.deletions.heap_size_of_children() + } +} + +impl Clone for OverlayRecentDB { + fn clone(&self) -> OverlayRecentDB { + OverlayRecentDB { + transaction_overlay: MemoryDB::new(), + backing: self.backing.clone(), + journal_overlay: self.journal_overlay.clone(), + } + } +} + +// all keys must be at least 12 bytes +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_VERSION : u32 = 0x200 + 3; +const PADDING : [u8; 10] = [ 0u8; 10 ]; + +impl OverlayRecentDB { + /// Create a new instance from file + pub fn new(path: &str) -> OverlayRecentDB { + Self::from_prefs(path) + } + + /// Create a new instance from file + pub fn from_prefs(path: &str) -> OverlayRecentDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + if !backing.is_empty() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) { + Ok(Some(DB_VERSION)) => {} + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + + let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&backing))); + OverlayRecentDB { + transaction_overlay: MemoryDB::new(), + backing: Arc::new(backing), + journal_overlay: journal_overlay, + } + } + + /// Create a new instance with an anonymous temporary database. + #[cfg(test)] + pub fn new_temp() -> OverlayRecentDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(dir.to_str().unwrap()) + } + + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_overlay(&self.backing); + let journal_overlay = self.journal_overlay.read().unwrap(); + *journal_overlay == reconstructed + } + + fn payload(&self, key: &H256) -> Option<Bytes> { + self.backing.get(&key.bytes()).expect("Low-level database error.
Some issue with your hard disk?").map(|v| v.to_vec()) + } + + fn read_overlay(db: &Database) -> JournalOverlay { + let mut journal = HashMap::new(); + let mut overlay = MemoryDB::new(); + let mut count = 0; + let mut latest_era = 0; + if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { + latest_era = decode::<u64>(&val); + let mut era = latest_era; + loop { + let mut index = 0usize; + while let Some(rlp_data) = db.get({ + let mut r = RlpStream::new_list(3); + r.append(&era); + r.append(&index); + r.append(&&PADDING[..]); + &r.drain() + }).expect("Low-level database error.") { + trace!("read_overlay: era={}, index={}", era, index); + let rlp = Rlp::new(&rlp_data); + let id: H256 = rlp.val_at(0); + let insertions = rlp.at(1); + let deletions: Vec<H256> = rlp.val_at(2); + let mut inserted_keys = Vec::new(); + for r in insertions.iter() { + let k: H256 = r.val_at(0); + let v: Bytes = r.val_at(1); + overlay.emplace(k.clone(), v); + inserted_keys.push(k); + count += 1; + } + journal.entry(era).or_insert_with(Vec::new).push(JournalEntry { + id: id, + insertions: inserted_keys, + deletions: deletions, + }); + index += 1; + }; + if index == 0 || era == 0 { + break; + } + era -= 1; + } + } + trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); + JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era } + } +} + +impl JournalDB for OverlayRecentDB { + fn spawn(&self) -> Box<JournalDB> { + Box::new(self.clone()) + } + + fn mem_used(&self) -> usize { + let mut mem = self.transaction_overlay.mem_used(); + let overlay = self.journal_overlay.read().unwrap(); + mem += overlay.backing_overlay.mem_used(); + mem += overlay.journal.heap_size_of_children(); + mem + } + + fn is_empty(&self) -> bool { + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { + // record new commit's details. + trace!("commit: #{} ({}), end era: {:?}", now, id, end); + let mut journal_overlay = self.journal_overlay.write().unwrap(); + let batch = DBTransaction::new(); + { + let mut r = RlpStream::new_list(3); + let mut tx = self.transaction_overlay.drain(); + let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect(); + let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect(); + // Increase counter for each inserted key no matter if the block is canonical or not.
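+ // Split the drained transaction overlay: entries with a positive ref count are + // this commit's insertions, negative ones its removals; both key lists go into + // the journal record so that `read_overlay` can rebuild this state on restart.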
+ let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }); + r.append(id); + r.begin_list(inserted_keys.len()); + for (k, v) in insertions { + r.begin_list(2); + r.append(&k); + r.append(&v); + journal_overlay.backing_overlay.emplace(k, v); + } + r.append(&removed_keys); + + let mut k = RlpStream::new_list(3); + let index = journal_overlay.journal.get(&now).map(|j| j.len()).unwrap_or(0); + k.append(&now); + k.append(&index); + k.append(&&PADDING[..]); + try!(batch.put(&k.drain(), r.as_raw())); + if now >= journal_overlay.latest_era { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + journal_overlay.latest_era = now; + } + journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys }); + } + + let journal_overlay = journal_overlay.deref_mut(); + // apply old commits' details + if let Some((end_era, canon_id)) = end { + if let Some(ref mut records) = journal_overlay.journal.get_mut(&end_era) { + let mut canon_insertions: Vec<(H256, Bytes)> = Vec::new(); + let mut canon_deletions: Vec<H256> = Vec::new(); + let mut overlay_deletions: Vec<H256> = Vec::new(); + let mut index = 0usize; + for mut journal in records.drain(..) { + //delete the record from the db + let mut r = RlpStream::new_list(3); + r.append(&end_era); + r.append(&index); + r.append(&&PADDING[..]); + try!(batch.delete(&r.drain())); + trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len()); + { + if canon_id == journal.id { + for h in &journal.insertions { + if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { + if rc > 0 { + canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy + } + } + } + canon_deletions = journal.deletions; + } + overlay_deletions.append(&mut journal.insertions); + } + index += 1; + } + // apply canon inserts first + for (k, v) in canon_insertions { + try!(batch.put(&k, &v)); + } + // update the overlay + for k in overlay_deletions { + journal_overlay.backing_overlay.kill(&k); + } + // apply canon deletions + for k in canon_deletions { + if !journal_overlay.backing_overlay.exists(&k) { + try!(batch.delete(&k)); + } + } + journal_overlay.backing_overlay.purge(); + } + journal_overlay.journal.remove(&end_era); + } + try!(self.backing.write(batch)); + Ok(0) + } + +} + +impl HashDB for OverlayRecentDB { + fn keys(&self) -> HashMap<H256, i32> { + let mut ret: HashMap<H256, i32> = HashMap::new(); + for (key, _) in self.backing.iter() { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.transaction_overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.transaction_overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + let v = self.journal_overlay.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec()); + match v { + Some(x) => { + Some(&self.transaction_overlay.denote(key, x).0) + } + _ => { + if let Some(x) = self.payload(key) { + Some(&self.transaction_overlay.denote(key, x).0) + } + else { + None + } + } + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.transaction_overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: Bytes) { +
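// Same buffering as `insert`, but under a caller-supplied key; nothing reaches + // disk until `commit()` drains the transaction overlay. +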
self.transaction_overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.transaction_overlay.kill(key); + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use hashdb::*; + use log::init_log; + use journaldb::JournalDB; + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = OverlayRecentDB::new_temp(); + + let x = jdb.insert(b"X"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&x); + jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + let x = jdb.insert(b"X"); + jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&x)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = OverlayRecentDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = OverlayRecentDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = OverlayRecentDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + 
assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = OverlayRecentDB::new_temp(); + + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + assert!(jdb.exists(&foo)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key_one() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key_other() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + 
assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + foo + }; + + { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + 
// expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + + #[test] + fn reopen_test() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + init_log(); + + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = b"foo".sha3(); + + { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 1 + jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
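+ // Removing it now only journals the deletion; the on-disk copy must outlive + // the removal until that journal entry itself becomes ancient.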
+ + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + (foo, bar, baz) + }; + + { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } +} diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index ea97cc80e..6a5efc87d 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -131,9 +131,15 @@ impl AccountService { } } +impl Default for SecretStore { + fn default() -> Self { + SecretStore::new() + } +} + impl SecretStore { /// new instance of Secret Store in default home directory - pub fn new() -> SecretStore { + pub fn new() -> Self { let mut path = ::std::env::home_dir().expect("Failed to get home dir"); path.push(".parity"); path.push("keys"); @@ -142,7 +148,7 @@ impl SecretStore { } /// new instance of Secret Store in specific directory - pub fn new_in(path: &Path) -> SecretStore { + pub fn new_in(path: &Path) -> Self { SecretStore { directory: KeyDirectory::new(path), unlocks: RwLock::new(HashMap::new()), @@ -214,12 +220,12 @@ impl SecretStore { /// Creates new account pub fn new_account(&mut self, pass: &str) -> Result { - let secret = H256::random(); + let key_pair = crypto::KeyPair::create().expect("Error creating key-pair. 
Something wrong with crypto libraries?"); + let address = Address::from(key_pair.public().sha3()); let key_id = H128::random(); - self.insert(key_id.clone(), secret, pass); + self.insert(key_id.clone(), key_pair.secret().clone(), pass); let mut key_file = self.directory.get(&key_id).expect("the key was just inserted"); - let address = Address::random(); key_file.account = Some(address); try!(self.directory.save(key_file)); Ok(address) @@ -381,6 +387,7 @@ mod tests { use super::*; use devtools::*; use common::*; + use crypto::KeyPair; #[test] fn can_insert() { @@ -555,4 +562,15 @@ mod tests { let accounts = sstore.accounts().unwrap(); assert_eq!(30, accounts.len()); } + + #[test] + fn validate_generated_addresses() { + let temp = RandomTempPath::create_dir(); + let mut sstore = SecretStore::new_test(&temp); + let addr = sstore.new_account("test").unwrap(); + let _ok = sstore.unlock_account(&addr, "test").unwrap(); + let secret = sstore.account_secret(&addr).unwrap(); + let kp = KeyPair::from_secret(secret).unwrap(); + assert_eq!(Address::from(kp.public().sha3()), addr); + } } diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index a2fa2215a..df5c2c448 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -16,6 +16,7 @@ //! Key-Value store abstraction with RocksDB backend. +use std::default::Default; use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBVector, DBIterator, IndexType, Options, DBCompactionStyle, BlockBasedOptions, Direction}; @@ -24,6 +25,12 @@ pub struct DBTransaction { batch: WriteBatch, } +impl Default for DBTransaction { + fn default() -> Self { + DBTransaction::new() + } +} + impl DBTransaction { /// Create new transaction. pub fn new() -> DBTransaction { diff --git a/util/src/lib.rs b/util/src/lib.rs index b22cba55d..b97a157ff 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -27,6 +27,8 @@ #![cfg_attr(feature="dev", allow(match_same_arms))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(feature="dev", allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(feature="dev", allow(if_not_else))] //! Ethcore-util library //! diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 9cd018935..bada4c4c6 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -24,6 +24,7 @@ use hashdb::*; use heapsize::*; use std::mem; use std::collections::HashMap; +use std::default::Default; #[derive(Debug,Clone)] /// Reference-counted memory-based HashDB implementation. @@ -32,7 +33,7 @@ use std::collections::HashMap; /// with `kill()`, check for existence with `exists()` and lookup a hash to derive /// the data with `lookup()`. Clear with `clear()` and purge the portions of the data /// that have no references with `purge()`. -/// +/// /// # Example /// ```rust /// extern crate ethcore_util; /// @@ -69,11 +70,18 @@ use std::collections::HashMap; /// assert!(!m.exists(&k)); /// } /// ``` +#[derive(PartialEq)] pub struct MemoryDB { data: HashMap<H256, (Bytes, i32)>, static_null_rlp: (Bytes, i32), } +impl Default for MemoryDB { + fn default() -> Self { + MemoryDB::new() + } +} + impl MemoryDB { /// Create a new instance of the memory DB. pub fn new() -> MemoryDB { @@ -133,7 +141,7 @@ impl MemoryDB { /// Denote that an existing value has the given key. Used when a key gets removed without /// a prior insert and thus has a negative reference with no value. - /// + /// /// May safely be called even if the key's value is known, in which case it will be a no-op.
diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs
index 4f3384894..a381b49f8 100644
--- a/util/src/network/discovery.rs
+++ b/util/src/network/discovery.rs
@@ -19,6 +19,7 @@ use std::net::SocketAddr;
 use std::collections::{HashSet, HashMap, BTreeMap, VecDeque};
 use std::mem;
 use std::cmp;
+use std::default::Default;
 use mio::*;
 use mio::udp::*;
 use sha3::*;
@@ -62,8 +63,14 @@ struct NodeBucket {
 	nodes: VecDeque<NodeId>, //sorted by last active
 }
 
+impl Default for NodeBucket {
+	fn default() -> Self {
+		NodeBucket::new()
+	}
+}
+
 impl NodeBucket {
-	fn new() -> NodeBucket {
+	fn new() -> Self {
 		NodeBucket {
 			nodes: VecDeque::new()
 		}
diff --git a/util/src/network/host.rs b/util/src/network/host.rs
index ece24a1d1..f63c75e9f 100644
--- a/util/src/network/host.rs
+++ b/util/src/network/host.rs
@@ -23,6 +23,7 @@ use std::ops::*;
 use std::cmp::min;
 use std::path::{Path, PathBuf};
 use std::io::{Read, Write};
+use std::default::Default;
 use std::fs;
 use mio::*;
 use mio::tcp::*;
@@ -75,9 +76,15 @@ pub struct NetworkConfiguration {
 	pub ideal_peers: u32,
 }
 
+impl Default for NetworkConfiguration {
+	fn default() -> Self {
+		NetworkConfiguration::new()
+	}
+}
+
 impl NetworkConfiguration {
 	/// Create a new instance of default settings.
-	pub fn new() -> NetworkConfiguration {
+	pub fn new() -> Self {
 		NetworkConfiguration {
 			config_path: None,
 			listen_address: None,
diff --git a/util/src/panics.rs b/util/src/panics.rs
index 05d266b8b..980d4fc69 100644
--- a/util/src/panics.rs
+++ b/util/src/panics.rs
@@ -19,6 +19,7 @@
 use std::thread;
 use std::ops::DerefMut;
 use std::sync::{Arc, Mutex};
+use std::default::Default;
 
 /// Thread-safe closure for handling possible panics
 pub trait OnPanicListener: Send + Sync + 'static {
@@ -56,14 +57,20 @@ pub struct PanicHandler {
 	listeners: Mutex<Vec<Box<OnPanicListener>>>
 }
 
+impl Default for PanicHandler {
+	fn default() -> Self {
+		PanicHandler::new()
+	}
+}
+
 impl PanicHandler {
 	/// Creates new `PanicHandler` wrapped in `Arc`
-	pub fn new_in_arc() -> Arc<PanicHandler> {
+	pub fn new_in_arc() -> Arc<Self> {
 		Arc::new(Self::new())
 	}
 
 	/// Creates new `PanicHandler`
-	pub fn new() -> PanicHandler {
+	pub fn new() -> Self {
 		PanicHandler {
 			listeners: Mutex::new(vec![])
 		}
diff --git a/util/src/rlp/rlpstream.rs b/util/src/rlp/rlpstream.rs
index ba70e7b2b..7bf3d3cdd 100644
--- a/util/src/rlp/rlpstream.rs
+++ b/util/src/rlp/rlpstream.rs
@@ -15,6 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::ops::Deref;
+use std::default::Default;
 use elastic_array::*;
 use rlp::bytes::{ToBytes, VecLike};
 use rlp::{Stream, Encoder, Encodable};
@@ -44,6 +45,12 @@ pub struct RlpStream {
 	finished_list: bool,
 }
 
+impl Default for RlpStream {
+	fn default() -> Self {
+		RlpStream::new()
+	}
+}
+
 impl Stream for RlpStream {
 	fn new() -> Self {
 		RlpStream {
@@ -190,8 +197,14 @@ struct BasicEncoder {
 	bytes: ElasticArray1024<u8>,
 }
 
+impl Default for BasicEncoder {
+	fn default() -> Self {
+		BasicEncoder::new()
+	}
+}
+
 impl BasicEncoder {
-	fn new() -> BasicEncoder {
+	fn new() -> Self {
 		BasicEncoder { bytes: ElasticArray1024::new() }
 	}
@@ -222,7 +235,7 @@ impl Encoder for BasicEncoder {
 			// just 0
 			0 => self.bytes.push(0x80u8),
 			// byte is its own encoding if < 0x80
-			1 => { 
+			1 => {
 				value.to_bytes(&mut self.bytes);
 				let len = self.bytes.len();
 				let last_byte = self.bytes[len - 1];
diff --git a/util/src/table.rs b/util/src/table.rs
index e41209608..5ba572289 100644
--- a/util/src/table.rs
+++ b/util/src/table.rs
@@ -16,6 +16,7 @@
 
 //! A collection associating pair of keys (row and column) with a single value.
 
+use std::default::Default;
 use std::hash::Hash;
 use std::collections::HashMap;
 
@@ -30,11 +31,21 @@ pub struct Table<Row, Col, Val>
 	map: HashMap<Row, HashMap<Col, Val>>,
 }
 
+impl<Row, Col, Val> Default for Table<Row, Col, Val>
+	where Row: Eq + Hash + Clone,
+		  Col: Eq + Hash {
+	fn default() -> Self {
+		Table::new()
+	}
+}
+
+// There is a Default impl but clippy does not detect it?
+#[allow(new_without_default)]
 impl<Row, Col, Val> Table<Row, Col, Val>
 	where Row: Eq + Hash + Clone,
 		  Col: Eq + Hash {
 	/// Creates new Table
-	pub fn new() -> Table<Row, Col, Val> {
+	pub fn new() -> Self {
 		Table {
 			map: HashMap::new(),
 		}
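`Table` is the only generic case and shows two wrinkles: the `Default` impl must restate the inherent impl's `where` bounds, and the author still adds `#[allow(new_without_default)]`, presumably because the lint in clippy 0.0.49 fails to spot a `Default` impl hidden behind a `where` clause. A sketch of the same shape with a hypothetical `Grid` type; a manual impl is used because `#[derive(Default)]` would impose unwanted `Row: Default, Col: Default, Val: Default` bounds:

```rust
use std::collections::HashMap;
use std::hash::Hash;

pub struct Grid<Row, Col, Val>
    where Row: Eq + Hash + Clone,
          Col: Eq + Hash {
    map: HashMap<Row, HashMap<Col, Val>>,
}

// The impl must repeat the bounds under which `new()` exists at all.
impl<Row, Col, Val> Default for Grid<Row, Col, Val>
    where Row: Eq + Hash + Clone,
          Col: Eq + Hash {
    fn default() -> Self {
        Grid::new()
    }
}

impl<Row, Col, Val> Grid<Row, Col, Val>
    where Row: Eq + Hash + Clone,
          Col: Eq + Hash {
    pub fn new() -> Self {
        Grid { map: HashMap::new() }
    }

    pub fn insert(&mut self, row: Row, col: Col, val: Val) {
        self.map.entry(row).or_insert_with(HashMap::new).insert(col, val);
    }
}

fn main() {
    // `default()` is now usable wherever the bounds are satisfied.
    let mut grid: Grid<String, u32, bool> = Grid::default();
    grid.insert("row".to_string(), 1, true);
    assert_eq!(grid.map.len(), 1);
}
```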
diff --git a/util/src/trie/journal.rs b/util/src/trie/journal.rs
index db16a313d..4ffd7cf5c 100644
--- a/util/src/trie/journal.rs
+++ b/util/src/trie/journal.rs
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
+use std::default::Default;
 use sha3::*;
 use hash::H256;
 use bytes::*;
@@ -39,6 +40,12 @@ pub struct Score {
 #[derive(Debug)]
 pub struct Journal (Vec<Operation>);
 
+impl Default for Journal {
+	fn default() -> Self {
+		Journal::new()
+	}
+}
+
 impl Journal {
 	/// Create a new, empty, object.
 	pub fn new() -> Journal { Journal(vec![]) }
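`trie/journal.rs` closes the commit with the same one-line delegation for a newtype. The payoff of all these impls is interoperability with `Default`-bounded APIs in std, such as `Option::unwrap_or_default` or generic `T: Default` code. A small self-contained example with an illustrative `Log` newtype:

```rust
// `Log` mirrors the `Journal(Vec<...>)` newtype shape; illustrative only.
#[derive(Debug)]
pub struct Log(Vec<String>);

impl Default for Log {
    fn default() -> Self {
        Log::new()
    }
}

impl Log {
    pub fn new() -> Log { Log(vec![]) }

    pub fn record(&mut self, entry: &str) {
        self.0.push(entry.to_string());
    }
}

fn main() {
    // `unwrap_or_default` requires `Log: Default` -- exactly what the
    // delegating impl provides.
    let maybe: Option<Log> = None;
    let mut log = maybe.unwrap_or_default();
    log.record("entry");
    assert_eq!(log.0.len(), 1);
}
```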