Merge pull request #7438 from paritytech/beta-backports-kvdb

[beta] kvdb backports
Marek Kotewicz 2018-01-03 16:08:56 +01:00 committed by GitHub
commit 849e574c2c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
65 changed files with 1258 additions and 1159 deletions
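In short, the backport splits the old monolithic `kvdb` crate: the `KeyValueDB` trait stays in `kvdb`, the in-memory test backend moves to `kvdb-memorydb`, and the RocksDB backend moves to `kvdb-rocksdb`. A minimal before/after sketch of a typical call site this touches (an illustration against the APIs visible in the diffs below, not part of the patch):

```rust
extern crate kvdb;
extern crate kvdb_memorydb;

use std::sync::Arc;
use kvdb::KeyValueDB;

fn main() {
	// Before: `let db = Arc::new(kvdb::in_memory(0));`
	// After: the in-memory backend lives in its own crate.
	let db: Arc<KeyValueDB> = Arc::new(kvdb_memorydb::create(0));
	assert!(db.get(None, b"missing").unwrap().is_none());
}
```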

Cargo.lock (generated)
View File

@@ -233,6 +233,9 @@ dependencies = [
 name = "cc"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]

 [[package]]
 name = "cfg-if"
@@ -587,6 +590,8 @@ dependencies = [
  "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
  "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -596,7 +601,7 @@ dependencies = [
  "migration 0.1.0",
  "native-contracts 0.1.0",
  "num 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-machine 0.1.0",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "patricia_trie 0.1.0",
@@ -744,6 +749,8 @@ dependencies = [
  "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "memorydb 0.1.0",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -830,6 +837,7 @@ dependencies = [
  "hash 0.1.0",
  "hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "native-contracts 0.1.0",
@@ -885,6 +893,7 @@ dependencies = [
  "hashdb 0.1.0",
  "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1010,6 +1019,7 @@ dependencies = [
  "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "macros 0.1.0",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1128,7 +1138,7 @@ version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -1263,7 +1273,7 @@ dependencies = [
  "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1500,11 +1510,28 @@ version = "0.1.0"
 dependencies = [
  "elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ethcore-bigint 0.1.3",
  "ethcore-bytes 0.1.0",
+]
+
+[[package]]
+name = "kvdb-memorydb"
+version = "0.1.0"
+dependencies = [
+ "kvdb 0.1.0",
+ "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rlp 0.2.0",
+]
+
+[[package]]
+name = "kvdb-rocksdb"
+version = "0.1.0"
+dependencies = [
+ "elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ethcore-bigint 0.1.3",
  "ethcore-devtools 1.8.0",
- "hashdb 0.1.0",
+ "kvdb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.2.0",
@@ -1656,6 +1683,7 @@ version = "0.1.0"
 dependencies = [
  "ethcore-devtools 1.8.0",
  "kvdb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "macros 0.1.0",
 ]
@@ -1845,7 +1873,7 @@ dependencies = [
  "ethcore-network 1.8.0",
  "ethcore-util 1.8.5",
  "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
- "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "native-contracts 0.1.0",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1960,7 +1988,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "num_cpus"
-version = "1.6.2"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2076,12 +2104,12 @@ dependencies = [
  "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "isatty 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
- "kvdb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "migration 0.1.0",
  "node-filter 1.8.0",
  "node-health 0.1.0",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "number_prefix 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "panic_hook 0.1.0",
  "parity-dapps 1.8.0",
@@ -2223,6 +2251,7 @@ dependencies = [
  "ethcore-util 1.8.5",
  "ethkey 0.2.0",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.2.0",
  "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2280,7 +2309,7 @@ dependencies = [
  "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
  "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
  "jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
- "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "macros 0.1.0",
  "multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2693,7 +2722,7 @@ dependencies = [
  "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2780,7 +2809,7 @@ dependencies = [
 [[package]]
 name = "rocksdb"
 version = "0.4.5"
-source = "git+https://github.com/paritytech/rust-rocksdb#4364caec4dd5da1a1d78c39276774ee65bf55c7d"
+source = "git+https://github.com/paritytech/rust-rocksdb#166e14ed63cbd2e44b51267b8b98e4b89b0f236f"
 dependencies = [
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
  "local-encoding 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2790,10 +2819,11 @@ dependencies = [
 [[package]]
 name = "rocksdb-sys"
 version = "0.3.0"
-source = "git+https://github.com/paritytech/rust-rocksdb#4364caec4dd5da1a1d78c39276774ee65bf55c7d"
+source = "git+https://github.com/paritytech/rust-rocksdb#166e14ed63cbd2e44b51267b8b98e4b89b0f236f"
 dependencies = [
- "gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
+ "snappy-sys 0.1.0 (git+https://github.com/paritytech/rust-snappy)",
 ]

 [[package]]
@@ -3090,6 +3120,15 @@ dependencies = [
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "snappy-sys"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/rust-snappy#858eac97192ea25d18d3f3626a8cc13ca0b175bb"
+dependencies = [
+ "gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "spmc"
 version = "0.2.2"
@@ -3803,7 +3842,7 @@ dependencies = [
 "checksum num-iter 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)" = "7485fcc84f85b4ecd0ea527b14189281cf27d60e583ae65ebc9c088b13dffe01"
 "checksum num-rational 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "288629c76fac4b33556f4b7ab57ba21ae202da65ba8b77466e6d598e31990790"
 "checksum num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "99843c856d68d8b4313b03a17e33c4bb42ae8f6610ea81b28abe076ac721b9b0"
-"checksum num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aec53c34f2d0247c5ca5d32cca1478762f301740468ee9ee6dcb7a0dd7a0c584"
+"checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d"
 "checksum number_prefix 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "59a14be9c211cb9c602bad35ac99f41e9a84b44d71b8cbd3040e3bd02a214902"
 "checksum odds 0.2.25 (registry+https://github.com/rust-lang/crates.io-index)" = "c3df9b730298cea3a1c3faa90b7e2f9df3a9c400d0936d6015e6165734eefcba"
 "checksum ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2c49021782e5233cd243168edfa8037574afed4eba4bbaf538b3d8d1789d8c"
@@ -3888,6 +3927,7 @@ dependencies = [
 "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"
 "checksum smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013"
 "checksum smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8fcd03faf178110ab0334d74ca9631d77f94c8c11cc77fcb59538abf0025695d"
+"checksum snappy-sys 0.1.0 (git+https://github.com/paritytech/rust-snappy)" = "<none>"
 "checksum spmc 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cd1f11d1fb5fd41834e55ce0b85a186efbf2f2afd9fdb09e2c8d72f9bff1ad1a"
 "checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b"
 "checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"

View File

@@ -63,7 +63,7 @@ path = { path = "util/path" }
 panic_hook = { path = "panic_hook" }
 hash = { path = "util/hash" }
 migration = { path = "util/migration" }
-kvdb = { path = "util/kvdb" }
+kvdb-rocksdb = { path = "util/kvdb-rocksdb" }
 parity-dapps = { path = "dapps", optional = true }
 clippy = { version = "0.0.103", optional = true}

View File

@@ -56,6 +56,8 @@ rand = "0.3"
 rlp = { path = "../util/rlp" }
 rlp_derive = { path = "../util/rlp_derive" }
 kvdb = { path = "../util/kvdb" }
+kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
+kvdb-memorydb = { path = "../util/kvdb-memorydb" }
 util-error = { path = "../util/error" }
 snappy = { path = "../util/snappy" }
 migration = { path = "../util/migration" }

View File

@@ -40,6 +40,8 @@ stats = { path = "../../util/stats" }
 hash = { path = "../../util/hash" }
 triehash = { path = "../../util/triehash" }
 kvdb = { path = "../../util/kvdb" }
+kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
+kvdb-memorydb = { path = "../../util/kvdb-memorydb" }

 [features]
 default = []

View File

@@ -728,13 +728,14 @@ mod tests {
	use ethcore::header::Header;
	use ethcore::spec::Spec;
	use cache::Cache;
-	use kvdb::{in_memory, KeyValueDB};
+	use kvdb::KeyValueDB;
+	use kvdb_memorydb;
	use time::Duration;
	use parking_lot::Mutex;

	fn make_db() -> Arc<KeyValueDB> {
-		Arc::new(in_memory(0))
+		Arc::new(kvdb_memorydb::create(0))
	}

	#[test]

View File

@@ -36,7 +36,8 @@ use bigint::prelude::U256;
 use bigint::hash::H256;
 use futures::{IntoFuture, Future};
-use kvdb::{KeyValueDB, CompactionProfile};
+use kvdb::KeyValueDB;
+use kvdb_rocksdb::CompactionProfile;

 use self::fetch::ChainDataFetcher;
 use self::header_chain::{AncestryIter, HeaderChain};
@@ -214,7 +215,7 @@ impl<T: ChainDataFetcher> Client<T> {
		io_channel: IoChannel<ClientIoMessage>,
		cache: Arc<Mutex<Cache>>
	) -> Self {
-		let db = ::kvdb::in_memory(0);
+		let db = ::kvdb_memorydb::create(0);

		Client::new(
			config,

View File

@@ -25,7 +25,7 @@ use ethcore::db;
 use ethcore::service::ClientIoMessage;
 use ethcore::spec::Spec;
 use io::{IoContext, IoError, IoHandler, IoService};
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};

 use cache::Cache;
 use parking_lot::Mutex;
@@ -63,11 +63,7 @@ impl<T: ChainDataFetcher> Service<T> {
		// initialize database.
		let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS);

-		// give all rocksdb cache to the header chain column.
-		if let Some(size) = config.db_cache_size {
-			db_config.set_cache(db::COL_LIGHT_CHAIN, size);
-		}
+		db_config.memory_budget = config.db_cache_size;
		db_config.compaction = config.db_compaction;
		db_config.wal = config.db_wal;
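With the per-column `set_cache` knob gone, callers hand RocksDB a single overall budget. A sketch of the new open path, assuming the `Database::open(&DatabaseConfig, &str) -> Result<Database, String>` signature that `kvdb-rocksdb` carries over from the old crate (`open_db` and its arguments are hypothetical):

```rust
extern crate kvdb_rocksdb;

use kvdb_rocksdb::{Database, DatabaseConfig};

// Hypothetical helper: open a one-column database with an optional
// whole-database memory budget (in MB) instead of per-column caches.
fn open_db(path: &str, cache_mb: Option<usize>) -> Result<Database, String> {
	let mut db_config = DatabaseConfig::with_columns(Some(1));
	db_config.memory_budget = cache_mb;
	Database::open(&db_config, path)
}
```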

View File

@@ -92,6 +92,8 @@ extern crate vm;
 extern crate hash;
 extern crate triehash;
 extern crate kvdb;
+extern crate kvdb_memorydb;
+extern crate kvdb_rocksdb;

 #[cfg(feature = "ipc")]
 extern crate ethcore_ipc as ipc;

View File

@@ -11,10 +11,12 @@ ethcore = { path = ".."}
 ethcore-util = { path = "../../util" }
 ethcore-bigint = { path = "../../util/bigint" }
 ethcore-bytes = { path = "../../util/bytes" }
-ethcore-io = { path = "../../util/io" }
 ethcore-network = { path = "../../util/network" }
-kvdb = { path = "../../util/kvdb" }
 native-contracts = { path = "../native_contracts" }
 futures = "0.1"
 log = "0.3"
 parking_lot = "0.4"
+
+[dev-dependencies]
+kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
+ethcore-io = { path = "../../util/io" }

View File

@@ -24,9 +24,14 @@ extern crate ethcore_network as network;
 extern crate native_contracts;
 extern crate futures;
 extern crate parking_lot;
-extern crate kvdb;

-#[cfg(test)] extern crate ethcore_io as io;
-#[macro_use] extern crate log;
+#[macro_use]
+extern crate log;
+
+#[cfg(test)]
+extern crate kvdb_memorydb;
+#[cfg(test)]
+extern crate ethcore_io as io;

 use std::sync::Weak;
 use std::collections::HashMap;
@@ -135,7 +140,7 @@
	let contract_addr = Address::from_str("0000000000000000000000000000000000000005").unwrap();
	let data = include_bytes!("../res/node_filter.json");
	let spec = Spec::load(&::std::env::temp_dir(), &data[..]).unwrap();
-	let client_db = Arc::new(::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));
+	let client_db = Arc::new(::kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));

	let client = Client::new(
		ClientConfig::default(),

View File

@@ -1479,7 +1479,8 @@ mod tests {
	use std::sync::Arc;
	use rustc_hex::FromHex;
	use hash::keccak;
-	use kvdb::{in_memory, KeyValueDB};
+	use kvdb::KeyValueDB;
+	use kvdb_memorydb;
	use bigint::hash::*;
	use receipt::{Receipt, TransactionOutcome};
	use blockchain::{BlockProvider, BlockChain, Config, ImportRoute};
@@ -1493,7 +1494,7 @@
	use header::BlockNumber;

	fn new_db() -> Arc<KeyValueDB> {
-		Arc::new(in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
+		Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)))
	}

	fn new_chain(genesis: &[u8], db: Arc<KeyValueDB>) -> BlockChain {

View File

@@ -23,8 +23,6 @@ pub struct Config {
	pub pref_cache_size: usize,
	/// Maximum cache size in bytes.
	pub max_cache_size: usize,
-	/// Backing db cache_size
-	pub db_cache_size: Option<usize>,
 }

 impl Default for Config {
@@ -32,8 +30,6 @@
		Config {
			pref_cache_size: 1 << 14,
			max_cache_size: 1 << 20,
-			db_cache_size: None,
		}
	}
 }

View File

@@ -21,7 +21,7 @@ use std::fmt::{Display, Formatter, Error as FmtError};
 use mode::Mode as IpcMode;
 use verification::{VerifierType, QueueConfig};
 use util::journaldb;
-use kvdb::CompactionProfile;
+use kvdb_rocksdb::CompactionProfile;

 pub use std::time::Duration;
 pub use blockchain::Config as BlockChainConfig;
@@ -141,7 +141,7 @@ pub struct ClientConfig {
	pub pruning: journaldb::Algorithm,
	/// The name of the client instance.
	pub name: String,
-	/// RocksDB state column cache-size if not default
+	/// RocksDB column cache-size if not default
	pub db_cache_size: Option<usize>,
	/// State db compaction profile
	pub db_compaction: DatabaseCompactionProfile,

View File

@@ -21,8 +21,7 @@ use std::sync::Arc;
 use bigint::prelude::U256;
 use bigint::hash::H256;
 use util::journaldb;
-use trie;
-use bytes;
+use {trie, kvdb_memorydb, bytes};
 use kvdb::{self, KeyValueDB};
 use {state, state_db, client, executive, trace, transaction, db, spec, pod_state};
 use factory::Factories;
@@ -128,7 +127,7 @@
	}

	fn state_from_spec(spec: &'a spec::Spec, factories: &Factories) -> Result<state::State<state_db::StateDB>, EvmTestError> {
-		let db = Arc::new(kvdb::in_memory(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
+		let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
		let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
		let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
		state_db = spec.ensure_db_good(state_db, factories)?;
@@ -150,7 +149,7 @@
	}

	fn state_from_pod(spec: &'a spec::Spec, factories: &Factories, pod_state: pod_state::PodState) -> Result<state::State<state_db::StateDB>, EvmTestError> {
-		let db = Arc::new(kvdb::in_memory(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
+		let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
		let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
		let state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
		let mut state = state::State::new(

View File

@@ -27,7 +27,7 @@ use bigint::prelude::U256;
 use bigint::hash::H256;
 use parking_lot::RwLock;
 use util::*;
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use bytes::Bytes;
 use rlp::*;
 use ethkey::{Generator, Random};

View File

@@ -57,7 +57,7 @@ pub fn json_chain_test(json_data: &[u8]) -> Vec<String> {
	};

	{
-		let db = Arc::new(::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+		let db = Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
		let mut config = ClientConfig::default();
		config.history = 8;
		let client = Client::new(

View File

@@ -113,6 +113,8 @@ extern crate ansi_term;
 extern crate semantic_version;
 extern crate unexpected;
 extern crate kvdb;
+extern crate kvdb_rocksdb;
+extern crate kvdb_memorydb;
 extern crate util_error;
 extern crate snappy;
 extern crate migration;

View File

@@ -22,7 +22,7 @@ use std::collections::HashMap;
 use bigint::hash::H256;
 use util::Address;
 use bytes::Bytes;
-use kvdb::Database;
+use kvdb_rocksdb::Database;
 use migration::{Batch, Config, Error, Migration, SimpleMigration, Progress};
 use hash::keccak;
 use std::sync::Arc;

View File

@@ -26,7 +26,8 @@ use migration::{Error, Migration, Progress, Batch, Config};
 use util::journaldb;
 use bigint::hash::H256;
 use trie::Trie;
-use kvdb::{Database, DBTransaction};
+use kvdb::DBTransaction;
+use kvdb_rocksdb::Database;

 /// Account bloom upgrade routine. If bloom already present, does nothing.
 /// If database empty (no best block), does nothing.

View File

@@ -18,7 +18,7 @@
 //! This migration consolidates all databases into single one using Column Families.

 use rlp::{Rlp, RlpStream};
-use kvdb::Database;
+use kvdb_rocksdb::Database;
 use migration::{Batch, Config, Error, Migration, Progress};
 use std::sync::Arc;

View File

@@ -19,7 +19,8 @@
 use std::sync::Arc;
 use std::path::Path;
 use bigint::hash::H256;
-use kvdb::{Database, DatabaseConfig, KeyValueDB};
+use kvdb::KeyValueDB;
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use bytes::Bytes;
 use io::*;
 use spec::Spec;
@@ -82,12 +83,7 @@ impl ClientService {
		let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);

-		// give all rocksdb cache to state column; everything else has its
-		// own caches.
-		if let Some(size) = config.db_cache_size {
-			db_config.set_cache(::db::COL_STATE, size);
-		}
+		db_config.memory_budget = config.db_cache_size;
		db_config.compaction = config.db_compaction.compaction_profile(client_path);
		db_config.wal = config.db_wal;

View File

@@ -40,7 +40,7 @@ use parking_lot::{Mutex, RwLock, RwLockReadGuard};
 use util_error::UtilError;
 use bytes::Bytes;
 use util::journaldb::Algorithm;
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use snappy;

 /// Helper for removing directories in case of error.
@@ -682,7 +682,7 @@
	#[test]
	fn cannot_finish_with_invalid_chunks() {
		use bigint::hash::H256;
-		use kvdb::DatabaseConfig;
+		use kvdb_rocksdb::DatabaseConfig;

		let spec = get_test_spec();
		let dir = RandomTempPath::new();

View File

@@ -31,7 +31,7 @@ use tests::helpers;
 use transaction::{Transaction, Action, SignedTransaction};
 use util::Address;
-use kvdb;
+use kvdb_memorydb;

 const PASS: &'static str = "";
 const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes activated.
@@ -238,7 +238,7 @@ fn fixed_to_contract_only() {
	assert_eq!(client.chain_info().best_block_number, 11);
	let reader = snapshot_helpers::snap(&*client);

-	let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0));
+	let new_db = kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0));
	let spec = spec_fixed_to_contract();

	// ensure fresh engine's step matches.
@@ -270,7 +270,7 @@ fn fixed_to_contract_to_contract() {
	assert_eq!(client.chain_info().best_block_number, 16);
	let reader = snapshot_helpers::snap(&*client);

-	let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0));
+	let new_db = kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0));
	let spec = spec_fixed_to_contract();

	for _ in 0..16 { spec.engine.step() }

View File

@@ -26,7 +26,8 @@ use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
 use parking_lot::Mutex;
 use snappy;
-use kvdb::{self, KeyValueDB, DBTransaction};
+use kvdb::{KeyValueDB, DBTransaction};
+use kvdb_memorydb;

 use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
@@ -43,7 +44,7 @@ fn chunk_and_restore(amount: u64) {
	let mut snapshot_path = new_path.as_path().to_owned();
	snapshot_path.push("SNAP");

-	let old_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+	let old_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
	let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());

	// build the blockchain.
@@ -80,7 +81,7 @@ fn chunk_and_restore(amount: u64) {
	writer.into_inner().finish(manifest.clone()).unwrap();

	// restore it.
-	let new_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+	let new_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
	let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
	let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap();
@@ -127,7 +128,7 @@ fn checks_flag() {
	let chunk = stream.out();

-	let db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+	let db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
	let engine = ::spec::Spec::new_test().engine;
	let chain = BlockChain::new(Default::default(), &genesis, db.clone());

View File

@@ -27,7 +27,7 @@ use tests::helpers::generate_dummy_client_with_spec_and_data;
 use devtools::RandomTempPath;
 use io::IoChannel;
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};

 struct NoopDBRestore;

View File

@@ -27,7 +27,7 @@ use error::Error;
 use rand::{XorShiftRng, SeedableRng};
 use bigint::hash::H256;
 use util::journaldb::{self, Algorithm};
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use memorydb::MemoryDB;
 use parking_lot::Mutex;
 use devtools::RandomTempPath;

View File

@@ -672,13 +672,13 @@
	pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> {
		use transaction::{Action, Transaction};
		use util::journaldb;
-		use kvdb;
+		use kvdb_memorydb;

		let genesis = self.genesis_header();

		let factories = Default::default();
		let mut db = journaldb::new(
-			Arc::new(kvdb::in_memory(0)),
+			Arc::new(kvdb_memorydb::create(0)),
			journaldb::Algorithm::Archive,
			None,
		);

View File

@@ -27,7 +27,7 @@ use tests::helpers::*;
 use types::filter::Filter;
 use bigint::prelude::U256;
 use util::*;
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use devtools::*;
 use miner::Miner;
 use spec::Spec;

View File

@@ -232,7 +232,7 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
 }

 fn new_db() -> Arc<::kvdb::KeyValueDB> {
-	Arc::new(::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
+	Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)))
 }

 pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {

View File

@@ -27,7 +27,7 @@ use client::*;
 use tests::helpers::*;
 use devtools::RandomTempPath;
 use client::{BlockChainClient, Client, ClientConfig};
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use std::sync::Arc;
 use header::Header;
 use miner::Miner;

View File

@@ -416,7 +416,8 @@ mod tests {
	use bigint::prelude::U256;
	use bigint::hash::H256;
	use util::Address;
-	use kvdb::{DBTransaction, in_memory, KeyValueDB};
+	use kvdb::{DBTransaction, KeyValueDB};
+	use kvdb_memorydb;
	use header::BlockNumber;
	use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
	use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError};
@@ -467,7 +468,7 @@
	}

	fn new_db() -> Arc<KeyValueDB> {
-		Arc::new(in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
+		Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)))
	}

	#[test]

View File

@@ -178,7 +178,7 @@ mod test {
	"#;
	let spec = Spec::load(&::std::env::temp_dir(), spec_data.as_bytes()).unwrap();

-	let client_db = Arc::new(::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+	let client_db = Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));

	let client = Client::new(
		ClientConfig::default(),

View File

@@ -14,4 +14,7 @@ serde = "1.0"
 serde_derive = "1.0"
 serde_json = "1.0"
 log = "0.3"

+[dev-dependencies]
 ethkey = { path = "../ethkey" }
+kvdb-memorydb = { path = "../util/kvdb-memorydb" }

View File

@@ -44,6 +44,8 @@ extern crate log;
 #[cfg(test)]
 extern crate ethkey;
+#[cfg(test)]
+extern crate kvdb_memorydb;

 const LOCAL_TRANSACTIONS_KEY: &'static [u8] = &*b"LOCAL_TXS";
@@ -243,7 +245,7 @@
	#[test]
	fn twice_empty() {
-		let db = Arc::new(::kvdb::in_memory(0));
+		let db = Arc::new(::kvdb_memorydb::create(0));

		{
			let store = super::create(db.clone(), None, Dummy(vec![]));
@@ -272,7 +274,7 @@
			PendingTransaction::new(signed, condition)
		}).collect();

-		let db = Arc::new(::kvdb::in_memory(0));
+		let db = Arc::new(::kvdb_memorydb::create(0));

		{
			// nothing written yet, will write pending.
@@ -311,7 +313,7 @@
			PendingTransaction::new(signed, None)
		});

-		let db = Arc::new(::kvdb::in_memory(0));
+		let db = Arc::new(::kvdb_memorydb::create(0));

		{
			// nothing written, will write bad.
			let store = super::create(db.clone(), None, Dummy(transactions.clone()));

View File

@@ -17,8 +17,10 @@
 use std::cmp::max;

 const MIN_BC_CACHE_MB: u32 = 4;
-const MIN_DB_CACHE_MB: u32 = 2;
+const MIN_DB_CACHE_MB: u32 = 8;
 const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16;
+const DEFAULT_DB_CACHE_SIZE: u32 = 128;
+const DEFAULT_BC_CACHE_SIZE: u32 = 8;
 const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 40;
 const DEFAULT_TRACE_CACHE_SIZE: u32 = 20;
 const DEFAULT_STATE_CACHE_SIZE: u32 = 25;
@@ -41,7 +43,11 @@ pub struct CacheConfig {
 impl Default for CacheConfig {
	fn default() -> Self {
-		CacheConfig::new(32, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, DEFAULT_STATE_CACHE_SIZE)
+		CacheConfig::new(
+			DEFAULT_DB_CACHE_SIZE,
+			DEFAULT_BC_CACHE_SIZE,
+			DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB,
+			DEFAULT_STATE_CACHE_SIZE)
	}
 }
@@ -68,14 +74,9 @@
		}
	}

-	/// Size of db cache for blockchain.
-	pub fn db_blockchain_cache_size(&self) -> u32 {
-		max(MIN_DB_CACHE_MB, self.db / 4)
-	}
-
-	/// Size of db cache for state.
-	pub fn db_state_cache_size(&self) -> u32 {
-		max(MIN_DB_CACHE_MB, self.db * 3 / 4)
+	/// Size of db cache.
+	pub fn db_cache_size(&self) -> u32 {
+		max(MIN_DB_CACHE_MB, self.db)
	}

	/// Size of block queue size limit
@@ -122,13 +123,16 @@
	fn test_cache_config_db_cache_sizes() {
		let config = CacheConfig::new_with_total_cache_size(400);
		assert_eq!(config.db, 280);
-		assert_eq!(config.db_blockchain_cache_size(), 70);
-		assert_eq!(config.db_state_cache_size(), 210);
+		assert_eq!(config.db_cache_size(), 280);
	}

	#[test]
	fn test_cache_config_default() {
		assert_eq!(CacheConfig::default(),
-			CacheConfig::new(32, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, super::DEFAULT_STATE_CACHE_SIZE));
+			CacheConfig::new(
+				super::DEFAULT_DB_CACHE_SIZE,
+				super::DEFAULT_BC_CACHE_SIZE,
+				super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB,
+				super::DEFAULT_STATE_CACHE_SIZE));
	}
 }
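A worked check of the new arithmetic (a sketch built from the constants and test above: with a 400 MB total, the database share `config.db` comes out at 280 MB, and `db_cache_size()` now hands all of it to RocksDB as one budget instead of splitting it 1/4 blockchain, 3/4 state):

```rust
use std::cmp::max;

const MIN_DB_CACHE_MB: u32 = 8;

// Same shape as the new method above, free-standing for illustration.
fn db_cache_size(db: u32) -> u32 {
	max(MIN_DB_CACHE_MB, db)
}

fn main() {
	// Old split for db = 280: blockchain 280 / 4 = 70, state 280 * 3 / 4 = 210.
	// New: the whole 280 MB becomes a single RocksDB memory budget.
	assert_eq!(db_cache_size(280), 280);
	// Small configurations are floored at MIN_DB_CACHE_MB.
	assert_eq!(db_cache_size(4), 8);
}
```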

View File

@@ -767,7 +767,7 @@ usage! {
	"--pruning-memory=[MB]",
	"The ideal amount of memory in megabytes to use to store recent states. As many states as possible will be kept within this limit, and at least --pruning-history states will always be kept.",

-	ARG arg_cache_size_db: (u32) = 32u32, or |c: &Config| otry!(c.footprint).cache_size_db.clone(),
+	ARG arg_cache_size_db: (u32) = 128u32, or |c: &Config| otry!(c.footprint).cache_size_db.clone(),
	"--cache-size-db=[MB]",
	"Override database cache size.",
@@ -1776,7 +1776,7 @@ mod tests {
	pruning_memory: None,
	fast_and_loose: None,
	cache_size: None,
-	cache_size_db: Some(128),
+	cache_size_db: Some(256),
	cache_size_blocks: Some(16),
	cache_size_queue: Some(100),
	cache_size_state: Some(25),

View File

@@ -63,7 +63,7 @@ tx_queue_gas = "off"
 tracing = "on"
 pruning = "fast"
 pruning_history = 64
-cache_size_db = 128
+cache_size_db = 256
 cache_size_blocks = 16
 cache_size_queue = 100
 cache_size_state = 25

View File

@@ -21,7 +21,7 @@ use std::fs::File;
 use bigint::prelude::U256;
 use bigint::hash::clean_0x;
 use util::Address;
-use kvdb::CompactionProfile;
+use kvdb_rocksdb::CompactionProfile;
 use util::journaldb::Algorithm;
 use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType};
 use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy};
@@ -239,10 +239,8 @@ pub fn to_client_config(
	client_config.blockchain.max_cache_size = cache_config.blockchain() as usize * mb;
	// in bytes
	client_config.blockchain.pref_cache_size = cache_config.blockchain() as usize * 3 / 4 * mb;
-	// db blockchain cache size, in megabytes
-	client_config.blockchain.db_cache_size = Some(cache_config.db_blockchain_cache_size() as usize);
-	// db state cache size, in megabytes
-	client_config.db_cache_size = Some(cache_config.db_state_cache_size() as usize);
+	// db cache size, in megabytes
+	client_config.db_cache_size = Some(cache_config.db_cache_size() as usize);
	// db queue cache size, in bytes
	client_config.queue.max_mem_use = cache_config.queue() as usize * mb;
	// in bytes

View File

@@ -62,7 +62,7 @@ extern crate ethcore_bigint as bigint;
 extern crate ethcore_bytes as bytes;
 extern crate ethcore_network as network;
 extern crate migration as migr;
-extern crate kvdb;
+extern crate kvdb_rocksdb;
 extern crate ethkey;
 extern crate ethsync;
 extern crate node_health;

View File

@@ -22,7 +22,7 @@ use std::fmt::{Display, Formatter, Error as FmtError};
 use std::sync::Arc;
 use util::journaldb::Algorithm;
 use migr::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration};
-use kvdb::{CompactionProfile, Database, DatabaseConfig};
+use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
 use ethcore::migrations;
 use ethcore::db;
 use ethcore::migrations::Extract;
@@ -167,7 +167,7 @@ fn consolidate_database(
	let config = default_migration_settings(compaction_profile);
	let mut db_config = DatabaseConfig {
		max_open_files: 64,
-		cache_sizes: Default::default(),
+		memory_budget: None,
		compaction: config.compaction_profile,
		columns: None,
		wal: true,
@@ -283,7 +283,7 @@ mod legacy {
	use std::path::{Path, PathBuf};
	use util::journaldb::Algorithm;
	use migr::{Manager as MigrationManager};
-	use kvdb::CompactionProfile;
+	use kvdb_rocksdb::CompactionProfile;
	use ethcore::migrations;

	/// Blocks database path.

View File

@@ -57,7 +57,6 @@ rlp = { path = "../util/rlp" }
 stats = { path = "../util/stats" }
 vm = { path = "../ethcore/vm" }
 hash = { path = "../util/hash" }
-kvdb = { path = "../util/kvdb" }
 hardware-wallet = { path = "../hw" }

 clippy = { version = "0.0.103", optional = true}
@@ -66,6 +65,7 @@ pretty_assertions = "0.1"
 [dev-dependencies]
 macros = { path = "../util/macros" }
 ethcore-network = { path = "../util/network" }
+kvdb-memorydb = { path = "../util/kvdb-memorydb" }

 [features]
 dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"]

View File

@@ -65,7 +65,6 @@ extern crate rlp;
 extern crate stats;
 extern crate hash;
 extern crate hardware_wallet;
-extern crate kvdb;

 #[macro_use]
 extern crate log;
@@ -85,6 +84,9 @@ extern crate pretty_assertions;
 #[macro_use]
 extern crate macros;

+#[cfg(test)]
+extern crate kvdb_memorydb;
+
 pub extern crate jsonrpc_ws_server as ws;

 mod authcodes;

View File

@@ -33,7 +33,7 @@ use io::IoChannel;
 use bigint::prelude::U256;
 use bigint::hash::H256;
 use util::Address;
-use kvdb::in_memory;
+use kvdb_memorydb;

 use jsonrpc_core::IoHandler;
 use v1::impls::{EthClient, SigningUnsafeClient};
@@ -131,7 +131,7 @@
	let client = Client::new(
		ClientConfig::default(),
		&spec,
-		Arc::new(in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
+		Arc::new(kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
		miner_service.clone(),
		IoChannel::disconnected(),
	).unwrap();

View File

@@ -33,6 +33,7 @@ ethcore-devtools = { path = "../devtools" }
 ethcore-util = { path = "../util" }
 ethcore-bigint = { path = "../util/bigint" }
 kvdb = { path = "../util/kvdb" }
+kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
 hash = { path = "../util/hash" }
 ethcore-ipc = { path = "../ipc/rpc" }
 ethcore-ipc-nano = { path = "../ipc/nano" }

View File

@@ -18,7 +18,7 @@ use std::path::PathBuf;
 use std::collections::BTreeMap;
 use serde_json;
 use ethkey::{Secret, Public};
-use kvdb::{Database, DatabaseIterator};
+use kvdb_rocksdb::{Database, DatabaseIterator};
 use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId};
 use serialization::{SerializablePublic, SerializableSecret};
@@ -293,7 +293,7 @@ pub mod tests {
	use serde_json;
	use devtools::RandomTempPath;
	use ethkey::{Random, Generator, Public, Secret};
-	use kvdb::Database;
+	use kvdb_rocksdb::Database;
	use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId};
	use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare,
		SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1,

View File

@@ -49,6 +49,7 @@ extern crate ethkey;
 extern crate native_contracts;
 extern crate hash;
 extern crate kvdb;
+extern crate kvdb_rocksdb;

 mod key_server_cluster;
 mod types;

View File

@@ -34,11 +34,14 @@ ethcore-ipc = { path = "../ipc/rpc" }
 semver = "0.6"
 smallvec = { version = "0.4", features = ["heapsizeof"] }
 ethcore-ipc-nano = { path = "../ipc/nano" }
-ethcore-devtools = { path = "../devtools" }
-ethkey = { path = "../ethkey" }
 parking_lot = "0.4"
 ipnetwork = "0.12.6"

+[dev-dependencies]
+ethkey = { path = "../ethkey" }
+kvdb-memorydb = { path = "../util/kvdb-memorydb" }
+ethcore-devtools = { path = "../devtools" }
+
 [features]
 default = []
 dev = ["clippy", "ethcore/dev", "ethcore-util/dev"]

View File

@@ -48,6 +48,7 @@ extern crate ethcore_light as light;
 #[cfg(test)] extern crate ethcore_devtools as devtools;
 #[cfg(test)] extern crate ethkey;
+#[cfg(test)] extern crate kvdb_memorydb;

 #[macro_use]
 extern crate macros;

View File

@@ -291,7 +291,7 @@ impl TestNet<EthPeer<EthcoreClient>> {
 		let client = EthcoreClient::new(
 			ClientConfig::default(),
 			&spec,
-			Arc::new(::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
+			Arc::new(::kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
 			Arc::new(Miner::with_spec_and_accounts(&spec, accounts)),
 			IoChannel::disconnected(),
 		).unwrap();

View File

@@ -35,6 +35,9 @@ memorydb = { path = "memorydb" }
 util-error = { path = "error" }
 kvdb = { path = "kvdb" }
+[dev-dependencies]
+kvdb-memorydb = { path = "kvdb-memorydb" }
 [features]
 default = []
 dev = ["clippy"]

View File

@@ -0,0 +1,9 @@
[package]
name = "kvdb-memorydb"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
parking_lot = "0.4"
rlp = { path = "../rlp" }
kvdb = { path = "../kvdb" }

View File

@@ -0,0 +1,124 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate parking_lot;
extern crate kvdb;
extern crate rlp;
use std::collections::{BTreeMap, HashMap};
use parking_lot::RwLock;
use kvdb::{DBValue, Error, DBTransaction, KeyValueDB, DBOp};
use rlp::{RlpType, UntrustedRlp, Compressible};
/// A key-value database fulfilling the `KeyValueDB` trait, living in memory.
/// This is generally intended for tests and is not particularly optimized.
#[derive(Default)]
pub struct InMemory {
columns: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, DBValue>>>,
}
/// Create an in-memory database with the given number of columns.
/// Columns will be indexable by 0..`num_cols`
pub fn create(num_cols: u32) -> InMemory {
let mut cols = HashMap::new();
cols.insert(None, BTreeMap::new());
for idx in 0..num_cols {
cols.insert(Some(idx), BTreeMap::new());
}
InMemory {
columns: RwLock::new(cols)
}
}
impl KeyValueDB for InMemory {
fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
let columns = self.columns.read();
match columns.get(&col) {
None => Err(format!("No such column family: {:?}", col)),
Some(map) => Ok(map.get(key).cloned()),
}
}
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
let columns = self.columns.read();
match columns.get(&col) {
None => None,
Some(map) =>
map.iter()
.find(|&(ref k, _)| k.starts_with(prefix))
.map(|(_, v)| v.to_vec().into_boxed_slice())
}
}
fn write_buffered(&self, transaction: DBTransaction) {
let mut columns = self.columns.write();
let ops = transaction.ops;
for op in ops {
match op {
DBOp::Insert { col, key, value } => {
if let Some(col) = columns.get_mut(&col) {
col.insert(key.into_vec(), value);
}
},
DBOp::InsertCompressed { col, key, value } => {
if let Some(col) = columns.get_mut(&col) {
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
let mut value = DBValue::new();
value.append_slice(&compressed);
col.insert(key.into_vec(), value);
}
},
DBOp::Delete { col, key } => {
if let Some(col) = columns.get_mut(&col) {
col.remove(&*key);
}
},
}
}
}
fn flush(&self) -> Result<(), String> { Ok(()) }
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
match self.columns.read().get(&col) {
Some(map) => Box::new( // TODO: worth optimizing at all?
map.clone()
.into_iter()
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
),
None => Box::new(None.into_iter()),
}
}
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
{
match self.columns.read().get(&col) {
Some(map) => Box::new(
map.clone()
.into_iter()
.skip_while(move |&(ref k, _)| !k.starts_with(prefix))
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
),
None => Box::new(None.into_iter()),
}
}
fn restore(&self, _new_db: &str) -> Result<(), Error> {
Err("Attempted to restore in-memory database".into())
}
}
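
A minimal usage sketch for the new backend, relying only on APIs visible in this diff (`create` above, plus `DBTransaction` and the `KeyValueDB` trait from the `kvdb` crate shown later in this commit); the column count, key, and value are illustrative:

extern crate kvdb;
extern crate kvdb_memorydb;

use kvdb::{DBTransaction, KeyValueDB};

fn main() {
	// One numbered column (Some(0)) in addition to the default column (None).
	let db = kvdb_memorydb::create(1);

	let mut tx = DBTransaction::new();
	tx.put(Some(0), b"key", b"value");
	db.write_buffered(tx);

	// For this backend, buffered writes apply immediately and `flush` is a
	// no-op, but calling it keeps the code identical for the RocksDB backend.
	db.flush().unwrap();
	assert_eq!(&*db.get(Some(0), b"key").unwrap().unwrap(), b"value");
}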

View File

@@ -0,0 +1,16 @@
[package]
name = "kvdb-rocksdb"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
elastic-array = "0.9"
ethcore-bigint = { path = "../bigint" }
ethcore-devtools = { path = "../../devtools" }
kvdb = { path = "../kvdb" }
log = "0.3"
num_cpus = "1.0"
parking_lot = "0.4"
regex = "0.2"
rlp = { path = "../rlp" }
rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" }

View File

@@ -0,0 +1,811 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#[macro_use]
extern crate log;
extern crate elastic_array;
extern crate num_cpus;
extern crate parking_lot;
extern crate regex;
extern crate rocksdb;
extern crate ethcore_bigint as bigint;
extern crate ethcore_devtools as devtools;
extern crate kvdb;
extern crate rlp;
use std::cmp;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::{PathBuf, Path};
use std::{mem, fs, io};
use parking_lot::{Mutex, MutexGuard, RwLock};
use rocksdb::{
DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
Options, BlockBasedOptions, Direction, Cache, Column, ReadOptions
};
use elastic_array::ElasticArray32;
use rlp::{UntrustedRlp, RlpType, Compressible};
use kvdb::{KeyValueDB, DBTransaction, DBValue, Error, DBOp};
#[cfg(target_os = "linux")]
use regex::Regex;
#[cfg(target_os = "linux")]
use std::process::Command;
#[cfg(target_os = "linux")]
use std::fs::File;
const DB_DEFAULT_MEMORY_BUDGET_MB: usize = 128;
enum KeyState {
Insert(DBValue),
InsertCompressed(DBValue),
Delete,
}
/// Compaction profile for the database settings
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct CompactionProfile {
/// L0-L1 target file size
pub initial_file_size: u64,
/// block size
pub block_size: usize,
/// rate limiter for background flushes and compactions, bytes/sec, if any
pub write_rate_limit: Option<u64>,
}
impl Default for CompactionProfile {
/// Default profile suitable for most storage
fn default() -> CompactionProfile {
CompactionProfile::ssd()
}
}
/// Given the output of the `df` command, return the path of the Linux rotational-flag file.
#[cfg(target_os = "linux")]
pub fn rotational_from_df_output(df_out: Vec<u8>) -> Option<PathBuf> {
use std::str;
str::from_utf8(df_out.as_slice())
.ok()
// Get the drive name.
.and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})")
.ok()
.and_then(|re| re.captures(df_str))
.and_then(|captures| captures.get(1)))
// Generate path e.g. /sys/block/sda/queue/rotational
.map(|drive_path| {
let mut p = PathBuf::from("/sys/block");
p.push(drive_path.as_str());
p.push("queue/rotational");
p
})
}
impl CompactionProfile {
/// Attempt to determine the best profile automatically, only Linux for now.
#[cfg(target_os = "linux")]
pub fn auto(db_path: &Path) -> CompactionProfile {
use std::io::Read;
let hdd_check_file = db_path
.to_str()
.and_then(|path_str| Command::new("df").arg(path_str).output().ok())
.and_then(|df_res| match df_res.status.success() {
true => Some(df_res.stdout),
false => None,
})
.and_then(rotational_from_df_output);
// Read out the file and match compaction profile.
if let Some(hdd_check) = hdd_check_file {
if let Ok(mut file) = File::open(hdd_check.as_path()) {
let mut buffer = [0; 1];
if file.read_exact(&mut buffer).is_ok() {
// 0 means not rotational.
if buffer == [48] { return Self::ssd(); }
// 1 means rotational.
if buffer == [49] { return Self::hdd(); }
}
}
}
// Fallback if drive type was not determined.
Self::default()
}
/// Just default for other platforms.
#[cfg(not(target_os = "linux"))]
pub fn auto(_db_path: &Path) -> CompactionProfile {
Self::default()
}
/// Default profile suitable for SSD storage
pub fn ssd() -> CompactionProfile {
CompactionProfile {
initial_file_size: 64 * 1024 * 1024,
block_size: 16 * 1024,
write_rate_limit: None,
}
}
/// Slow HDD compaction profile
pub fn hdd() -> CompactionProfile {
CompactionProfile {
initial_file_size: 256 * 1024 * 1024,
block_size: 64 * 1024,
write_rate_limit: Some(16 * 1024 * 1024),
}
}
}
/// Database configuration
#[derive(Clone)]
pub struct DatabaseConfig {
/// Max number of open files.
pub max_open_files: i32,
/// Memory budget (in MiB) used for setting block cache size, write buffer size.
pub memory_budget: Option<usize>,
/// Compaction profile
pub compaction: CompactionProfile,
/// Set number of columns
pub columns: Option<u32>,
/// Should we keep WAL enabled?
pub wal: bool,
}
impl DatabaseConfig {
/// Create new `DatabaseConfig` with default parameters and specified set of columns.
/// Note that the memory budget must be set explicitly if the default is not suitable.
pub fn with_columns(columns: Option<u32>) -> Self {
let mut config = Self::default();
config.columns = columns;
config
}
pub fn memory_budget(&self) -> usize {
self.memory_budget.unwrap_or(DB_DEFAULT_MEMORY_BUDGET_MB) * 1024 * 1024
}
pub fn memory_budget_per_col(&self) -> usize {
self.memory_budget() / self.columns.unwrap_or(1) as usize
}
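	// Illustrative numbers (assumed, not from the original source): with the
	// default 128 MiB budget and `columns: Some(8)`, this yields 16 MiB per
	// column; `open` below sizes the write buffer at half of that and the
	// block cache at a third of the total budget.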
}
impl Default for DatabaseConfig {
fn default() -> DatabaseConfig {
DatabaseConfig {
max_open_files: 512,
memory_budget: None,
compaction: CompactionProfile::default(),
columns: None,
wal: true,
}
}
}
/// Database iterator (for flushed data only)
// The compromise of holding only a virtual borrow vs. holding a lock on the
// inner DB (to prevent closing via restoration) may be re-evaluated in the future.
//
pub struct DatabaseIterator<'a> {
iter: DBIterator,
_marker: PhantomData<&'a Database>,
}
impl<'a> Iterator for DatabaseIterator<'a> {
type Item = (Box<[u8]>, Box<[u8]>);
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
struct DBAndColumns {
db: DB,
cfs: Vec<Column>,
}
// get column family configuration from database config.
fn col_config(config: &DatabaseConfig, block_opts: &BlockBasedOptions) -> Result<Options, String> {
let mut opts = Options::new();
opts.set_parsed_options("level_compaction_dynamic_level_bytes=true")?;
opts.set_block_based_table_factory(block_opts);
opts.set_parsed_options(
&format!("block_based_table_factory={{{};{}}}",
"cache_index_and_filter_blocks=true",
"pin_l0_filter_and_index_blocks_in_cache=true"))?;
opts.optimize_level_style_compaction(config.memory_budget_per_col() as i32);
opts.set_target_file_size_base(config.compaction.initial_file_size);
opts.set_parsed_options("compression_per_level=")?;
Ok(opts)
}
/// Key-Value database.
pub struct Database {
db: RwLock<Option<DBAndColumns>>,
config: DatabaseConfig,
write_opts: WriteOptions,
read_opts: ReadOptions,
block_opts: BlockBasedOptions,
path: String,
// Dirty values added with `write_buffered`. Cleaned on `flush`.
overlay: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
// Values currently being flushed. Cleared when `flush` completes.
flushing: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
// Prevents concurrent flushes.
// Value indicates if a flush is in progress.
flushing_lock: Mutex<bool>,
}
impl Database {
/// Open database with default settings.
pub fn open_default(path: &str) -> Result<Database, String> {
Database::open(&DatabaseConfig::default(), path)
}
/// Open database file. Creates if it does not exist.
pub fn open(config: &DatabaseConfig, path: &str) -> Result<Database, String> {
let mut opts = Options::new();
if let Some(rate_limit) = config.compaction.write_rate_limit {
opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit))?;
}
opts.set_use_fsync(false);
opts.create_if_missing(true);
opts.set_max_open_files(config.max_open_files);
opts.set_parsed_options("keep_log_file_num=1")?;
opts.set_parsed_options("bytes_per_sync=1048576")?;
opts.set_db_write_buffer_size(config.memory_budget_per_col() / 2);
opts.increase_parallelism(cmp::max(1, ::num_cpus::get() as i32 / 2));
let mut block_opts = BlockBasedOptions::new();
{
block_opts.set_block_size(config.compaction.block_size);
let cache_size = cmp::max(8, config.memory_budget() / 3);
let cache = Cache::new(cache_size);
block_opts.set_cache(cache);
}
let columns = config.columns.unwrap_or(0) as usize;
let mut cf_options = Vec::with_capacity(columns);
let cfnames: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect();
let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect();
for _ in 0 .. config.columns.unwrap_or(0) {
cf_options.push(col_config(&config, &block_opts)?);
}
let mut write_opts = WriteOptions::new();
if !config.wal {
write_opts.disable_wal(true);
}
let mut read_opts = ReadOptions::new();
read_opts.set_verify_checksums(false);
let mut cfs: Vec<Column> = Vec::new();
let db = match config.columns {
Some(columns) => {
match DB::open_cf(&opts, path, &cfnames, &cf_options) {
Ok(db) => {
cfs = cfnames.iter().map(|n| db.cf_handle(n)
.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
assert!(cfs.len() == columns as usize);
Ok(db)
}
Err(_) => {
// retry and create CFs
match DB::open_cf(&opts, path, &[], &[]) {
Ok(mut db) => {
cfs = cfnames.iter().enumerate().map(|(i, n)| db.create_cf(n, &cf_options[i])).collect::<Result<_, _>>()?;
Ok(db)
},
err @ Err(_) => err,
}
}
}
},
None => DB::open(&opts, path)
};
let db = match db {
Ok(db) => db,
Err(ref s) if s.starts_with("Corruption:") => {
info!("{}", s);
info!("Attempting DB repair for {}", path);
DB::repair(&opts, path)?;
match cfnames.is_empty() {
true => DB::open(&opts, path)?,
false => DB::open_cf(&opts, path, &cfnames, &cf_options)?
}
},
Err(s) => { return Err(s); }
};
let num_cols = cfs.len();
Ok(Database {
db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })),
config: config.clone(),
write_opts: write_opts,
overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
flushing_lock: Mutex::new(false),
path: path.to_owned(),
read_opts: read_opts,
block_opts: block_opts,
})
}
/// Helper to create new transaction for this database.
pub fn transaction(&self) -> DBTransaction {
DBTransaction::new()
}
fn to_overlay_column(col: Option<u32>) -> usize {
col.map_or(0, |c| (c + 1) as usize)
}
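	// e.g. `None` maps to 0 and `Some(0)` maps to 1, which is why `overlay`
	// and `flushing` are allocated with `num_cols + 1` entries.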
/// Commit transaction to database.
pub fn write_buffered(&self, tr: DBTransaction) {
let mut overlay = self.overlay.write();
let ops = tr.ops;
for op in ops {
match op {
DBOp::Insert { col, key, value } => {
let c = Self::to_overlay_column(col);
overlay[c].insert(key, KeyState::Insert(value));
},
DBOp::InsertCompressed { col, key, value } => {
let c = Self::to_overlay_column(col);
overlay[c].insert(key, KeyState::InsertCompressed(value));
},
DBOp::Delete { col, key } => {
let c = Self::to_overlay_column(col);
overlay[c].insert(key, KeyState::Delete);
},
}
};
}
/// Commit buffered changes to database. Must be called under `flush_lock`
fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<bool>) -> Result<(), String> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let batch = WriteBatch::new();
mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write());
{
for (c, column) in self.flushing.read().iter().enumerate() {
for (ref key, ref state) in column.iter() {
match **state {
KeyState::Delete => {
if c > 0 {
batch.delete_cf(cfs[c - 1], &key)?;
} else {
batch.delete(&key)?;
}
},
KeyState::Insert(ref value) => {
if c > 0 {
batch.put_cf(cfs[c - 1], &key, value)?;
} else {
batch.put(&key, &value)?;
}
},
KeyState::InsertCompressed(ref value) => {
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
if c > 0 {
batch.put_cf(cfs[c - 1], &key, &compressed)?;
} else {
batch.put(&key, &value)?;
}
}
}
}
}
}
db.write_opt(batch, &self.write_opts)?;
for column in self.flushing.write().iter_mut() {
column.clear();
column.shrink_to_fit();
}
Ok(())
},
None => Err("Database is closed".to_owned())
}
}
/// Commit buffered changes to database.
pub fn flush(&self) -> Result<(), String> {
let mut lock = self.flushing_lock.lock();
// If RocksDB batch allocation fails the thread gets terminated and the lock is released.
// The value inside the lock is used to detect that.
if *lock {
// This can only happen if another flushing thread is terminated unexpectedly.
return Err("Database write failure. Running low on memory perhaps?".to_owned());
}
*lock = true;
let result = self.write_flushing_with_lock(&mut lock);
*lock = false;
result
}
/// Commit transaction to database.
pub fn write(&self, tr: DBTransaction) -> Result<(), String> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let batch = WriteBatch::new();
let ops = tr.ops;
for op in ops {
match op {
DBOp::Insert { col, key, value } => {
col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(cfs[c as usize], &key, &value))?
},
DBOp::InsertCompressed { col, key, value } => {
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(cfs[c as usize], &key, &compressed))?
},
DBOp::Delete { col, key } => {
col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(cfs[c as usize], &key))?
},
}
}
db.write_opt(batch, &self.write_opts)
},
None => Err("Database is closed".to_owned())
}
}
/// Get value by key.
pub fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let overlay = &self.overlay.read()[Self::to_overlay_column(col)];
match overlay.get(key) {
Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
Some(&KeyState::Delete) => Ok(None),
None => {
let flushing = &self.flushing.read()[Self::to_overlay_column(col)];
match flushing.get(key) {
Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
Some(&KeyState::Delete) => Ok(None),
None => {
col.map_or_else(
|| db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))),
|c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))))
},
}
},
}
},
None => Ok(None),
}
}
/// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values.
// TODO: support prefix seek for unflushed data
pub fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
self.iter_from_prefix(col, prefix).and_then(|mut iter| {
match iter.next() {
// TODO: use prefix_same_as_start read option (not available in C API currently)
Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None },
_ => None
}
})
}
/// Get database iterator for flushed data.
pub fn iter(&self, col: Option<u32>) -> Option<DatabaseIterator> {
//TODO: iterate over overlay
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let iter = col.map_or_else(
|| db.iterator_opt(IteratorMode::Start, &self.read_opts),
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts)
.expect("iterator params are valid; qed")
);
Some(DatabaseIterator {
iter: iter,
_marker: PhantomData,
})
},
None => None,
}
}
fn iter_from_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<DatabaseIterator> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts),
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts)
.expect("iterator params are valid; qed"));
Some(DatabaseIterator {
iter: iter,
_marker: PhantomData,
})
},
None => None,
}
}
/// Close the database
fn close(&self) {
*self.db.write() = None;
self.overlay.write().clear();
self.flushing.write().clear();
}
/// Restore the database from a copy at given path.
pub fn restore(&self, new_db: &str) -> Result<(), Error> {
self.close();
let mut backup_db = PathBuf::from(&self.path);
backup_db.pop();
backup_db.push("backup_db");
let existed = match fs::rename(&self.path, &backup_db) {
Ok(_) => true,
Err(e) => if let io::ErrorKind::NotFound = e.kind() {
false
} else {
return Err(e.into());
}
};
match fs::rename(&new_db, &self.path) {
Ok(_) => {
// clean up the backup.
if existed {
fs::remove_dir_all(&backup_db)?;
}
}
Err(e) => {
// restore the backup.
if existed {
fs::rename(&backup_db, &self.path)?;
}
return Err(e.into())
}
}
// reopen the database and steal handles into self
let db = Self::open(&self.config, &self.path)?;
*self.db.write() = mem::replace(&mut *db.db.write(), None);
*self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new());
*self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new());
Ok(())
}
/// The number of non-default column families.
pub fn num_columns(&self) -> u32 {
self.db.read().as_ref()
.and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) } )
.map(|n| n as u32)
.unwrap_or(0)
}
/// Drop a column family.
pub fn drop_column(&self) -> Result<(), String> {
match *self.db.write() {
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
if let Some(col) = cfs.pop() {
let name = format!("col{}", cfs.len());
drop(col);
db.drop_cf(&name)?;
}
Ok(())
},
None => Ok(()),
}
}
/// Add a column family.
pub fn add_column(&self) -> Result<(), String> {
match *self.db.write() {
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
let col = cfs.len() as u32;
let name = format!("col{}", col);
cfs.push(db.create_cf(&name, &col_config(&self.config, &self.block_opts)?)?);
Ok(())
},
None => Ok(()),
}
}
}
// duplicate declaration of methods here to avoid trait import in certain existing cases
// at time of addition.
impl KeyValueDB for Database {
fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
Database::get(self, col, key)
}
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
Database::get_by_prefix(self, col, prefix)
}
fn write_buffered(&self, transaction: DBTransaction) {
Database::write_buffered(self, transaction)
}
fn write(&self, transaction: DBTransaction) -> Result<(), String> {
Database::write(self, transaction)
}
fn flush(&self) -> Result<(), String> {
Database::flush(self)
}
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
let unboxed = Database::iter(self, col);
Box::new(unboxed.into_iter().flat_map(|inner| inner))
}
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
{
let unboxed = Database::iter_from_prefix(self, col, prefix);
Box::new(unboxed.into_iter().flat_map(|inner| inner))
}
fn restore(&self, new_db: &str) -> Result<(), Error> {
Database::restore(self, new_db)
}
}
impl Drop for Database {
fn drop(&mut self) {
// write all buffered changes if we can.
let _ = self.flush();
}
}
#[cfg(test)]
mod tests {
use bigint::hash::H256;
use super::*;
use devtools::*;
use std::str::FromStr;
fn test_db(config: &DatabaseConfig) {
let path = RandomTempPath::create_dir();
let db = Database::open(config, path.as_path().to_str().unwrap()).unwrap();
let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let mut batch = db.transaction();
batch.put(None, &key1, b"cat");
batch.put(None, &key2, b"dog");
db.write(batch).unwrap();
assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"cat");
let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect();
assert_eq!(contents.len(), 2);
assert_eq!(&*contents[0].0, &*key1);
assert_eq!(&*contents[0].1, b"cat");
assert_eq!(&*contents[1].0, &*key2);
assert_eq!(&*contents[1].1, b"dog");
let mut batch = db.transaction();
batch.delete(None, &key1);
db.write(batch).unwrap();
assert!(db.get(None, &key1).unwrap().is_none());
let mut batch = db.transaction();
batch.put(None, &key1, b"cat");
db.write(batch).unwrap();
let mut transaction = db.transaction();
transaction.put(None, &key3, b"elephant");
transaction.delete(None, &key1);
db.write(transaction).unwrap();
assert!(db.get(None, &key1).unwrap().is_none());
assert_eq!(&*db.get(None, &key3).unwrap().unwrap(), b"elephant");
assert_eq!(&*db.get_by_prefix(None, &key3).unwrap(), b"elephant");
assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog");
let mut transaction = db.transaction();
transaction.put(None, &key1, b"horse");
transaction.delete(None, &key3);
db.write_buffered(transaction);
assert!(db.get(None, &key3).unwrap().is_none());
assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
db.flush().unwrap();
assert!(db.get(None, &key3).unwrap().is_none());
assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
}
#[test]
fn kvdb() {
let path = RandomTempPath::create_dir();
let _ = Database::open_default(path.as_path().to_str().unwrap()).unwrap();
test_db(&DatabaseConfig::default());
}
#[test]
#[cfg(target_os = "linux")]
fn df_to_rotational() {
use std::path::PathBuf;
// Example df output.
let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10];
let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational"));
assert_eq!(rotational_from_df_output(example_df), expected_output);
}
#[test]
fn add_columns() {
let config = DatabaseConfig::default();
let config_5 = DatabaseConfig::with_columns(Some(5));
let path = RandomTempPath::create_dir();
// open empty, add 5.
{
let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 0);
for i in 0..5 {
db.add_column().unwrap();
assert_eq!(db.num_columns(), i + 1);
}
}
// reopen as 5.
{
let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 5);
}
}
#[test]
fn drop_columns() {
let config = DatabaseConfig::default();
let config_5 = DatabaseConfig::with_columns(Some(5));
let path = RandomTempPath::create_dir();
// open 5, remove all.
{
let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 5);
for i in (0..5).rev() {
db.drop_column().unwrap();
assert_eq!(db.num_columns(), i);
}
}
// reopen as 0.
{
let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 0);
}
}
}
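
A sketch of driving the split-out backend end to end, again restricted to APIs added in this commit (`DatabaseConfig::with_columns`, `CompactionProfile::auto`, and the buffered-write path); the on-disk path is an assumption for illustration:

extern crate kvdb_rocksdb;

use std::path::Path;
use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};

fn main() {
	// Two numbered columns; the memory budget and compaction settings keep
	// the defaults described above (128 MiB, SSD profile unless `auto`
	// detects a rotational disk).
	let mut config = DatabaseConfig::with_columns(Some(2));
	config.compaction = CompactionProfile::auto(Path::new("/tmp/example-db"));

	let db = Database::open(&config, "/tmp/example-db").expect("open failed");

	// Buffered writes land in the `overlay` map first...
	let mut tx = db.transaction();
	tx.put(Some(0), b"key", b"value");
	db.write_buffered(tx);

	// ...and only reach RocksDB itself on `flush` (also attempted on `Drop`).
	db.flush().expect("flush failed");
	assert_eq!(&*db.get(Some(0), b"key").unwrap().unwrap(), b"value");
}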

View File

@@ -4,14 +4,6 @@ version = "0.1.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
-log = "0.3"
-ethcore-bytes = { path = "../bytes" }
-ethcore-bigint = { path = "../bigint" }
-ethcore-devtools = { path = "../../devtools" }
 elastic-array = "0.9"
-hashdb = { path = "../hashdb" }
-parking_lot = "0.4"
-regex = "0.2"
-rlp = { path = "../rlp" }
-rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" }
 error-chain = "0.11.0-rc.2"
+ethcore-bytes = { path = "../bytes" }

View File

@@ -16,48 +16,21 @@
 //! Key-Value store abstraction with `RocksDB` backend.
-#[macro_use]
-extern crate log;
 #[macro_use]
 extern crate error_chain;
-extern crate ethcore_bytes as bytes;
-extern crate ethcore_bigint as bigint;
-extern crate ethcore_devtools as devtools;
 extern crate elastic_array;
-extern crate hashdb;
-extern crate parking_lot;
-extern crate rlp;
-extern crate rocksdb;
-extern crate regex;
+extern crate ethcore_bytes as bytes;
-use std::{mem, fs, io};
-use std::collections::{HashMap, BTreeMap};
-use std::marker::PhantomData;
-use std::path::{PathBuf, Path};
-use parking_lot::{Mutex, MutexGuard, RwLock};
-use elastic_array::*;
-use hashdb::DBValue;
-use rlp::{UntrustedRlp, RlpType, Compressible};
-use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
-	Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column, ReadOptions};
+use std::io;
+use elastic_array::{ElasticArray128, ElasticArray32};
 use bytes::Bytes;
-#[cfg(target_os = "linux")]
-use regex::Regex;
-#[cfg(target_os = "linux")]
-use std::process::Command;
-#[cfg(target_os = "linux")]
-use std::fs::File;
-const DB_BACKGROUND_FLUSHES: i32 = 2;
-const DB_BACKGROUND_COMPACTIONS: i32 = 2;
-const DB_WRITE_BUFFER_SIZE: usize = 2048 * 1000;
 /// Required length of prefixes.
 pub const PREFIX_LEN: usize = 12;
+/// Database value.
+pub type DBValue = ElasticArray128<u8>;
 error_chain! {
 	types {
 		Error, ErrorKind, ResultExt;
@@ -71,11 +44,13 @@ error_chain! {
 /// Write transaction. Batches a sequence of put/delete operations for efficiency.
 #[derive(Default, Clone, PartialEq)]
 pub struct DBTransaction {
-	ops: Vec<DBOp>,
+	/// Database operations.
+	pub ops: Vec<DBOp>,
 }
+/// Database operation.
 #[derive(Clone, PartialEq)]
-enum DBOp {
+pub enum DBOp {
 	Insert {
 		col: Option<u32>,
 		key: ElasticArray32<u8>,
@@ -150,12 +125,6 @@ impl DBTransaction {
 	}
 }
-enum KeyState {
-	Insert(DBValue),
-	InsertCompressed(DBValue),
-	Delete,
-}
 /// Generic key-value database.
 ///
 /// This makes a distinction between "buffered" and "flushed" values. Values which have been
@@ -206,847 +175,3 @@ pub trait KeyValueDB: Sync + Send {
 	/// Attempt to replace this database with a new one located at the given path.
 	fn restore(&self, new_db: &str) -> Result<(), Error>;
 }
/// A key-value database fulfilling the `KeyValueDB` trait, living in memory.
/// This is generally intended for tests and is not particularly optimized.
pub struct InMemory {
columns: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, DBValue>>>,
}
/// Create an in-memory database with the given number of columns.
/// Columns will be indexable by 0..`num_cols`
pub fn in_memory(num_cols: u32) -> InMemory {
let mut cols = HashMap::new();
cols.insert(None, BTreeMap::new());
for idx in 0..num_cols {
cols.insert(Some(idx), BTreeMap::new());
}
InMemory {
columns: RwLock::new(cols)
}
}
impl KeyValueDB for InMemory {
fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
let columns = self.columns.read();
match columns.get(&col) {
None => Err(format!("No such column family: {:?}", col)),
Some(map) => Ok(map.get(key).cloned()),
}
}
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
let columns = self.columns.read();
match columns.get(&col) {
None => None,
Some(map) =>
map.iter()
.find(|&(ref k, _)| k.starts_with(prefix))
.map(|(_, v)| v.to_vec().into_boxed_slice())
}
}
fn write_buffered(&self, transaction: DBTransaction) {
let mut columns = self.columns.write();
let ops = transaction.ops;
for op in ops {
match op {
DBOp::Insert { col, key, value } => {
if let Some(mut col) = columns.get_mut(&col) {
col.insert(key.into_vec(), value);
}
},
DBOp::InsertCompressed { col, key, value } => {
if let Some(mut col) = columns.get_mut(&col) {
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
let mut value = DBValue::new();
value.append_slice(&compressed);
col.insert(key.into_vec(), value);
}
},
DBOp::Delete { col, key } => {
if let Some(mut col) = columns.get_mut(&col) {
col.remove(&*key);
}
},
}
}
}
fn flush(&self) -> Result<(), String> { Ok(()) }
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
match self.columns.read().get(&col) {
Some(map) => Box::new( // TODO: worth optimizing at all?
map.clone()
.into_iter()
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
),
None => Box::new(None.into_iter()),
}
}
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
{
match self.columns.read().get(&col) {
Some(map) => Box::new(
map.clone()
.into_iter()
.skip_while(move |&(ref k, _)| !k.starts_with(prefix))
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
),
None => Box::new(None.into_iter()),
}
}
fn restore(&self, _new_db: &str) -> Result<(), Error> {
Err("Attempted to restore in-memory database".into())
}
}
/// Compaction profile for the database settings
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct CompactionProfile {
/// L0-L1 target file size
pub initial_file_size: u64,
/// L2-LN target file size multiplier
pub file_size_multiplier: i32,
/// rate limiter for background flushes and compactions, bytes/sec, if any
pub write_rate_limit: Option<u64>,
}
impl Default for CompactionProfile {
/// Default profile suitable for most storage
fn default() -> CompactionProfile {
CompactionProfile::ssd()
}
}
/// Given the output of the `df` command, return the path of the Linux rotational-flag file.
#[cfg(target_os = "linux")]
pub fn rotational_from_df_output(df_out: Vec<u8>) -> Option<PathBuf> {
use std::str;
str::from_utf8(df_out.as_slice())
.ok()
// Get the drive name.
.and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})")
.ok()
.and_then(|re| re.captures(df_str))
.and_then(|captures| captures.get(1)))
// Generate path e.g. /sys/block/sda/queue/rotational
.map(|drive_path| {
let mut p = PathBuf::from("/sys/block");
p.push(drive_path.as_str());
p.push("queue/rotational");
p
})
}
impl CompactionProfile {
/// Attempt to determine the best profile automatically, only Linux for now.
#[cfg(target_os = "linux")]
pub fn auto(db_path: &Path) -> CompactionProfile {
use std::io::Read;
let hdd_check_file = db_path
.to_str()
.and_then(|path_str| Command::new("df").arg(path_str).output().ok())
.and_then(|df_res| match df_res.status.success() {
true => Some(df_res.stdout),
false => None,
})
.and_then(rotational_from_df_output);
// Read out the file and match compaction profile.
if let Some(hdd_check) = hdd_check_file {
if let Ok(mut file) = File::open(hdd_check.as_path()) {
let mut buffer = [0; 1];
if file.read_exact(&mut buffer).is_ok() {
// 0 means not rotational.
if buffer == [48] { return Self::ssd(); }
// 1 means rotational.
if buffer == [49] { return Self::hdd(); }
}
}
}
// Fallback if drive type was not determined.
Self::default()
}
/// Just default for other platforms.
#[cfg(not(target_os = "linux"))]
pub fn auto(_db_path: &Path) -> CompactionProfile {
Self::default()
}
/// Default profile suitable for SSD storage
pub fn ssd() -> CompactionProfile {
CompactionProfile {
initial_file_size: 32 * 1024 * 1024,
file_size_multiplier: 2,
write_rate_limit: None,
}
}
/// Slow HDD compaction profile
pub fn hdd() -> CompactionProfile {
CompactionProfile {
initial_file_size: 192 * 1024 * 1024,
file_size_multiplier: 1,
write_rate_limit: Some(8 * 1024 * 1024),
}
}
}
/// Database configuration
#[derive(Clone)]
pub struct DatabaseConfig {
/// Max number of open files.
pub max_open_files: i32,
/// Cache sizes (in MiB) for specific columns.
pub cache_sizes: HashMap<Option<u32>, usize>,
/// Compaction profile
pub compaction: CompactionProfile,
/// Set number of columns
pub columns: Option<u32>,
/// Should we keep WAL enabled?
pub wal: bool,
}
impl DatabaseConfig {
/// Create new `DatabaseConfig` with default parameters and specified set of columns.
/// Note that cache sizes must be explicitly set.
pub fn with_columns(columns: Option<u32>) -> Self {
let mut config = Self::default();
config.columns = columns;
config
}
/// Set the column cache size in MiB.
pub fn set_cache(&mut self, col: Option<u32>, size: usize) {
self.cache_sizes.insert(col, size);
}
}
impl Default for DatabaseConfig {
fn default() -> DatabaseConfig {
DatabaseConfig {
cache_sizes: HashMap::new(),
max_open_files: 512,
compaction: CompactionProfile::default(),
columns: None,
wal: true,
}
}
}
/// Database iterator (for flushed data only)
// The compromise of holding only a virtual borrow vs. holding a lock on the
// inner DB (to prevent closing via restoration) may be re-evaluated in the future.
//
pub struct DatabaseIterator<'a> {
iter: DBIterator,
_marker: PhantomData<&'a Database>,
}
impl<'a> Iterator for DatabaseIterator<'a> {
type Item = (Box<[u8]>, Box<[u8]>);
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
struct DBAndColumns {
db: DB,
cfs: Vec<Column>,
}
// get column family configuration from database config.
fn col_config(col: u32, config: &DatabaseConfig) -> Options {
// default cache size for columns not specified.
const DEFAULT_CACHE: usize = 2;
let mut opts = Options::new();
opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
opts.set_target_file_size_base(config.compaction.initial_file_size);
opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);
opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);
let col_opt = config.columns.map(|_| col);
{
let cache_size = config.cache_sizes.get(&col_opt).cloned().unwrap_or(DEFAULT_CACHE);
let mut block_opts = BlockBasedOptions::new();
// all goes to read cache.
block_opts.set_cache(Cache::new(cache_size * 1024 * 1024));
opts.set_block_based_table_factory(&block_opts);
}
opts
}
/// Key-Value database.
pub struct Database {
db: RwLock<Option<DBAndColumns>>,
config: DatabaseConfig,
write_opts: WriteOptions,
read_opts: ReadOptions,
path: String,
// Dirty values added with `write_buffered`. Cleaned on `flush`.
overlay: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
// Values currently being flushed. Cleared when `flush` completes.
flushing: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
// Prevents concurrent flushes.
// Value indicates if a flush is in progress.
flushing_lock: Mutex<bool>,
}
impl Database {
/// Open database with default settings.
pub fn open_default(path: &str) -> Result<Database, String> {
Database::open(&DatabaseConfig::default(), path)
}
/// Open database file. Creates if it does not exist.
pub fn open(config: &DatabaseConfig, path: &str) -> Result<Database, String> {
let mut opts = Options::new();
if let Some(rate_limit) = config.compaction.write_rate_limit {
opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit))?;
}
opts.set_parsed_options(&format!("max_total_wal_size={}", 64 * 1024 * 1024))?;
opts.set_parsed_options("verify_checksums_in_compaction=0")?;
opts.set_parsed_options("keep_log_file_num=1")?;
opts.set_max_open_files(config.max_open_files);
opts.create_if_missing(true);
opts.set_use_fsync(false);
opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);
opts.set_max_background_flushes(DB_BACKGROUND_FLUSHES);
opts.set_max_background_compactions(DB_BACKGROUND_COMPACTIONS);
// compaction settings
opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
opts.set_target_file_size_base(config.compaction.initial_file_size);
opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);
let mut cf_options = Vec::with_capacity(config.columns.unwrap_or(0) as usize);
let cfnames: Vec<_> = (0..config.columns.unwrap_or(0)).map(|c| format!("col{}", c)).collect();
let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect();
for col in 0 .. config.columns.unwrap_or(0) {
cf_options.push(col_config(col, &config));
}
let mut write_opts = WriteOptions::new();
if !config.wal {
write_opts.disable_wal(true);
}
let mut read_opts = ReadOptions::new();
read_opts.set_verify_checksums(false);
let mut cfs: Vec<Column> = Vec::new();
let db = match config.columns {
Some(columns) => {
match DB::open_cf(&opts, path, &cfnames, &cf_options) {
Ok(db) => {
cfs = cfnames.iter().map(|n| db.cf_handle(n)
.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
assert!(cfs.len() == columns as usize);
Ok(db)
}
Err(_) => {
// retry and create CFs
match DB::open_cf(&opts, path, &[], &[]) {
Ok(mut db) => {
cfs = cfnames.iter().enumerate().map(|(i, n)| db.create_cf(n, &cf_options[i])).collect::<Result<_, _>>()?;
Ok(db)
},
err @ Err(_) => err,
}
}
}
},
None => DB::open(&opts, path)
};
let db = match db {
Ok(db) => db,
Err(ref s) if s.starts_with("Corruption:") => {
info!("{}", s);
info!("Attempting DB repair for {}", path);
DB::repair(&opts, path)?;
match cfnames.is_empty() {
true => DB::open(&opts, path)?,
false => DB::open_cf(&opts, path, &cfnames, &cf_options)?
}
},
Err(s) => { return Err(s); }
};
let num_cols = cfs.len();
Ok(Database {
db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })),
config: config.clone(),
write_opts: write_opts,
overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
flushing_lock: Mutex::new(false),
path: path.to_owned(),
read_opts: read_opts,
})
}
/// Helper to create new transaction for this database.
pub fn transaction(&self) -> DBTransaction {
DBTransaction::new()
}
fn to_overlay_column(col: Option<u32>) -> usize {
col.map_or(0, |c| (c + 1) as usize)
}
/// Commit transaction to database.
pub fn write_buffered(&self, tr: DBTransaction) {
let mut overlay = self.overlay.write();
let ops = tr.ops;
for op in ops {
match op {
DBOp::Insert { col, key, value } => {
let c = Self::to_overlay_column(col);
overlay[c].insert(key, KeyState::Insert(value));
},
DBOp::InsertCompressed { col, key, value } => {
let c = Self::to_overlay_column(col);
overlay[c].insert(key, KeyState::InsertCompressed(value));
},
DBOp::Delete { col, key } => {
let c = Self::to_overlay_column(col);
overlay[c].insert(key, KeyState::Delete);
},
}
};
}
/// Commit buffered changes to database. Must be called under `flush_lock`
fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<bool>) -> Result<(), String> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let batch = WriteBatch::new();
mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write());
{
for (c, column) in self.flushing.read().iter().enumerate() {
for (ref key, ref state) in column.iter() {
match **state {
KeyState::Delete => {
if c > 0 {
batch.delete_cf(cfs[c - 1], &key)?;
} else {
batch.delete(&key)?;
}
},
KeyState::Insert(ref value) => {
if c > 0 {
batch.put_cf(cfs[c - 1], &key, value)?;
} else {
batch.put(&key, &value)?;
}
},
KeyState::InsertCompressed(ref value) => {
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
if c > 0 {
batch.put_cf(cfs[c - 1], &key, &compressed)?;
} else {
batch.put(&key, &value)?;
}
}
}
}
}
}
db.write_opt(batch, &self.write_opts)?;
for column in self.flushing.write().iter_mut() {
column.clear();
column.shrink_to_fit();
}
Ok(())
},
None => Err("Database is closed".to_owned())
}
}
/// Commit buffered changes to database.
pub fn flush(&self) -> Result<(), String> {
let mut lock = self.flushing_lock.lock();
// If RocksDB batch allocation fails the thread gets terminated and the lock is released.
// The value inside the lock is used to detect that.
if *lock {
// This can only happen if another flushing thread is terminated unexpectedly.
return Err("Database write failure. Running low on memory perhaps?".to_owned());
}
*lock = true;
let result = self.write_flushing_with_lock(&mut lock);
*lock = false;
result
}
/// Commit transaction to database.
pub fn write(&self, tr: DBTransaction) -> Result<(), String> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let batch = WriteBatch::new();
let ops = tr.ops;
for op in ops {
match op {
DBOp::Insert { col, key, value } => {
col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(cfs[c as usize], &key, &value))?
},
DBOp::InsertCompressed { col, key, value } => {
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(cfs[c as usize], &key, &compressed))?
},
DBOp::Delete { col, key } => {
col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(cfs[c as usize], &key))?
},
}
}
db.write_opt(batch, &self.write_opts)
},
None => Err("Database is closed".to_owned())
}
}
/// Get value by key.
pub fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let overlay = &self.overlay.read()[Self::to_overlay_column(col)];
match overlay.get(key) {
Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
Some(&KeyState::Delete) => Ok(None),
None => {
let flushing = &self.flushing.read()[Self::to_overlay_column(col)];
match flushing.get(key) {
Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
Some(&KeyState::Delete) => Ok(None),
None => {
col.map_or_else(
|| db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))),
|c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))))
},
}
},
}
},
None => Ok(None),
}
}
/// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values.
// TODO: support prefix seek for unflushed data
pub fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
self.iter_from_prefix(col, prefix).and_then(|mut iter| {
match iter.next() {
// TODO: use prefix_same_as_start read option (not available in C API currently)
Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None },
_ => None
}
})
}
/// Get database iterator for flushed data.
pub fn iter(&self, col: Option<u32>) -> Option<DatabaseIterator> {
//TODO: iterate over overlay
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let iter = col.map_or_else(
|| db.iterator_opt(IteratorMode::Start, &self.read_opts),
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts)
.expect("iterator params are valid; qed")
);
Some(DatabaseIterator {
iter: iter,
_marker: PhantomData,
})
},
None => None,
}
}
fn iter_from_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<DatabaseIterator> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts),
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts)
.expect("iterator params are valid; qed"));
Some(DatabaseIterator {
iter: iter,
_marker: PhantomData,
})
},
None => None,
}
}
/// Close the database
fn close(&self) {
*self.db.write() = None;
self.overlay.write().clear();
self.flushing.write().clear();
}
/// Restore the database from a copy at given path.
pub fn restore(&self, new_db: &str) -> Result<(), Error> {
self.close();
let mut backup_db = PathBuf::from(&self.path);
backup_db.pop();
backup_db.push("backup_db");
let existed = match fs::rename(&self.path, &backup_db) {
Ok(_) => true,
Err(e) => if let io::ErrorKind::NotFound = e.kind() {
false
} else {
return Err(e.into());
}
};
match fs::rename(&new_db, &self.path) {
Ok(_) => {
// clean up the backup.
if existed {
fs::remove_dir_all(&backup_db)?;
}
}
Err(e) => {
// restore the backup.
if existed {
fs::rename(&backup_db, &self.path)?;
}
return Err(e.into())
}
}
// reopen the database and steal handles into self
let db = Self::open(&self.config, &self.path)?;
*self.db.write() = mem::replace(&mut *db.db.write(), None);
*self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new());
*self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new());
Ok(())
}
/// The number of non-default column families.
pub fn num_columns(&self) -> u32 {
self.db.read().as_ref()
.and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) } )
.map(|n| n as u32)
.unwrap_or(0)
}
/// Drop a column family.
pub fn drop_column(&self) -> Result<(), String> {
match *self.db.write() {
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
if let Some(col) = cfs.pop() {
let name = format!("col{}", cfs.len());
drop(col);
db.drop_cf(&name)?;
}
Ok(())
},
None => Ok(()),
}
}
/// Add a column family.
pub fn add_column(&self) -> Result<(), String> {
match *self.db.write() {
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
let col = cfs.len() as u32;
let name = format!("col{}", col);
cfs.push(db.create_cf(&name, &col_config(col, &self.config))?);
Ok(())
},
None => Ok(()),
}
}
}
// duplicate declaration of methods here to avoid trait import in certain existing cases
// at time of addition.
impl KeyValueDB for Database {
fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
Database::get(self, col, key)
}
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
Database::get_by_prefix(self, col, prefix)
}
fn write_buffered(&self, transaction: DBTransaction) {
Database::write_buffered(self, transaction)
}
fn write(&self, transaction: DBTransaction) -> Result<(), String> {
Database::write(self, transaction)
}
fn flush(&self) -> Result<(), String> {
Database::flush(self)
}
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
let unboxed = Database::iter(self, col);
Box::new(unboxed.into_iter().flat_map(|inner| inner))
}
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
{
let unboxed = Database::iter_from_prefix(self, col, prefix);
Box::new(unboxed.into_iter().flat_map(|inner| inner))
}
fn restore(&self, new_db: &str) -> Result<(), Error> {
Database::restore(self, new_db)
}
}
impl Drop for Database {
fn drop(&mut self) {
// write all buffered changes if we can.
let _ = self.flush();
}
}
#[cfg(test)]
mod tests {
use bigint::hash::H256;
use super::*;
use devtools::*;
use std::str::FromStr;
fn test_db(config: &DatabaseConfig) {
let path = RandomTempPath::create_dir();
let db = Database::open(config, path.as_path().to_str().unwrap()).unwrap();
let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let mut batch = db.transaction();
batch.put(None, &key1, b"cat");
batch.put(None, &key2, b"dog");
db.write(batch).unwrap();
assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"cat");
let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect();
assert_eq!(contents.len(), 2);
assert_eq!(&*contents[0].0, &*key1);
assert_eq!(&*contents[0].1, b"cat");
assert_eq!(&*contents[1].0, &*key2);
assert_eq!(&*contents[1].1, b"dog");
let mut batch = db.transaction();
batch.delete(None, &key1);
db.write(batch).unwrap();
assert!(db.get(None, &key1).unwrap().is_none());
let mut batch = db.transaction();
batch.put(None, &key1, b"cat");
db.write(batch).unwrap();
let mut transaction = db.transaction();
transaction.put(None, &key3, b"elephant");
transaction.delete(None, &key1);
db.write(transaction).unwrap();
assert!(db.get(None, &key1).unwrap().is_none());
assert_eq!(&*db.get(None, &key3).unwrap().unwrap(), b"elephant");
assert_eq!(&*db.get_by_prefix(None, &key3).unwrap(), b"elephant");
assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog");
let mut transaction = db.transaction();
transaction.put(None, &key1, b"horse");
transaction.delete(None, &key3);
db.write_buffered(transaction);
assert!(db.get(None, &key3).unwrap().is_none());
assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
db.flush().unwrap();
assert!(db.get(None, &key3).unwrap().is_none());
assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
}
#[test]
fn kvdb() {
let path = RandomTempPath::create_dir();
let _ = Database::open_default(path.as_path().to_str().unwrap()).unwrap();
test_db(&DatabaseConfig::default());
}
#[test]
#[cfg(target_os = "linux")]
fn df_to_rotational() {
use std::path::PathBuf;
// Example df output.
let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10];
let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational"));
assert_eq!(rotational_from_df_output(example_df), expected_output);
}
#[test]
fn add_columns() {
let config = DatabaseConfig::default();
let config_5 = DatabaseConfig::with_columns(Some(5));
let path = RandomTempPath::create_dir();
// open empty, add 5.
{
let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 0);
for i in 0..5 {
db.add_column().unwrap();
assert_eq!(db.num_columns(), i + 1);
}
}
// reopen as 5.
{
let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 5);
}
}
#[test]
fn drop_columns() {
let config = DatabaseConfig::default();
let config_5 = DatabaseConfig::with_columns(Some(5));
let path = RandomTempPath::create_dir();
// open 5, remove all.
{
let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 5);
for i in (0..5).rev() {
db.drop_column().unwrap();
assert_eq!(db.num_columns(), i);
}
}
// reopen as 0.
{
let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 0);
}
}
}
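
After this file's diff, `kvdb` retains only `DBTransaction`, `DBOp`, the error-chain types, and the `KeyValueDB` trait, with both new crates implementing the trait. A sketch of the pattern the callers in this commit rely on (generic code names only the trait; tests swap in the memory backend), using just the items shown above:

extern crate kvdb;
extern crate kvdb_memorydb;

use std::sync::Arc;
use kvdb::KeyValueDB;

// Callers hold a trait object, so the RocksDB and in-memory backends are
// interchangeable behind `Arc<KeyValueDB>` at construction time.
fn count_entries(db: &KeyValueDB, col: Option<u32>) -> usize {
	db.iter(col).count()
}

fn main() {
	let db: Arc<KeyValueDB> = Arc::new(kvdb_memorydb::create(0));
	assert_eq!(count_entries(&*db, None), 0);
}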

View File

@@ -7,4 +7,5 @@ authors = ["Parity Technologies <admin@parity.io>"]
 log = "0.3"
 macros = { path = "../macros" }
 kvdb = { path = "../kvdb" }
+kvdb-rocksdb = { path = "../kvdb-rocksdb" }
 ethcore-devtools = { path = "../../devtools" }

View File

@ -25,14 +25,15 @@ extern crate macros;
 extern crate ethcore_devtools as devtools;
 extern crate kvdb;
+extern crate kvdb_rocksdb;

 use std::collections::BTreeMap;
-use std::fs;
-use std::fmt;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
+use std::{fs, fmt};

-use kvdb::{CompactionProfile, Database, DatabaseConfig, DBTransaction};
+use kvdb::DBTransaction;
+use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};

 /// Migration config.
 #[derive(Clone)]
@ -274,7 +275,7 @@ impl Manager {
 		trace!(target: "migration", "Expecting database to contain {:?} columns", columns);
 		let mut db_config = DatabaseConfig {
 			max_open_files: 64,
-			cache_sizes: Default::default(),
+			memory_budget: None,
 			compaction: config.compaction_profile,
 			columns: columns,
 			wal: true,
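
Note the API change this hunk tracks: kvdb-rocksdb's DatabaseConfig replaces the per-column cache_sizes map with a single optional memory_budget. Spelled out, a config under the new API builds roughly like this (a sketch; the field names come from the hunk above, the values are illustrative):

	let db_config = DatabaseConfig {
		max_open_files: 64,
		memory_budget: None, // None: fall back to the crate's default overall budget
		compaction: CompactionProfile::default(),
		columns: None,
		wal: true,
	};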

@ -22,7 +22,7 @@ use std::collections::BTreeMap;
 use std::sync::Arc;
 use std::path::{Path, PathBuf};
 use {Batch, Config, Error, SimpleMigration, Migration, Manager, ChangeColumns};
-use kvdb::Database;
+use kvdb_rocksdb::Database;
 use devtools::RandomTempPath;

 fn db_path(path: &Path) -> PathBuf {
@ -229,7 +229,7 @@ fn pre_columns() {
 #[test]
 fn change_columns() {
-	use kvdb::DatabaseConfig;
+	use kvdb_rocksdb::DatabaseConfig;

 	let mut manager = Manager::new(Config::default());
 	manager.add_migration(ChangeColumns {

@ -55,13 +55,6 @@ impl ArchiveDB {
 		}
 	}

-	/// Create a new instance with an anonymous temporary database.
-	#[cfg(test)]
-	fn new_temp() -> ArchiveDB {
-		let backing = Arc::new(::kvdb::in_memory(0));
-		Self::new(backing, None)
-	}
-
 	fn payload(&self, key: &H256) -> Option<DBValue> {
 		self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?")
 	}
@ -206,18 +199,16 @@ mod tests {
 	#![cfg_attr(feature="dev", allow(blacklisted_name))]
 	#![cfg_attr(feature="dev", allow(similar_names))]

-	use std::path::Path;
 	use keccak::keccak;
 	use hashdb::{HashDB, DBValue};
 	use super::*;
 	use journaldb::traits::JournalDB;
-	use kvdb::Database;
-	use bigint::hash::H32;
+	use kvdb_memorydb;

 	#[test]
 	fn insert_same_in_fork() {
 		// history is 1
-		let mut jdb = ArchiveDB::new_temp();
+		let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);

 		let x = jdb.insert(b"X");
 		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
@ -239,7 +230,7 @@ mod tests {
 	#[test]
 	fn long_history() {
 		// history is 3
-		let mut jdb = ArchiveDB::new_temp();
+		let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.contains(&h));
@ -257,7 +248,7 @@ mod tests {
 	#[test]
 	#[should_panic]
 	fn multiple_owed_removal_not_allowed() {
-		let mut jdb = ArchiveDB::new_temp();
+		let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.contains(&h));
@ -271,7 +262,7 @@ mod tests {
 	#[test]
 	fn complex() {
 		// history is 1
-		let mut jdb = ArchiveDB::new_temp();
+		let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);

 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@ -303,7 +294,7 @@ mod tests {
 	#[test]
 	fn fork() {
 		// history is 1
-		let mut jdb = ArchiveDB::new_temp();
+		let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);

 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@ -329,7 +320,7 @@ mod tests {
 	#[test]
 	fn overwrite() {
 		// history is 1
-		let mut jdb = ArchiveDB::new_temp();
+		let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);

 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@ -348,7 +339,7 @@ mod tests {
 	#[test]
 	fn fork_same_key() {
 		// history is 1
-		let mut jdb = ArchiveDB::new_temp();
+		let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();

 		let foo = jdb.insert(b"foo");
@ -362,19 +353,13 @@ mod tests {
 		assert!(jdb.contains(&foo));
 	}

-	fn new_db(dir: &Path) -> ArchiveDB {
-		let db = Database::open_default(dir.to_str().unwrap()).unwrap();
-		ArchiveDB::new(Arc::new(db), None)
-	}
-
 	#[test]
 	fn reopen() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
 		let bar = H256::random();

 		let foo = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = ArchiveDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
@ -383,13 +368,13 @@ mod tests {
 		};

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = ArchiveDB::new(shared_db.clone(), None);
 			jdb.remove(&foo);
 			jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
 		}

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = ArchiveDB::new(shared_db, None);
 			assert!(jdb.contains(&foo));
 			assert!(jdb.contains(&bar));
 			jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@ -398,11 +383,10 @@ mod tests {
 	#[test]
 	fn reopen_remove() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));

 		let foo = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = ArchiveDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@ -416,7 +400,7 @@ mod tests {
 		};

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = ArchiveDB::new(shared_db, None);
 			jdb.remove(&foo);
 			jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap();
 			assert!(jdb.contains(&foo));
@ -428,10 +412,9 @@ mod tests {
 	#[test]
 	fn reopen_fork() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));

 		let (foo, _, _) = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = ArchiveDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			let bar = jdb.insert(b"bar");
@ -446,7 +429,7 @@ mod tests {
 		};

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = ArchiveDB::new(shared_db, None);
 			jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
 			assert!(jdb.contains(&foo));
 		}
@ -454,17 +437,17 @@ mod tests {
 	#[test]
 	fn returns_state() {
-		let temp = ::devtools::RandomTempPath::new();
+		let shared_db = Arc::new(kvdb_memorydb::create(0));

 		let key = {
-			let mut jdb = new_db(temp.as_path().as_path());
+			let mut jdb = ArchiveDB::new(shared_db.clone(), None);
 			let key = jdb.insert(b"foo");
 			jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 			key
 		};

 		{
-			let jdb = new_db(temp.as_path().as_path());
+			let jdb = ArchiveDB::new(shared_db, None);
 			let state = jdb.state(&key);
 			assert!(state.is_some());
 		}
@ -472,9 +455,7 @@ mod tests {
 	#[test]
 	fn inject() {
-		let temp = ::devtools::RandomTempPath::new();
-		let mut jdb = new_db(temp.as_path().as_path());
+		let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
 		let key = jdb.insert(b"dog");
 		jdb.inject_batch().unwrap();
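
The rewrite pattern repeated through this test module (and the journaldb modules below): the temp-dir RocksDB plus a new_db(&dir) helper becomes one shared kvdb-memorydb backing, and "reopening" is just wrapping the same Arc again. A condensed sketch, using the names from the diff:

	let shared_db = Arc::new(kvdb_memorydb::create(0));
	// First session: journal some data.
	{
		let mut jdb = ArchiveDB::new(shared_db.clone(), None);
		jdb.insert(b"foo");
		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
	}
	// "Reopen": a fresh wrapper over the same backing sees the committed state.
	let jdb = ArchiveDB::new(shared_db, None);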

@ -140,13 +140,6 @@ impl EarlyMergeDB {
 		}
 	}

-	/// Create a new instance with an anonymous temporary database.
-	#[cfg(test)]
-	fn new_temp() -> EarlyMergeDB {
-		let backing = Arc::new(::kvdb::in_memory(0));
-		Self::new(backing, None)
-	}
-
 	fn morph_key(key: &H256, index: u8) -> Bytes {
 		let mut ret = (&**key).to_owned();
 		ret.push(index);
@ -554,19 +547,17 @@ mod tests {
 	#![cfg_attr(feature="dev", allow(blacklisted_name))]
 	#![cfg_attr(feature="dev", allow(similar_names))]

-	use std::path::Path;
 	use keccak::keccak;
 	use hashdb::{HashDB, DBValue};
 	use super::*;
 	use super::super::traits::JournalDB;
 	use ethcore_logger::init_log;
-	use kvdb::{DatabaseConfig};
-	use bigint::hash::H32;
+	use kvdb_memorydb;

 	#[test]
 	fn insert_same_in_fork() {
 		// history is 1
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();

 		let x = jdb.insert(b"X");
 		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
@ -595,7 +586,7 @@ mod tests {
 	#[test]
 	fn insert_older_era() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0a"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -616,7 +607,7 @@ mod tests {
 	#[test]
 	fn long_history() {
 		// history is 3
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -639,7 +630,7 @@ mod tests {
 	#[test]
 	fn complex() {
 		// history is 1
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();

 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@ -682,7 +673,7 @@ mod tests {
 	#[test]
 	fn fork() {
 		// history is 1
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();

 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@ -714,7 +705,7 @@ mod tests {
 	#[test]
 	fn overwrite() {
 		// history is 1
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();

 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@ -737,7 +728,7 @@ mod tests {
 	#[test]
 	fn fork_same_key_one() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -762,7 +753,7 @@ mod tests {
 	#[test]
 	fn fork_same_key_other() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -787,7 +778,7 @@ mod tests {
 	#[test]
 	fn fork_ins_del_ins() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -818,20 +809,18 @@ mod tests {
 		assert!(jdb.can_reconstruct_refs());
 	}

-	fn new_db(path: &Path) -> EarlyMergeDB {
-		let config = DatabaseConfig::with_columns(Some(1));
-		let backing = Arc::new(::kvdb::Database::open(&config, path.to_str().unwrap()).unwrap());
-		EarlyMergeDB::new(backing, Some(0))
+	fn new_db() -> EarlyMergeDB {
+		let backing = Arc::new(kvdb_memorydb::create(0));
+		EarlyMergeDB::new(backing, None)
 	}

 	#[test]
 	fn reopen() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
 		let bar = H256::random();

 		let foo = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
@ -841,14 +830,14 @@ mod tests {
 		};

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			jdb.remove(&foo);
 			jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 		}

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db, None);
 			assert!(jdb.contains(&foo));
 			assert!(jdb.contains(&bar));
 			jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@ -861,7 +850,7 @@ mod tests {
 	fn insert_delete_insert_delete_insert_expunge() {
 		init_log();
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();

 		// history is 4
 		let foo = jdb.insert(b"foo");
@ -887,7 +876,7 @@ mod tests {
 	#[test]
 	fn forked_insert_delete_insert_delete_insert_expunge() {
 		init_log();
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();

 		// history is 4
 		let foo = jdb.insert(b"foo");
@ -933,7 +922,7 @@ mod tests {
 	#[test]
 	fn broken_assert() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();

 		// history is 1
 		let foo = jdb.insert(b"foo");
@ -962,7 +951,7 @@ mod tests {
 	#[test]
 	fn reopen_test() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();

 		// history is 4
 		let foo = jdb.insert(b"foo");
@ -997,13 +986,11 @@ mod tests {
 	fn reopen_remove_three() {
 		init_log();

-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));

 		let foo = keccak(b"foo");

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			// history is 1
 			jdb.insert(b"foo");
 			jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@ -1025,7 +1012,7 @@ mod tests {
 		// incantation to reopen the db
 		}; {
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			jdb.remove(&foo);
 			jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
@ -1034,7 +1021,7 @@ mod tests {
 		// incantation to reopen the db
 		}; {
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
@ -1042,7 +1029,7 @@ mod tests {
 		// incantation to reopen the db
 		}; {
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db, None);
 			jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
@ -1052,10 +1039,10 @@ mod tests {
 	#[test]
 	fn reopen_fork() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));

 		let (foo, bar, baz) = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			let bar = jdb.insert(b"bar");
@ -1073,7 +1060,7 @@ mod tests {
 		};

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db, None);
 			jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 			assert!(jdb.contains(&foo));
@ -1084,9 +1071,7 @@ mod tests {
 	#[test]
 	fn inject() {
-		let temp = ::devtools::RandomTempPath::new();
-		let mut jdb = new_db(temp.as_path().as_path());
+		let mut jdb = new_db();
 		let key = jdb.insert(b"dog");
 		jdb.inject_batch().unwrap();

@ -117,13 +117,6 @@ impl OverlayRecentDB {
 		}
 	}

-	/// Create a new instance with an anonymous temporary database.
-	#[cfg(test)]
-	pub fn new_temp() -> OverlayRecentDB {
-		let backing = Arc::new(::kvdb::in_memory(0));
-		Self::new(backing, None)
-	}
-
 	#[cfg(test)]
 	fn can_reconstruct_refs(&self) -> bool {
 		let reconstructed = Self::read_overlay(&*self.backing, self.column);
@ -462,24 +455,22 @@ mod tests {
 	#![cfg_attr(feature="dev", allow(blacklisted_name))]
 	#![cfg_attr(feature="dev", allow(similar_names))]

-	use std::path::Path;
 	use keccak::keccak;
 	use super::*;
 	use hashdb::{HashDB, DBValue};
 	use ethcore_logger::init_log;
 	use journaldb::JournalDB;
-	use kvdb::Database;
-	use bigint::hash::H32;
+	use kvdb_memorydb;

-	fn new_db(path: &Path) -> OverlayRecentDB {
-		let backing = Arc::new(Database::open_default(path.to_str().unwrap()).unwrap());
+	fn new_db() -> OverlayRecentDB {
+		let backing = Arc::new(kvdb_memorydb::create(0));
 		OverlayRecentDB::new(backing, None)
 	}

 	#[test]
 	fn insert_same_in_fork() {
 		// history is 1
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();

 		let x = jdb.insert(b"X");
 		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
@ -509,7 +500,7 @@ mod tests {
 	#[test]
 	fn long_history() {
 		// history is 3
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -532,7 +523,7 @@ mod tests {
 	#[test]
 	fn complex() {
 		// history is 1
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();

 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@ -575,7 +566,7 @@ mod tests {
 	#[test]
 	fn fork() {
 		// history is 1
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();

 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@ -607,7 +598,7 @@ mod tests {
 	#[test]
 	fn overwrite() {
 		// history is 1
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();

 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@ -629,7 +620,7 @@ mod tests {
 	#[test]
 	fn fork_same_key_one() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -654,7 +645,7 @@ mod tests {
 	#[test]
 	fn fork_same_key_other() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -680,7 +671,7 @@ mod tests {
 	#[test]
 	fn fork_ins_del_ins() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -714,12 +705,11 @@ mod tests {
 	#[test]
 	fn reopen() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
 		let bar = H256::random();

 		let foo = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
@ -729,14 +719,14 @@ mod tests {
 		};

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			jdb.remove(&foo);
 			jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 		}

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			assert!(jdb.contains(&foo));
 			assert!(jdb.contains(&bar));
 			jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@ -748,7 +738,7 @@ mod tests {
 	#[test]
 	fn insert_delete_insert_delete_insert_expunge() {
 		init_log();
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();

 		// history is 4
 		let foo = jdb.insert(b"foo");
@ -774,7 +764,7 @@ mod tests {
 	#[test]
 	fn forked_insert_delete_insert_delete_insert_expunge() {
 		init_log();
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();

 		// history is 4
 		let foo = jdb.insert(b"foo");
@ -820,7 +810,7 @@ mod tests {
 	#[test]
 	fn broken_assert() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();

 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
@ -848,7 +838,7 @@ mod tests {
 	#[test]
 	fn reopen_test() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();

 		// history is 4
 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@ -882,13 +872,11 @@ mod tests {
 	fn reopen_remove_three() {
 		init_log();

-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));

 		let foo = keccak(b"foo");

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			// history is 1
 			jdb.insert(b"foo");
 			jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@ -910,7 +898,7 @@ mod tests {
 		// incantation to reopen the db
 		}; {
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			jdb.remove(&foo);
 			jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
@ -919,7 +907,7 @@ mod tests {
 		// incantation to reopen the db
 		}; {
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
@ -927,7 +915,7 @@ mod tests {
 		// incantation to reopen the db
 		}; {
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db, None);
 			jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
@ -937,10 +925,10 @@ mod tests {
 	#[test]
 	fn reopen_fork() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));

 		let (foo, bar, baz) = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			let bar = jdb.insert(b"bar");
@ -958,7 +946,7 @@ mod tests {
 		};

 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db, None);
 			jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 			assert!(jdb.contains(&foo));
@ -969,7 +957,7 @@ mod tests {
 	#[test]
 	fn insert_older_era() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0a"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@ -989,9 +977,7 @@ mod tests {
 	#[test]
 	fn inject() {
-		let temp = ::devtools::RandomTempPath::new();
-		let mut jdb = new_db(temp.as_path().as_path());
+		let mut jdb = new_db();
 		let key = jdb.insert(b"dog");
 		jdb.inject_batch().unwrap();
@ -1004,10 +990,10 @@ mod tests {
 	#[test]
 	fn earliest_era() {
-		let temp = ::devtools::RandomTempPath::new();
+		let shared_db = Arc::new(kvdb_memorydb::create(0));

 		// empty DB
-		let mut jdb = new_db(temp.as_path().as_path());
+		let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 		assert!(jdb.earliest_era().is_none());

 		// single journalled era.
@ -1041,7 +1027,7 @@ mod tests {
 		// reconstructed: no journal entries.
 		drop(jdb);
-		let jdb = new_db(temp.as_path().as_path());
+		let jdb = OverlayRecentDB::new(shared_db, None);
 		assert_eq!(jdb.earliest_era(), None);
 	}
 }

@ -75,13 +75,6 @@ impl RefCountedDB {
 			column: col,
 		}
 	}

-	/// Create a new instance with an anonymous temporary database.
-	#[cfg(test)]
-	fn new_temp() -> RefCountedDB {
-		let backing = Arc::new(::kvdb::in_memory(0));
-		Self::new(backing, None)
-	}
 }

 impl HashDB for RefCountedDB {
@ -217,13 +210,19 @@ mod tests {
 	use keccak::keccak;
 	use hashdb::{HashDB, DBValue};
+	use kvdb_memorydb;
 	use super::*;
 	use super::super::traits::JournalDB;

+	fn new_db() -> RefCountedDB {
+		let backing = Arc::new(kvdb_memorydb::create(0));
+		RefCountedDB::new(backing, None)
+	}
+
 	#[test]
 	fn long_history() {
 		// history is 3
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.contains(&h));
@ -241,7 +240,7 @@ mod tests {
 	#[test]
 	fn latest_era_should_work() {
 		// history is 3
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();
 		assert_eq!(jdb.latest_era(), None);
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@ -260,7 +259,7 @@ mod tests {
 	#[test]
 	fn complex() {
 		// history is 1
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();

 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@ -298,7 +297,7 @@ mod tests {
 	#[test]
 	fn fork() {
 		// history is 1
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();

 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@ -325,7 +324,7 @@ mod tests {
 	#[test]
 	fn inject() {
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();
 		let key = jdb.insert(b"dog");
 		jdb.inject_batch().unwrap();

@ -110,6 +110,9 @@ extern crate patricia_trie as trie;
 extern crate kvdb;
 extern crate util_error as error;

+#[cfg(test)]
+extern crate kvdb_memorydb;
+
 #[macro_use]
 extern crate log as rlog;

@ -50,7 +50,7 @@ impl OverlayDB {
 	/// Create a new instance of OverlayDB with an anonymous temporary database.
 	#[cfg(test)]
 	pub fn new_temp() -> OverlayDB {
-		let backing = Arc::new(::kvdb::in_memory(0));
+		let backing = Arc::new(::kvdb_memorydb::create(0));
 		Self::new(backing, None)
 	}