diff --git a/Cargo.lock b/Cargo.lock
index 845988155..531ec4ce8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -233,6 +233,9 @@ dependencies = [
 name = "cc"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 
 [[package]]
 name = "cfg-if"
@@ -587,6 +590,8 @@ dependencies = [
  "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
  "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -596,7 +601,7 @@ dependencies = [
  "migration 0.1.0",
  "native-contracts 0.1.0",
  "num 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-machine 0.1.0",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "patricia_trie 0.1.0",
@@ -744,6 +749,8 @@ dependencies = [
  "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "memorydb 0.1.0",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -830,6 +837,7 @@ dependencies = [
  "hash 0.1.0",
  "hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "native-contracts 0.1.0",
@@ -885,6 +893,7 @@ dependencies = [
  "hashdb 0.1.0",
  "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1010,6 +1019,7 @@ dependencies = [
  "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "macros 0.1.0",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1128,7 +1138,7 @@ version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -1263,7 +1273,7 @@ dependencies = [
  "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
  "traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1500,11 +1510,28 @@ version = "0.1.0"
 dependencies = [
  "elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethcore-bigint 0.1.3",
  "ethcore-bytes 0.1.0",
+]
+
+[[package]]
+name = "kvdb-memorydb"
+version = "0.1.0"
+dependencies = [
+ "kvdb 0.1.0",
+ "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rlp 0.2.0",
+]
+
+[[package]]
+name = "kvdb-rocksdb"
+version = "0.1.0"
+dependencies = [
+ "elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ethcore-bigint 0.1.3",
  "ethcore-devtools 1.8.0",
- "hashdb 0.1.0",
+ "kvdb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.2.0",
@@ -1656,6 +1683,7 @@ version = "0.1.0"
 dependencies = [
  "ethcore-devtools 1.8.0",
  "kvdb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "macros 0.1.0",
 ]
@@ -1845,7 +1873,7 @@ dependencies = [
  "ethcore-network 1.8.0",
  "ethcore-util 1.8.5",
  "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
- "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "native-contracts 0.1.0",
  "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1960,7 +1988,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "num_cpus"
-version = "1.6.2"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2076,12 +2104,12 @@ dependencies = [
  "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "isatty 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
- "kvdb 0.1.0",
+ "kvdb-rocksdb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "migration 0.1.0",
  "node-filter 1.8.0",
  "node-health 0.1.0",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "number_prefix 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "panic_hook 0.1.0",
  "parity-dapps 1.8.0",
@@ -2223,6 +2251,7 @@ dependencies = [
  "ethcore-util 1.8.5",
  "ethkey 0.2.0",
  "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.2.0",
  "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2280,7 +2309,7 @@ dependencies = [
  "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
  "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
  "jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
- "kvdb 0.1.0",
+ "kvdb-memorydb 0.1.0",
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "macros 0.1.0",
  "multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2693,7 +2722,7 @@ dependencies = [
  "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2780,7 +2809,7 @@ dependencies = [
 
 [[package]]
 name = "rocksdb"
 version = "0.4.5"
-source = "git+https://github.com/paritytech/rust-rocksdb#4364caec4dd5da1a1d78c39276774ee65bf55c7d"
+source = "git+https://github.com/paritytech/rust-rocksdb#166e14ed63cbd2e44b51267b8b98e4b89b0f236f"
 dependencies = [
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
  "local-encoding 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2790,10 +2819,11 @@ dependencies = [
 
 [[package]]
 name = "rocksdb-sys"
 version = "0.3.0"
-source = "git+https://github.com/paritytech/rust-rocksdb#4364caec4dd5da1a1d78c39276774ee65bf55c7d"
+source = "git+https://github.com/paritytech/rust-rocksdb#166e14ed63cbd2e44b51267b8b98e4b89b0f236f"
 dependencies = [
- "gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
+ "snappy-sys 0.1.0 (git+https://github.com/paritytech/rust-snappy)",
 ]
@@ -3090,6 +3120,15 @@ dependencies = [
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "snappy-sys"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/rust-snappy#858eac97192ea25d18d3f3626a8cc13ca0b175bb"
+dependencies = [
+ "gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "spmc"
 version = "0.2.2"
@@ -3803,7 +3842,7 @@ dependencies = [
 "checksum num-iter 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)" = "7485fcc84f85b4ecd0ea527b14189281cf27d60e583ae65ebc9c088b13dffe01"
 "checksum num-rational 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "288629c76fac4b33556f4b7ab57ba21ae202da65ba8b77466e6d598e31990790"
 "checksum num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "99843c856d68d8b4313b03a17e33c4bb42ae8f6610ea81b28abe076ac721b9b0"
-"checksum num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aec53c34f2d0247c5ca5d32cca1478762f301740468ee9ee6dcb7a0dd7a0c584"
+"checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d"
 "checksum number_prefix 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "59a14be9c211cb9c602bad35ac99f41e9a84b44d71b8cbd3040e3bd02a214902"
 "checksum odds 0.2.25 (registry+https://github.com/rust-lang/crates.io-index)" = "c3df9b730298cea3a1c3faa90b7e2f9df3a9c400d0936d6015e6165734eefcba"
 "checksum ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2c49021782e5233cd243168edfa8037574afed4eba4bbaf538b3d8d1789d8c"
@@ -3888,6 +3927,7 @@ dependencies = [
 "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"
 "checksum smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013"
 "checksum smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8fcd03faf178110ab0334d74ca9631d77f94c8c11cc77fcb59538abf0025695d"
+"checksum snappy-sys 0.1.0 (git+https://github.com/paritytech/rust-snappy)" = ""
 "checksum spmc 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cd1f11d1fb5fd41834e55ce0b85a186efbf2f2afd9fdb09e2c8d72f9bff1ad1a"
 "checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b"
 "checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
diff --git a/Cargo.toml b/Cargo.toml
index 07727d883..35200ef40 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -63,7 +63,7 @@ path = { path = "util/path" }
 panic_hook = { path = "panic_hook" }
 hash = { path = "util/hash" }
 migration = { path = "util/migration" }
-kvdb = { path = "util/kvdb" }
+kvdb-rocksdb = { path = "util/kvdb-rocksdb" }
 parity-dapps = { path = "dapps", optional = true }
 clippy = { version = "0.0.103", optional = true}
diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml
index e7d63703d..a12083b56 100644
--- a/ethcore/Cargo.toml
+++ b/ethcore/Cargo.toml
@@ -56,6 +56,8 @@ rand = "0.3"
 rlp = { path = "../util/rlp" }
 rlp_derive = { path = "../util/rlp_derive" }
 kvdb = { path = "../util/kvdb" }
+kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
+kvdb-memorydb = { path = "../util/kvdb-memorydb" }
 util-error = { path = "../util/error" }
 snappy = { path = "../util/snappy" }
 migration = { path = "../util/migration" }
diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml
index 734eb7432..69a58270c 100644
--- a/ethcore/light/Cargo.toml
+++ b/ethcore/light/Cargo.toml
@@ -40,6 +40,8 @@ stats = { path = "../../util/stats" }
 hash = { path = "../../util/hash" }
 triehash = { path = "../../util/triehash" }
 kvdb = { path = "../../util/kvdb" }
+kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
+kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
 
 [features]
 default = []
diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs
index c31fd4787..f1cb18573 100644
--- a/ethcore/light/src/client/header_chain.rs
+++ b/ethcore/light/src/client/header_chain.rs
@@ -728,13 +728,14 @@ mod tests {
 	use ethcore::header::Header;
 	use ethcore::spec::Spec;
 	use cache::Cache;
-	use kvdb::{in_memory, KeyValueDB};
+	use kvdb::KeyValueDB;
+	use kvdb_memorydb;
 	use time::Duration;
 	use parking_lot::Mutex;
 
 	fn make_db() -> Arc<KeyValueDB> {
-		Arc::new(in_memory(0))
+		Arc::new(kvdb_memorydb::create(0))
 	}
 
 	#[test]
diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs
index fb0ec3917..5e9eee92f 100644
--- a/ethcore/light/src/client/mod.rs
+++ b/ethcore/light/src/client/mod.rs
@@ -36,7 +36,8 @@ use bigint::prelude::U256;
 use bigint::hash::H256;
 use futures::{IntoFuture, Future};
 
-use kvdb::{KeyValueDB, CompactionProfile};
+use kvdb::KeyValueDB;
+use kvdb_rocksdb::CompactionProfile;
 
 use self::fetch::ChainDataFetcher;
 use self::header_chain::{AncestryIter, HeaderChain};
@@ -214,7 +215,7 @@ impl<T: ChainDataFetcher> Client<T> {
 		io_channel: IoChannel<ClientIoMessage>,
 		cache: Arc<Mutex<Cache>>
 	) -> Self {
-		let db = ::kvdb::in_memory(0);
+		let db = ::kvdb_memorydb::create(0);
 
 		Client::new(
 			config,
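A minimal sketch (a reading aid, not part of the patch) of the crate split this diff introduces: the `kvdb` crate keeps the abstract `KeyValueDB` trait while the concrete backends move to their own crates; `make_test_db` is a hypothetical helper name.

```rust
extern crate kvdb;          // `KeyValueDB` trait, `DBTransaction`, `DBValue`
extern crate kvdb_memorydb; // in-memory backend, used mostly by tests
extern crate kvdb_rocksdb;  // RocksDB backend: `Database`, `DatabaseConfig`

use std::sync::Arc;
use kvdb::KeyValueDB;

// Call sites that used `::kvdb::in_memory(columns)` become:
fn make_test_db(columns: u32) -> Arc<KeyValueDB> {
	Arc::new(kvdb_memorydb::create(columns))
}
```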
diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs
index f20d0ad90..11d6a8e77 100644
--- a/ethcore/light/src/client/service.rs
+++ b/ethcore/light/src/client/service.rs
@@ -25,7 +25,7 @@ use ethcore::db;
 use ethcore::service::ClientIoMessage;
 use ethcore::spec::Spec;
 use io::{IoContext, IoError, IoHandler, IoService};
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use cache::Cache;
 use parking_lot::Mutex;
 
@@ -63,11 +63,7 @@ impl<T: ChainDataFetcher> Service<T> {
 		// initialize database.
 		let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS);
 
-		// give all rocksdb cache to the header chain column.
-		if let Some(size) = config.db_cache_size {
-			db_config.set_cache(db::COL_LIGHT_CHAIN, size);
-		}
-
+		db_config.memory_budget = config.db_cache_size;
 		db_config.compaction = config.db_compaction;
 		db_config.wal = config.db_wal;
diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs
index edf655e4d..9c860a273 100644
--- a/ethcore/light/src/lib.rs
+++ b/ethcore/light/src/lib.rs
@@ -92,6 +92,8 @@ extern crate vm;
 extern crate hash;
 extern crate triehash;
 extern crate kvdb;
+extern crate kvdb_memorydb;
+extern crate kvdb_rocksdb;
 
 #[cfg(feature = "ipc")]
 extern crate ethcore_ipc as ipc;
diff --git a/ethcore/node_filter/Cargo.toml b/ethcore/node_filter/Cargo.toml
index c95e83091..dc8797d8e 100644
--- a/ethcore/node_filter/Cargo.toml
+++ b/ethcore/node_filter/Cargo.toml
@@ -11,10 +11,12 @@ ethcore = { path = ".."}
 ethcore-util = { path = "../../util" }
 ethcore-bigint = { path = "../../util/bigint" }
 ethcore-bytes = { path = "../../util/bytes" }
-ethcore-io = { path = "../../util/io" }
 ethcore-network = { path = "../../util/network" }
-kvdb = { path = "../../util/kvdb" }
 native-contracts = { path = "../native_contracts" }
 futures = "0.1"
 log = "0.3"
 parking_lot = "0.4"
+
+[dev-dependencies]
+kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
+ethcore-io = { path = "../../util/io" }
diff --git a/ethcore/node_filter/src/lib.rs b/ethcore/node_filter/src/lib.rs
index e92dba61d..d2c64cb39 100644
--- a/ethcore/node_filter/src/lib.rs
+++ b/ethcore/node_filter/src/lib.rs
@@ -24,9 +24,14 @@ extern crate ethcore_network as network;
 extern crate native_contracts;
 extern crate futures;
 extern crate parking_lot;
-extern crate kvdb;
-#[cfg(test)] extern crate ethcore_io as io;
-#[macro_use] extern crate log;
+
+#[macro_use]
+extern crate log;
+
+#[cfg(test)]
+extern crate kvdb_memorydb;
+#[cfg(test)]
+extern crate ethcore_io as io;
 
 use std::sync::Weak;
 use std::collections::HashMap;
@@ -135,7 +140,7 @@ mod test {
 		let contract_addr = Address::from_str("0000000000000000000000000000000000000005").unwrap();
 		let data = include_bytes!("../res/node_filter.json");
 		let spec = Spec::load(&::std::env::temp_dir(), &data[..]).unwrap();
-		let client_db = Arc::new(::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));
+		let client_db = Arc::new(::kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));
 
 		let client = Client::new(
 			ClientConfig::default(),
diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs
index a55cf669f..8e71d5f42 100644
--- a/ethcore/src/blockchain/blockchain.rs
+++ b/ethcore/src/blockchain/blockchain.rs
@@ -1479,7 +1479,8 @@ mod tests {
 	use std::sync::Arc;
 	use rustc_hex::FromHex;
 	use hash::keccak;
-	use kvdb::{in_memory, KeyValueDB};
+	use kvdb::KeyValueDB;
+	use kvdb_memorydb;
 	use bigint::hash::*;
 	use receipt::{Receipt, TransactionOutcome};
 	use blockchain::{BlockProvider, BlockChain, Config, ImportRoute};
@@ -1493,7 +1494,7 @@ mod tests {
 	use header::BlockNumber;
 
 	fn new_db() -> Arc<KeyValueDB> {
-		Arc::new(in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
+		Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)))
 	}
 
 	fn new_chain(genesis: &[u8], db: Arc<KeyValueDB>) -> BlockChain {
diff --git a/ethcore/src/blockchain/config.rs b/ethcore/src/blockchain/config.rs
index 4be606b33..312289b06 100644
--- a/ethcore/src/blockchain/config.rs
+++ b/ethcore/src/blockchain/config.rs
@@ -23,8 +23,6 @@ pub struct Config {
 	pub pref_cache_size: usize,
 	/// Maximum cache size in bytes.
 	pub max_cache_size: usize,
-	/// Backing db cache_size
-	pub db_cache_size: Option<usize>,
 }
 
 impl Default for Config {
@@ -32,8 +30,6 @@
 		Config {
 			pref_cache_size: 1 << 14,
 			max_cache_size: 1 << 20,
-			db_cache_size: None,
 		}
 	}
 }
-
diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs
index e629732d5..e5385ffee 100644
--- a/ethcore/src/client/config.rs
+++ b/ethcore/src/client/config.rs
@@ -21,7 +21,7 @@ use std::fmt::{Display, Formatter, Error as FmtError};
 use mode::Mode as IpcMode;
 use verification::{VerifierType, QueueConfig};
 use util::journaldb;
-use kvdb::CompactionProfile;
+use kvdb_rocksdb::CompactionProfile;
 
 pub use std::time::Duration;
 pub use blockchain::Config as BlockChainConfig;
@@ -141,7 +141,7 @@ pub struct ClientConfig {
 	pub pruning: journaldb::Algorithm,
 	/// The name of the client instance.
 	pub name: String,
-	/// RocksDB state column cache-size if not default
+	/// RocksDB column cache-size if not default
 	pub db_cache_size: Option<usize>,
 	/// State db compaction profile
 	pub db_compaction: DatabaseCompactionProfile,
diff --git a/ethcore/src/client/evm_test_client.rs b/ethcore/src/client/evm_test_client.rs
index ff03554f9..567a6c944 100644
--- a/ethcore/src/client/evm_test_client.rs
+++ b/ethcore/src/client/evm_test_client.rs
@@ -21,8 +21,7 @@ use std::sync::Arc;
 use bigint::prelude::U256;
 use bigint::hash::H256;
 use util::journaldb;
-use trie;
-use bytes;
+use {trie, kvdb_memorydb, bytes};
 use kvdb::{self, KeyValueDB};
 use {state, state_db, client, executive, trace, transaction, db, spec, pod_state};
 use factory::Factories;
@@ -128,7 +127,7 @@ impl<'a> EvmTestClient<'a> {
 	}
 
 	fn state_from_spec(spec: &'a spec::Spec, factories: &Factories) -> Result<state::State<state_db::StateDB>, EvmTestError> {
-		let db = Arc::new(kvdb::in_memory(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
+		let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
 		let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
 		let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
 		state_db = spec.ensure_db_good(state_db, factories)?;
@@ -150,7 +149,7 @@ impl<'a> EvmTestClient<'a> {
 	}
 
 	fn state_from_pod(spec: &'a spec::Spec, factories: &Factories, pod_state: pod_state::PodState) -> Result<state::State<state_db::StateDB>, EvmTestError> {
-		let db = Arc::new(kvdb::in_memory(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
+		let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
 		let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
 		let state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
 		let mut state = state::State::new(
diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs
index 82f969320..4ee3c420c 100644
--- a/ethcore/src/client/test_client.rs
+++ b/ethcore/src/client/test_client.rs
@@ -27,7 +27,7 @@ use bigint::prelude::U256;
 use bigint::hash::H256;
 use parking_lot::RwLock;
 use util::*;
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use bytes::Bytes;
 use rlp::*;
 use ethkey::{Generator, Random};
diff --git a/ethcore/src/json_tests/chain.rs b/ethcore/src/json_tests/chain.rs
index 9bd0d4c94..e82bc7740 100644
--- a/ethcore/src/json_tests/chain.rs
+++ b/ethcore/src/json_tests/chain.rs
@@ -57,7 +57,7 @@ pub fn json_chain_test(json_data: &[u8]) -> Vec<String> {
 	};
 
 	{
-		let db = Arc::new(::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+		let db = Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
 		let mut config = ClientConfig::default();
 		config.history = 8;
 		let client = Client::new(
diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs
index a67eb879a..c480beabe 100644
--- a/ethcore/src/lib.rs
+++ b/ethcore/src/lib.rs
@@ -113,6 +113,8 @@ extern crate ansi_term;
 extern crate semantic_version;
 extern crate unexpected;
 extern crate kvdb;
+extern crate kvdb_rocksdb;
+extern crate kvdb_memorydb;
 extern crate util_error;
 extern crate snappy;
 extern crate migration;
diff --git a/ethcore/src/migrations/state/v7.rs b/ethcore/src/migrations/state/v7.rs
index cbf517d1d..c15935117 100644
--- a/ethcore/src/migrations/state/v7.rs
+++ b/ethcore/src/migrations/state/v7.rs
@@ -22,7 +22,7 @@ use std::collections::HashMap;
 use bigint::hash::H256;
 use util::Address;
 use bytes::Bytes;
-use kvdb::Database;
+use kvdb_rocksdb::Database;
 use migration::{Batch, Config, Error, Migration, SimpleMigration, Progress};
 use hash::keccak;
 use std::sync::Arc;
diff --git a/ethcore/src/migrations/v10.rs b/ethcore/src/migrations/v10.rs
index 155d6f4c0..3a236e719 100644
--- a/ethcore/src/migrations/v10.rs
+++ b/ethcore/src/migrations/v10.rs
@@ -26,7 +26,8 @@ use migration::{Error, Migration, Progress, Batch, Config};
 use util::journaldb;
 use bigint::hash::H256;
 use trie::Trie;
-use kvdb::{Database, DBTransaction};
+use kvdb::DBTransaction;
+use kvdb_rocksdb::Database;
 
 /// Account bloom upgrade routine. If bloom already present, does nothing.
 /// If database empty (no best block), does nothing.
diff --git a/ethcore/src/migrations/v9.rs b/ethcore/src/migrations/v9.rs
index 7c28054fa..39637dc4e 100644
--- a/ethcore/src/migrations/v9.rs
+++ b/ethcore/src/migrations/v9.rs
@@ -18,7 +18,7 @@
 //! This migration consolidates all databases into single one using Column Families.
 
 use rlp::{Rlp, RlpStream};
-use kvdb::Database;
+use kvdb_rocksdb::Database;
 use migration::{Batch, Config, Error, Migration, Progress};
 use std::sync::Arc;
diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs
index 64773a236..56cc534f1 100644
--- a/ethcore/src/service.rs
+++ b/ethcore/src/service.rs
@@ -19,7 +19,8 @@ use std::sync::Arc;
 use std::path::Path;
 use bigint::hash::H256;
-use kvdb::{Database, DatabaseConfig, KeyValueDB};
+use kvdb::KeyValueDB;
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use bytes::Bytes;
 use io::*;
 use spec::Spec;
@@ -82,12 +83,7 @@ impl ClientService {
 
 		let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
 
-		// give all rocksdb cache to state column; everything else has its
-		// own caches.
-		if let Some(size) = config.db_cache_size {
-			db_config.set_cache(::db::COL_STATE, size);
-		}
-
+		db_config.memory_budget = config.db_cache_size;
 		db_config.compaction = config.db_compaction.compaction_profile(client_path);
 		db_config.wal = config.db_wal;
diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs
index a53824b1f..ae6a34cfa 100644
--- a/ethcore/src/snapshot/service.rs
+++ b/ethcore/src/snapshot/service.rs
@@ -40,7 +40,7 @@ use parking_lot::{Mutex, RwLock, RwLockReadGuard};
 use util_error::UtilError;
 use bytes::Bytes;
 use util::journaldb::Algorithm;
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use snappy;
 
 /// Helper for removing directories in case of error.
@@ -682,7 +682,7 @@ mod tests {
 	#[test]
 	fn cannot_finish_with_invalid_chunks() {
 		use bigint::hash::H256;
-		use kvdb::DatabaseConfig;
+		use kvdb_rocksdb::DatabaseConfig;
 
 		let spec = get_test_spec();
 		let dir = RandomTempPath::new();
diff --git a/ethcore/src/snapshot/tests/proof_of_authority.rs b/ethcore/src/snapshot/tests/proof_of_authority.rs
index 9634fd531..f15954ce8 100644
--- a/ethcore/src/snapshot/tests/proof_of_authority.rs
+++ b/ethcore/src/snapshot/tests/proof_of_authority.rs
@@ -31,7 +31,7 @@ use tests::helpers;
 use transaction::{Transaction, Action, SignedTransaction};
 use util::Address;
 
-use kvdb;
+use kvdb_memorydb;
 
 const PASS: &'static str = "";
 const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes activated.
@@ -238,7 +238,7 @@ fn fixed_to_contract_only() {
 	assert_eq!(client.chain_info().best_block_number, 11);
 	let reader = snapshot_helpers::snap(&*client);
 
-	let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0));
+	let new_db = kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0));
 	let spec = spec_fixed_to_contract();
 
 	// ensure fresh engine's step matches.
@@ -270,7 +270,7 @@ fn fixed_to_contract_to_contract() {
 	assert_eq!(client.chain_info().best_block_number, 16);
 	let reader = snapshot_helpers::snap(&*client);
 
-	let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0));
+	let new_db = kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0));
 	let spec = spec_fixed_to_contract();
 
 	for _ in 0..16 { spec.engine.step() }
diff --git a/ethcore/src/snapshot/tests/proof_of_work.rs b/ethcore/src/snapshot/tests/proof_of_work.rs
index 8002e4362..d4df7bb10 100644
--- a/ethcore/src/snapshot/tests/proof_of_work.rs
+++ b/ethcore/src/snapshot/tests/proof_of_work.rs
@@ -26,7 +26,8 @@ use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
 use parking_lot::Mutex;
 use snappy;
 
-use kvdb::{self, KeyValueDB, DBTransaction};
+use kvdb::{KeyValueDB, DBTransaction};
+use kvdb_memorydb;
 
 use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
@@ -43,7 +44,7 @@ fn chunk_and_restore(amount: u64) {
 	let mut snapshot_path = new_path.as_path().to_owned();
 	snapshot_path.push("SNAP");
 
-	let old_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+	let old_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
 	let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
 
 	// build the blockchain.
@@ -80,7 +81,7 @@ fn chunk_and_restore(amount: u64) {
 	writer.into_inner().finish(manifest.clone()).unwrap();
 
 	// restore it.
-	let new_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+	let new_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
 	let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
 	let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap();
@@ -127,7 +128,7 @@ fn checks_flag() {
 	let chunk = stream.out();
 
-	let db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+	let db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
 	let engine = ::spec::Spec::new_test().engine;
 	let chain = BlockChain::new(Default::default(), &genesis, db.clone());
diff --git a/ethcore/src/snapshot/tests/service.rs b/ethcore/src/snapshot/tests/service.rs
index d391883a9..ccaf819b0 100644
--- a/ethcore/src/snapshot/tests/service.rs
+++ b/ethcore/src/snapshot/tests/service.rs
@@ -27,7 +27,7 @@ use tests::helpers::generate_dummy_client_with_spec_and_data;
 
 use devtools::RandomTempPath;
 use io::IoChannel;
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 
 struct NoopDBRestore;
diff --git a/ethcore/src/snapshot/tests/state.rs b/ethcore/src/snapshot/tests/state.rs
index 175ae4eb8..9f9b434df 100644
--- a/ethcore/src/snapshot/tests/state.rs
+++ b/ethcore/src/snapshot/tests/state.rs
@@ -27,7 +27,7 @@ use error::Error;
 use rand::{XorShiftRng, SeedableRng};
 use bigint::hash::H256;
 use util::journaldb::{self, Algorithm};
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use memorydb::MemoryDB;
 use parking_lot::Mutex;
 use devtools::RandomTempPath;
diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs
index 451cb13f2..3d559430d 100644
--- a/ethcore/src/spec/spec.rs
+++ b/ethcore/src/spec/spec.rs
@@ -672,13 +672,13 @@ impl Spec {
 	pub fn genesis_epoch_data(&self) -> Result<Option<Vec<u8>>, String> {
 		use transaction::{Action, Transaction};
 		use util::journaldb;
-		use kvdb;
+		use kvdb_memorydb;
 
 		let genesis = self.genesis_header();
 
 		let factories = Default::default();
 		let mut db = journaldb::new(
-			Arc::new(kvdb::in_memory(0)),
+			Arc::new(kvdb_memorydb::create(0)),
 			journaldb::Algorithm::Archive,
 			None,
 		);
diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs
index 2f67ece8f..b75f0863a 100644
--- a/ethcore/src/tests/client.rs
+++ b/ethcore/src/tests/client.rs
@@ -27,7 +27,7 @@ use tests::helpers::*;
 use types::filter::Filter;
 use bigint::prelude::U256;
 use util::*;
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use devtools::*;
 use miner::Miner;
 use spec::Spec;
diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs
index 52a0dedc6..cf14915c5 100644
--- a/ethcore/src/tests/helpers.rs
+++ b/ethcore/src/tests/helpers.rs
@@ -232,7 +232,7 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
 }
 
 fn new_db() -> Arc<::kvdb::KeyValueDB> {
-	Arc::new(::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
+	Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)))
 }
 
 pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {
diff --git a/ethcore/src/tests/trace.rs b/ethcore/src/tests/trace.rs
index 270d29597..57a745a1a 100644
--- a/ethcore/src/tests/trace.rs
+++ b/ethcore/src/tests/trace.rs
@@ -27,7 +27,7 @@ use client::*;
 use tests::helpers::*;
 use devtools::RandomTempPath;
 use client::{BlockChainClient, Client, ClientConfig};
-use kvdb::{Database, DatabaseConfig};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use std::sync::Arc;
 use header::Header;
 use miner::Miner;
diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs
index 087acdcc9..9d7136cb1 100644
--- a/ethcore/src/trace/db.rs
+++ b/ethcore/src/trace/db.rs
@@ -416,7 +416,8 @@ mod tests {
 	use bigint::prelude::U256;
 	use bigint::hash::H256;
 	use util::Address;
-	use kvdb::{DBTransaction, in_memory, KeyValueDB};
+	use kvdb::{DBTransaction, KeyValueDB};
+	use kvdb_memorydb;
 	use header::BlockNumber;
 	use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
 	use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError};
@@ -467,7 +468,7 @@ mod tests {
 	}
 
 	fn new_db() -> Arc<KeyValueDB> {
-		Arc::new(in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
+		Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)))
 	}
 
 	#[test]
diff --git a/ethcore/src/tx_filter.rs b/ethcore/src/tx_filter.rs
index ac40b8ab6..d0bff8dff 100644
--- a/ethcore/src/tx_filter.rs
+++ b/ethcore/src/tx_filter.rs
@@ -178,7 +178,7 @@ mod test {
 		"#;
 
 		let spec = Spec::load(&::std::env::temp_dir(), spec_data.as_bytes()).unwrap();
-		let client_db = Arc::new(::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
+		let client_db = Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
 
 		let client = Client::new(
 			ClientConfig::default(),
diff --git a/local-store/Cargo.toml b/local-store/Cargo.toml
index db9830c40..e4b97f7a4 100644
--- a/local-store/Cargo.toml
+++ b/local-store/Cargo.toml
@@ -14,4 +14,7 @@ serde = "1.0"
 serde_derive = "1.0"
 serde_json = "1.0"
 log = "0.3"
+
+[dev-dependencies]
 ethkey = { path = "../ethkey" }
+kvdb-memorydb = { path = "../util/kvdb-memorydb" }
diff --git a/local-store/src/lib.rs b/local-store/src/lib.rs
index 61fe54976..a3234daf1 100644
--- a/local-store/src/lib.rs
+++ b/local-store/src/lib.rs
@@ -44,6 +44,8 @@ extern crate log;
 
 #[cfg(test)]
 extern crate ethkey;
+#[cfg(test)]
+extern crate kvdb_memorydb;
 
 const LOCAL_TRANSACTIONS_KEY: &'static [u8] = &*b"LOCAL_TXS";
 
@@ -243,7 +245,7 @@ mod tests {
 	#[test]
 	fn twice_empty() {
-		let db = Arc::new(::kvdb::in_memory(0));
+		let db = Arc::new(::kvdb_memorydb::create(0));
 
 		{
 			let store = super::create(db.clone(), None, Dummy(vec![]));
@@ -272,7 +274,7 @@ mod tests {
 			PendingTransaction::new(signed, condition)
 		}).collect();
 
-		let db = Arc::new(::kvdb::in_memory(0));
+		let db = Arc::new(::kvdb_memorydb::create(0));
 
 		{
 			// nothing written yet, will write pending.
@@ -311,7 +313,7 @@ mod tests {
 			PendingTransaction::new(signed, None)
 		});
 
-		let db = Arc::new(::kvdb::in_memory(0));
+		let db = Arc::new(::kvdb_memorydb::create(0));
 		{
 			// nothing written, will write bad.
 			let store = super::create(db.clone(), None, Dummy(transactions.clone()));
diff --git a/parity/cache.rs b/parity/cache.rs
index 8784ffa3d..0bf0717a3 100644
--- a/parity/cache.rs
+++ b/parity/cache.rs
@@ -17,8 +17,10 @@ use std::cmp::max;
 
 const MIN_BC_CACHE_MB: u32 = 4;
-const MIN_DB_CACHE_MB: u32 = 2;
+const MIN_DB_CACHE_MB: u32 = 8;
 const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16;
+const DEFAULT_DB_CACHE_SIZE: u32 = 128;
+const DEFAULT_BC_CACHE_SIZE: u32 = 8;
 const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 40;
 const DEFAULT_TRACE_CACHE_SIZE: u32 = 20;
 const DEFAULT_STATE_CACHE_SIZE: u32 = 25;
@@ -41,7 +43,11 @@ pub struct CacheConfig {
 
 impl Default for CacheConfig {
 	fn default() -> Self {
-		CacheConfig::new(32, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, DEFAULT_STATE_CACHE_SIZE)
+		CacheConfig::new(
+			DEFAULT_DB_CACHE_SIZE,
+			DEFAULT_BC_CACHE_SIZE,
+			DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB,
+			DEFAULT_STATE_CACHE_SIZE)
 	}
 }
 
@@ -68,14 +74,9 @@ impl CacheConfig {
 	}
 
-	/// Size of db cache for blockchain.
-	pub fn db_blockchain_cache_size(&self) -> u32 {
-		max(MIN_DB_CACHE_MB, self.db / 4)
-	}
-
-	/// Size of db cache for state.
-	pub fn db_state_cache_size(&self) -> u32 {
-		max(MIN_DB_CACHE_MB, self.db * 3 / 4)
+	/// Size of db cache.
+	pub fn db_cache_size(&self) -> u32 {
+		max(MIN_DB_CACHE_MB, self.db)
 	}
 
 	/// Size of block queue size limit
@@ -122,13 +123,16 @@ mod tests {
 	fn test_cache_config_db_cache_sizes() {
 		let config = CacheConfig::new_with_total_cache_size(400);
 		assert_eq!(config.db, 280);
-		assert_eq!(config.db_blockchain_cache_size(), 70);
-		assert_eq!(config.db_state_cache_size(), 210);
+		assert_eq!(config.db_cache_size(), 280);
 	}
 
 	#[test]
 	fn test_cache_config_default() {
 		assert_eq!(CacheConfig::default(),
-			CacheConfig::new(32, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, super::DEFAULT_STATE_CACHE_SIZE));
+			CacheConfig::new(
+				super::DEFAULT_DB_CACHE_SIZE,
+				super::DEFAULT_BC_CACHE_SIZE,
+				super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB,
+				super::DEFAULT_STATE_CACHE_SIZE));
 	}
 }
diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs
index 3f5099b61..f9f7e28d0 100644
--- a/parity/cli/mod.rs
+++ b/parity/cli/mod.rs
@@ -767,7 +767,7 @@ usage! {
 	"--pruning-memory=[MB]",
 	"The ideal amount of memory in megabytes to use to store recent states. As many states as possible will be kept within this limit, and at least --pruning-history states will always be kept.",
 
-	ARG arg_cache_size_db: (u32) = 32u32, or |c: &Config| otry!(c.footprint).cache_size_db.clone(),
+	ARG arg_cache_size_db: (u32) = 128u32, or |c: &Config| otry!(c.footprint).cache_size_db.clone(),
 	"--cache-size-db=[MB]",
 	"Override database cache size.",
 
@@ -1776,7 +1776,7 @@ mod tests {
 			pruning_memory: None,
 			fast_and_loose: None,
 			cache_size: None,
-			cache_size_db: Some(128),
+			cache_size_db: Some(256),
 			cache_size_blocks: Some(16),
 			cache_size_queue: Some(100),
 			cache_size_state: Some(25),
diff --git a/parity/cli/tests/config.toml b/parity/cli/tests/config.toml
index 08da653de..abdf3e0c7 100644
--- a/parity/cli/tests/config.toml
+++ b/parity/cli/tests/config.toml
@@ -63,7 +63,7 @@ tx_queue_gas = "off"
 tracing = "on"
 pruning = "fast"
 pruning_history = 64
-cache_size_db = 128
+cache_size_db = 256
 cache_size_blocks = 16
 cache_size_queue = 100
 cache_size_state = 25
diff --git a/parity/helpers.rs b/parity/helpers.rs
index da54f6763..bbb23abc5 100644
--- a/parity/helpers.rs
+++ b/parity/helpers.rs
@@ -21,7 +21,7 @@ use std::fs::File;
 use bigint::prelude::U256;
 use bigint::hash::clean_0x;
 use util::Address;
-use kvdb::CompactionProfile;
+use kvdb_rocksdb::CompactionProfile;
 use util::journaldb::Algorithm;
 use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType};
 use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy};
@@ -239,10 +239,8 @@ pub fn to_client_config(
 	client_config.blockchain.max_cache_size = cache_config.blockchain() as usize * mb; // in bytes
 	client_config.blockchain.pref_cache_size = cache_config.blockchain() as usize * 3 / 4 * mb;
 
-	// db blockchain cache size, in megabytes
-	client_config.blockchain.db_cache_size = Some(cache_config.db_blockchain_cache_size() as usize);
-	// db state cache size, in megabytes
-	client_config.db_cache_size = Some(cache_config.db_state_cache_size() as usize);
+	// db cache size, in megabytes
+	client_config.db_cache_size = Some(cache_config.db_cache_size() as usize);
 
 	// db queue cache size, in bytes
 	client_config.queue.max_mem_use = cache_config.queue() as usize * mb; // in bytes
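A hedged sketch of how the consolidated cache size now reaches the backend (`open_db` and the column count are illustrative, not from the patch): instead of pinning the cache to one column with `set_cache`, callers hand the whole budget to `memory_budget` and the backend divides it internally.

```rust
extern crate kvdb_rocksdb;

use std::path::Path;
use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};

fn open_db(path: &str, cache_size_mb: Option<usize>) -> Result<Database, String> {
	let mut config = DatabaseConfig::with_columns(Some(8));
	// `None` falls back to the crate's DB_DEFAULT_MEMORY_BUDGET_MB (128 MiB).
	config.memory_budget = cache_size_mb;
	config.compaction = CompactionProfile::auto(Path::new(path));
	Database::open(&config, path)
}
```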
diff --git a/parity/main.rs b/parity/main.rs
index 97ffbca5f..144af116a 100644
--- a/parity/main.rs
+++ b/parity/main.rs
@@ -62,7 +62,7 @@ extern crate ethcore_bigint as bigint;
 extern crate ethcore_bytes as bytes;
 extern crate ethcore_network as network;
 extern crate migration as migr;
-extern crate kvdb;
+extern crate kvdb_rocksdb;
 extern crate ethkey;
 extern crate ethsync;
 extern crate node_health;
diff --git a/parity/migration.rs b/parity/migration.rs
index 508491a0a..f5f7a1981 100644
--- a/parity/migration.rs
+++ b/parity/migration.rs
@@ -22,7 +22,7 @@ use std::fmt::{Display, Formatter, Error as FmtError};
 use std::sync::Arc;
 use util::journaldb::Algorithm;
 use migr::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration};
-use kvdb::{CompactionProfile, Database, DatabaseConfig};
+use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
 use ethcore::migrations;
 use ethcore::db;
 use ethcore::migrations::Extract;
@@ -167,7 +167,7 @@ fn consolidate_database(
 	let config = default_migration_settings(compaction_profile);
 	let mut db_config = DatabaseConfig {
 		max_open_files: 64,
-		cache_sizes: Default::default(),
+		memory_budget: None,
 		compaction: config.compaction_profile,
 		columns: None,
 		wal: true,
@@ -283,7 +283,7 @@ mod legacy {
 	use std::path::{Path, PathBuf};
 	use util::journaldb::Algorithm;
 	use migr::{Manager as MigrationManager};
-	use kvdb::CompactionProfile;
+	use kvdb_rocksdb::CompactionProfile;
 	use ethcore::migrations;
 
 	/// Blocks database path.
diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml
index 59f5a117c..ceb57639c 100644
--- a/rpc/Cargo.toml
+++ b/rpc/Cargo.toml
@@ -57,7 +57,6 @@ rlp = { path = "../util/rlp" }
 stats = { path = "../util/stats" }
 vm = { path = "../ethcore/vm" }
 hash = { path = "../util/hash" }
-kvdb = { path = "../util/kvdb" }
 hardware-wallet = { path = "../hw" }
 
 clippy = { version = "0.0.103", optional = true}
@@ -66,6 +65,7 @@ pretty_assertions = "0.1"
 [dev-dependencies]
 macros = { path = "../util/macros" }
 ethcore-network = { path = "../util/network" }
+kvdb-memorydb = { path = "../util/kvdb-memorydb" }
 
 [features]
 dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"]
diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs
index 2e6d6148f..a4572835f 100644
--- a/rpc/src/lib.rs
+++ b/rpc/src/lib.rs
@@ -65,7 +65,6 @@ extern crate rlp;
 extern crate stats;
 extern crate hash;
 extern crate hardware_wallet;
-extern crate kvdb;
 
 #[macro_use]
 extern crate log;
@@ -85,6 +84,9 @@ extern crate pretty_assertions;
 #[macro_use]
 extern crate macros;
 
+#[cfg(test)]
+extern crate kvdb_memorydb;
+
 pub extern crate jsonrpc_ws_server as ws;
 
 mod authcodes;
diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs
index a2b23f52e..c7f1f2dab 100644
--- a/rpc/src/v1/tests/eth.rs
+++ b/rpc/src/v1/tests/eth.rs
@@ -33,7 +33,7 @@ use io::IoChannel;
 use bigint::prelude::U256;
 use bigint::hash::H256;
 use util::Address;
-use kvdb::in_memory;
+use kvdb_memorydb;
 use jsonrpc_core::IoHandler;
 use v1::impls::{EthClient, SigningUnsafeClient};
@@ -131,7 +131,7 @@ impl EthTester {
 		let client = Client::new(
 			ClientConfig::default(),
 			&spec,
-			Arc::new(in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
+			Arc::new(kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
 			miner_service.clone(),
 			IoChannel::disconnected(),
 		).unwrap();
diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml
index 3e98c62e7..ca9070440 100644
--- a/secret_store/Cargo.toml
+++ b/secret_store/Cargo.toml
@@ -33,6 +33,7 @@ ethcore-devtools = { path = "../devtools" }
 ethcore-util = { path = "../util" }
path = "../util/bigint" } kvdb = { path = "../util/kvdb" } +kvdb-rocksdb = { path = "../util/kvdb-rocksdb" } hash = { path = "../util/hash" } ethcore-ipc = { path = "../ipc/rpc" } ethcore-ipc-nano = { path = "../ipc/nano" } diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index ca408dfc1..87f1ec084 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -18,7 +18,7 @@ use std::path::PathBuf; use std::collections::BTreeMap; use serde_json; use ethkey::{Secret, Public}; -use kvdb::{Database, DatabaseIterator}; +use kvdb_rocksdb::{Database, DatabaseIterator}; use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId}; use serialization::{SerializablePublic, SerializableSecret}; @@ -293,7 +293,7 @@ pub mod tests { use serde_json; use devtools::RandomTempPath; use ethkey::{Random, Generator, Public, Secret}; - use kvdb::Database; + use kvdb_rocksdb::Database; use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId}; use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare, SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1, diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 6aa3bd708..73bf92627 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -49,6 +49,7 @@ extern crate ethkey; extern crate native_contracts; extern crate hash; extern crate kvdb; +extern crate kvdb_rocksdb; mod key_server_cluster; mod types; diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 9260f99ce..8f1e08093 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -34,11 +34,14 @@ ethcore-ipc = { path = "../ipc/rpc" } semver = "0.6" smallvec = { version = "0.4", features = ["heapsizeof"] } ethcore-ipc-nano = { path = "../ipc/nano" } -ethcore-devtools = { path = "../devtools" } -ethkey = { path = "../ethkey" } parking_lot = "0.4" ipnetwork = "0.12.6" +[dev-dependencies] +ethkey = { path = "../ethkey" } +kvdb-memorydb = { path = "../util/kvdb-memorydb" } +ethcore-devtools = { path = "../devtools" } + [features] default = [] dev = ["clippy", "ethcore/dev", "ethcore-util/dev"] diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 33b1d021f..1bcc744a7 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -48,6 +48,7 @@ extern crate ethcore_light as light; #[cfg(test)] extern crate ethcore_devtools as devtools; #[cfg(test)] extern crate ethkey; +#[cfg(test)] extern crate kvdb_memorydb; #[macro_use] extern crate macros; diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 1fe4fde46..b37ab89ba 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -291,7 +291,7 @@ impl TestNet> { let client = EthcoreClient::new( ClientConfig::default(), &spec, - Arc::new(::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0))), + Arc::new(::kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0))), Arc::new(Miner::with_spec_and_accounts(&spec, accounts)), IoChannel::disconnected(), ).unwrap(); diff --git a/util/Cargo.toml b/util/Cargo.toml index daa30fb34..0744005ff 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -35,6 +35,9 @@ memorydb = { path = "memorydb" } util-error = { path = "error" } kvdb = { path = "kvdb" } +[dev-dependencies] +kvdb-memorydb = { path = "kvdb-memorydb" } + [features] default = [] dev = ["clippy"] diff --git a/util/kvdb-memorydb/Cargo.toml b/util/kvdb-memorydb/Cargo.toml new file mode 100644 index 000000000..9ff69909d --- /dev/null +++ b/util/kvdb-memorydb/Cargo.toml @@ -0,0 
diff --git a/util/kvdb-memorydb/Cargo.toml b/util/kvdb-memorydb/Cargo.toml
new file mode 100644
index 000000000..9ff69909d
--- /dev/null
+++ b/util/kvdb-memorydb/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "kvdb-memorydb"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+
+[dependencies]
+parking_lot = "0.4"
+rlp = { path = "../rlp" }
+kvdb = { path = "../kvdb" }
diff --git a/util/kvdb-memorydb/src/lib.rs b/util/kvdb-memorydb/src/lib.rs
new file mode 100644
index 000000000..6cee7b9b1
--- /dev/null
+++ b/util/kvdb-memorydb/src/lib.rs
@@ -0,0 +1,124 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+extern crate parking_lot;
+extern crate kvdb;
+extern crate rlp;
+
+use std::collections::{BTreeMap, HashMap};
+use parking_lot::RwLock;
+use kvdb::{DBValue, Error, DBTransaction, KeyValueDB, DBOp};
+use rlp::{RlpType, UntrustedRlp, Compressible};
+
+/// A key-value database fulfilling the `KeyValueDB` trait, living in memory.
+/// This is generally intended for tests and is not particularly optimized.
+#[derive(Default)]
+pub struct InMemory {
+	columns: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, DBValue>>>,
+}
+
+/// Create an in-memory database with the given number of columns.
+/// Columns will be indexable by 0..`num_cols`
+pub fn create(num_cols: u32) -> InMemory {
+	let mut cols = HashMap::new();
+	cols.insert(None, BTreeMap::new());
+
+	for idx in 0..num_cols {
+		cols.insert(Some(idx), BTreeMap::new());
+	}
+
+	InMemory {
+		columns: RwLock::new(cols)
+	}
+}
+
+impl KeyValueDB for InMemory {
+	fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
+		let columns = self.columns.read();
+		match columns.get(&col) {
+			None => Err(format!("No such column family: {:?}", col)),
+			Some(map) => Ok(map.get(key).cloned()),
+		}
+	}
+
+	fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
+		let columns = self.columns.read();
+		match columns.get(&col) {
+			None => None,
+			Some(map) =>
+				map.iter()
+					.find(|&(ref k, _)| k.starts_with(prefix))
+					.map(|(_, v)| v.to_vec().into_boxed_slice())
+		}
+	}
+
+	fn write_buffered(&self, transaction: DBTransaction) {
+		let mut columns = self.columns.write();
+		let ops = transaction.ops;
+		for op in ops {
+			match op {
+				DBOp::Insert { col, key, value } => {
+					if let Some(col) = columns.get_mut(&col) {
+						col.insert(key.into_vec(), value);
+					}
+				},
+				DBOp::InsertCompressed { col, key, value } => {
+					if let Some(col) = columns.get_mut(&col) {
+						let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
+						let mut value = DBValue::new();
+						value.append_slice(&compressed);
+						col.insert(key.into_vec(), value);
+					}
+				},
+				DBOp::Delete { col, key } => {
+					if let Some(col) = columns.get_mut(&col) {
+						col.remove(&*key);
+					}
+				},
+			}
+		}
+	}
+
+	fn flush(&self) -> Result<(), String> { Ok(()) }
+
+	fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
+		match self.columns.read().get(&col) {
+			Some(map) => Box::new( // TODO: worth optimizing at all?
+				map.clone()
+					.into_iter()
+					.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
+			),
+			None => Box::new(None.into_iter()),
+		}
+	}
+
+	fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
+		-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
+	{
+		match self.columns.read().get(&col) {
+			Some(map) => Box::new(
+				map.clone()
+					.into_iter()
+					.skip_while(move |&(ref k, _)| !k.starts_with(prefix))
+					.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
+			),
+			None => Box::new(None.into_iter()),
+		}
+	}
+
+	fn restore(&self, _new_db: &str) -> Result<(), Error> {
+		Err("Attempted to restore in-memory database".into())
+	}
+}
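A minimal sketch (not part of the patch) of exercising the new in-memory backend through the `KeyValueDB` trait, mirroring how the updated tests use it; it assumes `DBTransaction::put` keeps its existing `(col, key, value)` signature in the `kvdb` crate.

```rust
extern crate kvdb;
extern crate kvdb_memorydb;

use kvdb::{DBTransaction, KeyValueDB};

fn main() {
	// One numbered column (Some(0)) plus the default column (None).
	let db = kvdb_memorydb::create(1);

	let mut tr = DBTransaction::new();
	tr.put(Some(0), b"key", b"value");
	db.write_buffered(tr); // InMemory applies buffered writes immediately.

	let value = db.get(Some(0), b"key").unwrap().expect("value was just written");
	assert_eq!(&*value, &b"value"[..]);
}
```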
diff --git a/util/kvdb-rocksdb/Cargo.toml b/util/kvdb-rocksdb/Cargo.toml
new file mode 100644
index 000000000..a3361af98
--- /dev/null
+++ b/util/kvdb-rocksdb/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "kvdb-rocksdb"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+
+[dependencies]
+elastic-array = "0.9"
+ethcore-bigint = { path = "../bigint" }
+ethcore-devtools = { path = "../../devtools" }
+kvdb = { path = "../kvdb" }
+log = "0.3"
+num_cpus = "1.0"
+parking_lot = "0.4"
+regex = "0.2"
+rlp = { path = "../rlp" }
+rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" }
diff --git a/util/kvdb-rocksdb/src/lib.rs b/util/kvdb-rocksdb/src/lib.rs
new file mode 100644
index 000000000..901ed9f80
--- /dev/null
+++ b/util/kvdb-rocksdb/src/lib.rs
@@ -0,0 +1,811 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+#[macro_use]
+extern crate log;
+
+extern crate elastic_array;
+extern crate num_cpus;
+extern crate parking_lot;
+extern crate regex;
+extern crate rocksdb;
+
+extern crate ethcore_bigint as bigint;
+extern crate ethcore_devtools as devtools;
+extern crate kvdb;
+extern crate rlp;
+
+use std::cmp;
+use std::collections::HashMap;
+use std::marker::PhantomData;
+use std::path::{PathBuf, Path};
+use std::{mem, fs, io};
+
+use parking_lot::{Mutex, MutexGuard, RwLock};
+use rocksdb::{
+	DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
+	Options, BlockBasedOptions, Direction, Cache, Column, ReadOptions
+};
+
+use elastic_array::ElasticArray32;
+use rlp::{UntrustedRlp, RlpType, Compressible};
+use kvdb::{KeyValueDB, DBTransaction, DBValue, Error, DBOp};
+
+#[cfg(target_os = "linux")]
+use regex::Regex;
+#[cfg(target_os = "linux")]
+use std::process::Command;
+#[cfg(target_os = "linux")]
+use std::fs::File;
+
+const DB_DEFAULT_MEMORY_BUDGET_MB: usize = 128;
+
+enum KeyState {
+	Insert(DBValue),
+	InsertCompressed(DBValue),
+	Delete,
+}
+
+/// Compaction profile for the database settings
+#[derive(Clone, Copy, PartialEq, Debug)]
+pub struct CompactionProfile {
+	/// L0-L1 target file size
+	pub initial_file_size: u64,
+	/// block size
+	pub block_size: usize,
+	/// rate limiter for background flushes and compactions, bytes/sec, if any
+	pub write_rate_limit: Option<u64>,
+}
+
+impl Default for CompactionProfile {
+	/// Default profile suitable for most storage
+	fn default() -> CompactionProfile {
+		CompactionProfile::ssd()
+	}
+}
+
+/// Given output of df command return Linux rotational flag file path.
+#[cfg(target_os = "linux")]
+pub fn rotational_from_df_output(df_out: Vec<u8>) -> Option<PathBuf> {
+	use std::str;
+	str::from_utf8(df_out.as_slice())
+		.ok()
+		// Get the drive name.
+		.and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})")
+			.ok()
+			.and_then(|re| re.captures(df_str))
+			.and_then(|captures| captures.get(1)))
+		// Generate path e.g. /sys/block/sda/queue/rotational
+		.map(|drive_path| {
+			let mut p = PathBuf::from("/sys/block");
+			p.push(drive_path.as_str());
+			p.push("queue/rotational");
+			p
+		})
+}
+
+impl CompactionProfile {
+	/// Attempt to determine the best profile automatically, only Linux for now.
+	#[cfg(target_os = "linux")]
+	pub fn auto(db_path: &Path) -> CompactionProfile {
+		use std::io::Read;
+		let hdd_check_file = db_path
+			.to_str()
+			.and_then(|path_str| Command::new("df").arg(path_str).output().ok())
+			.and_then(|df_res| match df_res.status.success() {
+				true => Some(df_res.stdout),
+				false => None,
+			})
+			.and_then(rotational_from_df_output);
+		// Read out the file and match compaction profile.
+		if let Some(hdd_check) = hdd_check_file {
+			if let Ok(mut file) = File::open(hdd_check.as_path()) {
+				let mut buffer = [0; 1];
+				if file.read_exact(&mut buffer).is_ok() {
+					// 0 means not rotational.
+					if buffer == [48] { return Self::ssd(); }
+					// 1 means rotational.
+					if buffer == [49] { return Self::hdd(); }
+				}
+			}
+		}
+		// Fallback if drive type was not determined.
+		Self::default()
+	}
+
+	/// Just default for other platforms.
+	#[cfg(not(target_os = "linux"))]
+	pub fn auto(_db_path: &Path) -> CompactionProfile {
+		Self::default()
+	}
+
+	/// Default profile suitable for SSD storage
+	pub fn ssd() -> CompactionProfile {
+		CompactionProfile {
+			initial_file_size: 64 * 1024 * 1024,
+			block_size: 16 * 1024,
+			write_rate_limit: None,
+		}
+	}
+
+	/// Slow HDD compaction profile
+	pub fn hdd() -> CompactionProfile {
+		CompactionProfile {
+			initial_file_size: 256 * 1024 * 1024,
+			block_size: 64 * 1024,
+			write_rate_limit: Some(16 * 1024 * 1024),
+		}
+	}
+}
+
+/// Database configuration
+#[derive(Clone)]
+pub struct DatabaseConfig {
+	/// Max number of open files.
+	pub max_open_files: i32,
+	/// Memory budget (in MiB) used for setting block cache size, write buffer size.
+	pub memory_budget: Option<usize>,
+	/// Compaction profile
+	pub compaction: CompactionProfile,
+	/// Set number of columns
+	pub columns: Option<u32>,
+	/// Should we keep WAL enabled?
+	pub wal: bool,
+}
+
+impl DatabaseConfig {
+	/// Create new `DatabaseConfig` with default parameters and specified set of columns.
+	/// Note that cache sizes must be explicitly set.
+	pub fn with_columns(columns: Option<u32>) -> Self {
+		let mut config = Self::default();
+		config.columns = columns;
+		config
+	}
+
+	pub fn memory_budget(&self) -> usize {
+		self.memory_budget.unwrap_or(DB_DEFAULT_MEMORY_BUDGET_MB) * 1024 * 1024
+	}
+
+	pub fn memory_budget_per_col(&self) -> usize {
+		self.memory_budget() / self.columns.unwrap_or(1) as usize
+	}
+}
+
+impl Default for DatabaseConfig {
+	fn default() -> DatabaseConfig {
+		DatabaseConfig {
+			max_open_files: 512,
+			memory_budget: None,
+			compaction: CompactionProfile::default(),
+			columns: None,
+			wal: true,
+		}
+	}
+}
+
+/// Database iterator (for flushed data only)
+// The compromise of holding only a virtual borrow vs. holding a lock on the
+// inner DB (to prevent closing via restoration) may be re-evaluated in the future.
+//
+pub struct DatabaseIterator<'a> {
+	iter: DBIterator,
+	_marker: PhantomData<&'a Database>,
+}
+
+impl<'a> Iterator for DatabaseIterator<'a> {
+	type Item = (Box<[u8]>, Box<[u8]>);
+
+	fn next(&mut self) -> Option<Self::Item> {
+		self.iter.next()
+	}
+}
+
+struct DBAndColumns {
+	db: DB,
+	cfs: Vec<Column>,
+}
+
+// get column family configuration from database config.
+fn col_config(config: &DatabaseConfig, block_opts: &BlockBasedOptions) -> Result<Options, String> {
+	let mut opts = Options::new();
+
+	opts.set_parsed_options("level_compaction_dynamic_level_bytes=true")?;
+
+	opts.set_block_based_table_factory(block_opts);
+
+	opts.set_parsed_options(
+		&format!("block_based_table_factory={{{};{}}}",
+			"cache_index_and_filter_blocks=true",
+			"pin_l0_filter_and_index_blocks_in_cache=true"))?;
+
+	opts.optimize_level_style_compaction(config.memory_budget_per_col() as i32);
+	opts.set_target_file_size_base(config.compaction.initial_file_size);
+
+	opts.set_parsed_options("compression_per_level=")?;
+
+	Ok(opts)
+}
+
+/// Key-Value database.
+pub struct Database {
+	db: RwLock<Option<DBAndColumns>>,
+	config: DatabaseConfig,
+	write_opts: WriteOptions,
+	read_opts: ReadOptions,
+	block_opts: BlockBasedOptions,
+	path: String,
+	// Dirty values added with `write_buffered`. Cleaned on `flush`.
+	overlay: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
+	// Values currently being flushed. Cleared when `flush` completes.
+	flushing: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
+	// Prevents concurrent flushes.
+	// Value indicates if a flush is in progress.
+	flushing_lock: Mutex<bool>,
+}
+
+impl Database {
+	/// Open database with default settings.
+	/// Open database with default settings.
+	pub fn open_default(path: &str) -> Result<Database, String> {
+		Database::open(&DatabaseConfig::default(), path)
+	}
+
+	/// Open database file. Creates if it does not exist.
+	pub fn open(config: &DatabaseConfig, path: &str) -> Result<Database, String> {
+		let mut opts = Options::new();
+
+		if let Some(rate_limit) = config.compaction.write_rate_limit {
+			opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit))?;
+		}
+		opts.set_use_fsync(false);
+		opts.create_if_missing(true);
+		opts.set_max_open_files(config.max_open_files);
+		opts.set_parsed_options("keep_log_file_num=1")?;
+		opts.set_parsed_options("bytes_per_sync=1048576")?;
+		opts.set_db_write_buffer_size(config.memory_budget_per_col() / 2);
+		opts.increase_parallelism(cmp::max(1, ::num_cpus::get() as i32 / 2));
+
+		let mut block_opts = BlockBasedOptions::new();
+
+		{
+			block_opts.set_block_size(config.compaction.block_size);
+			let cache_size = cmp::max(8, config.memory_budget() / 3);
+			let cache = Cache::new(cache_size);
+			block_opts.set_cache(cache);
+		}
+
+		let columns = config.columns.unwrap_or(0) as usize;
+
+		let mut cf_options = Vec::with_capacity(columns);
+		let cfnames: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect();
+		let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect();
+
+		for _ in 0..columns {
+			cf_options.push(col_config(&config, &block_opts)?);
+		}
+
+		let mut write_opts = WriteOptions::new();
+		if !config.wal {
+			write_opts.disable_wal(true);
+		}
+		let mut read_opts = ReadOptions::new();
+		read_opts.set_verify_checksums(false);
+
+		let mut cfs: Vec<Column> = Vec::new();
+		let db = match config.columns {
+			Some(columns) => {
+				match DB::open_cf(&opts, path, &cfnames, &cf_options) {
+					Ok(db) => {
+						cfs = cfnames.iter().map(|n| db.cf_handle(n)
+							.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
+						assert!(cfs.len() == columns as usize);
+						Ok(db)
+					}
+					Err(_) => {
+						// retry and create CFs
+						match DB::open_cf(&opts, path, &[], &[]) {
+							Ok(mut db) => {
+								cfs = cfnames.iter().enumerate().map(|(i, n)| db.create_cf(n, &cf_options[i])).collect::<Result<_, _>>()?;
+								Ok(db)
+							},
+							err @ Err(_) => err,
+						}
+					}
+				}
+			},
+			None => DB::open(&opts, path)
+		};
+
+		let db = match db {
+			Ok(db) => db,
+			Err(ref s) if s.starts_with("Corruption:") => {
+				info!("{}", s);
+				info!("Attempting DB repair for {}", path);
+				DB::repair(&opts, path)?;
+
+				match cfnames.is_empty() {
+					true => DB::open(&opts, path)?,
+					false => DB::open_cf(&opts, path, &cfnames, &cf_options)?
+				}
+			},
+			Err(s) => { return Err(s); }
+		};
+		let num_cols = cfs.len();
+		Ok(Database {
+			db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })),
+			config: config.clone(),
+			write_opts: write_opts,
+			overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
+			flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
+			flushing_lock: Mutex::new(false),
+			path: path.to_owned(),
+			read_opts: read_opts,
+			block_opts: block_opts,
+		})
+	}
+
+	/// Helper to create new transaction for this database.
+	pub fn transaction(&self) -> DBTransaction {
+		DBTransaction::new()
+	}
+
+	fn to_overlay_column(col: Option<u32>) -> usize {
+		col.map_or(0, |c| (c + 1) as usize)
+	}
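+
+	// Index layout sketch: `overlay` and `flushing` hold one map per column,
+	// with the default (no-column) keyspace at index 0, so
+	// `to_overlay_column(None) == 0` and `to_overlay_column(Some(3)) == 4`.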
+
+	/// Queue a transaction in the in-memory overlay; it is committed to disk on `flush`.
+	pub fn write_buffered(&self, tr: DBTransaction) {
+		let mut overlay = self.overlay.write();
+		let ops = tr.ops;
+		for op in ops {
+			match op {
+				DBOp::Insert { col, key, value } => {
+					let c = Self::to_overlay_column(col);
+					overlay[c].insert(key, KeyState::Insert(value));
+				},
+				DBOp::InsertCompressed { col, key, value } => {
+					let c = Self::to_overlay_column(col);
+					overlay[c].insert(key, KeyState::InsertCompressed(value));
+				},
+				DBOp::Delete { col, key } => {
+					let c = Self::to_overlay_column(col);
+					overlay[c].insert(key, KeyState::Delete);
+				},
+			}
+		}
+	}
+
+	/// Commit buffered changes to database. Must be called under `flush_lock`
+	fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<bool>) -> Result<(), String> {
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
+				let batch = WriteBatch::new();
+				mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write());
+				{
+					for (c, column) in self.flushing.read().iter().enumerate() {
+						for (ref key, ref state) in column.iter() {
+							match **state {
+								KeyState::Delete => {
+									if c > 0 {
+										batch.delete_cf(cfs[c - 1], &key)?;
+									} else {
+										batch.delete(&key)?;
+									}
+								},
+								KeyState::Insert(ref value) => {
+									if c > 0 {
+										batch.put_cf(cfs[c - 1], &key, value)?;
+									} else {
+										batch.put(&key, &value)?;
+									}
+								},
+								KeyState::InsertCompressed(ref value) => {
+									let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
+									if c > 0 {
+										batch.put_cf(cfs[c - 1], &key, &compressed)?;
+									} else {
+										// write the compressed value to the default column as well
+										batch.put(&key, &compressed)?;
+									}
+								}
+							}
+						}
+					}
+				}
+				db.write_opt(batch, &self.write_opts)?;
+				for column in self.flushing.write().iter_mut() {
+					column.clear();
+					column.shrink_to_fit();
+				}
+				Ok(())
+			},
+			None => Err("Database is closed".to_owned())
+		}
+	}
+
+	/// Commit buffered changes to database.
+	pub fn flush(&self) -> Result<(), String> {
+		let mut lock = self.flushing_lock.lock();
+		// If RocksDB batch allocation fails the thread gets terminated and the lock is released.
+		// The value inside the lock is used to detect that.
+		if *lock {
+			// This can only happen if another flushing thread is terminated unexpectedly.
+			return Err("Database write failure. Running low on memory perhaps?".to_owned());
+		}
+		*lock = true;
+		let result = self.write_flushing_with_lock(&mut lock);
+		*lock = false;
+		result
+	}
+
+	/// Commit transaction to database.
+	pub fn write(&self, tr: DBTransaction) -> Result<(), String> {
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
+				let batch = WriteBatch::new();
+				let ops = tr.ops;
+				for op in ops {
+					match op {
+						DBOp::Insert { col, key, value } => {
+							col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(cfs[c as usize], &key, &value))?
+						},
+						DBOp::InsertCompressed { col, key, value } => {
+							let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
+							col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(cfs[c as usize], &key, &compressed))?
+						},
+						DBOp::Delete { col, key } => {
+							col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(cfs[c as usize], &key))?
+						},
+					}
+				}
+				db.write_opt(batch, &self.write_opts)
+			},
+			None => Err("Database is closed".to_owned())
+		}
+	}
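+
+	// Illustrative usage (a sketch, not code from this crate): `write` commits
+	// synchronously, while `write_buffered` only stages ops in the overlay and
+	// `flush` persists everything staged in a single batch.
+	//
+	//     let mut tx = db.transaction();
+	//     tx.put(None, b"key", b"value");
+	//     db.write_buffered(tx); // visible to `get`, not yet on disk
+	//     db.flush()?;           // writes the staged ops in one batch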
+
+	/// Get value by key.
+	pub fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
+				let overlay = &self.overlay.read()[Self::to_overlay_column(col)];
+				match overlay.get(key) {
+					Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
+					Some(&KeyState::Delete) => Ok(None),
+					None => {
+						let flushing = &self.flushing.read()[Self::to_overlay_column(col)];
+						match flushing.get(key) {
+							Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
+							Some(&KeyState::Delete) => Ok(None),
+							None => {
+								col.map_or_else(
+									|| db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))),
+									|c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))))
+							},
+						}
+					},
+				}
+			},
+			None => Ok(None),
+		}
+	}
+
+	/// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values.
+	// TODO: support prefix seek for unflushed data
+	pub fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
+		self.iter_from_prefix(col, prefix).and_then(|mut iter| {
+			match iter.next() {
+				// TODO: use prefix_same_as_start read option (not available in C API currently)
+				Some((k, v)) => if k[0..prefix.len()] == prefix[..] { Some(v) } else { None },
+				_ => None
+			}
+		})
+	}
+
+	/// Get database iterator for flushed data.
+	pub fn iter(&self, col: Option<u32>) -> Option<DatabaseIterator> {
+		// TODO: iterate over overlay
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
+				let iter = col.map_or_else(
+					|| db.iterator_opt(IteratorMode::Start, &self.read_opts),
+					|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts)
+						.expect("iterator params are valid; qed")
+				);
+
+				Some(DatabaseIterator {
+					iter: iter,
+					_marker: PhantomData,
+				})
+			},
+			None => None,
+		}
+	}
+
+	fn iter_from_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<DatabaseIterator> {
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
+				let iter = col.map_or_else(
+					|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts),
+					|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts)
+						.expect("iterator params are valid; qed"));
+
+				Some(DatabaseIterator {
+					iter: iter,
+					_marker: PhantomData,
+				})
+			},
+			None => None,
+		}
+	}
+
+	/// Close the database
+	fn close(&self) {
+		*self.db.write() = None;
+		self.overlay.write().clear();
+		self.flushing.write().clear();
+	}
+
+	/// Restore the database from a copy at the given path.
+	pub fn restore(&self, new_db: &str) -> Result<(), Error> {
+		self.close();
+
+		let mut backup_db = PathBuf::from(&self.path);
+		backup_db.pop();
+		backup_db.push("backup_db");
+
+		let existed = match fs::rename(&self.path, &backup_db) {
+			Ok(_) => true,
+			Err(e) => if let io::ErrorKind::NotFound = e.kind() {
+				false
+			} else {
+				return Err(e.into());
+			}
+		};
+
+		match fs::rename(&new_db, &self.path) {
+			Ok(_) => {
+				// clean up the backup.
+				if existed {
+					fs::remove_dir_all(&backup_db)?;
+				}
+			}
+			Err(e) => {
+				// restore the backup.
+				if existed {
+					fs::rename(&backup_db, &self.path)?;
+				}
+				return Err(e.into())
+			}
+		}
+
+		// reopen the database and steal handles into self
+		let db = Self::open(&self.config, &self.path)?;
+		*self.db.write() = mem::replace(&mut *db.db.write(), None);
+		*self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new());
+		*self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new());
+		Ok(())
+	}
+
+	/// The number of non-default column families.
+	pub fn num_columns(&self) -> u32 {
+		self.db.read().as_ref()
+			.and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) })
+			.map(|n| n as u32)
+			.unwrap_or(0)
+	}
+
+	/// Drop a column family.
+	pub fn drop_column(&self) -> Result<(), String> {
+		match *self.db.write() {
+			Some(DBAndColumns { ref mut db, ref mut cfs }) => {
+				if let Some(col) = cfs.pop() {
+					let name = format!("col{}", cfs.len());
+					drop(col);
+					db.drop_cf(&name)?;
+				}
+				Ok(())
+			},
+			None => Ok(()),
+		}
+	}
+
+	/// Add a column family.
+	pub fn add_column(&self) -> Result<(), String> {
+		match *self.db.write() {
+			Some(DBAndColumns { ref mut db, ref mut cfs }) => {
+				let col = cfs.len() as u32;
+				let name = format!("col{}", col);
+				cfs.push(db.create_cf(&name, &col_config(&self.config, &self.block_opts)?)?);
+				Ok(())
+			},
+			None => Ok(()),
+		}
+	}
+}
+
+// duplicate declaration of methods here to avoid trait import in certain existing cases
+// at time of addition.
+impl KeyValueDB for Database {
+	fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
+		Database::get(self, col, key)
+	}
+
+	fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
+		Database::get_by_prefix(self, col, prefix)
+	}
+
+	fn write_buffered(&self, transaction: DBTransaction) {
+		Database::write_buffered(self, transaction)
+	}
+
+	fn write(&self, transaction: DBTransaction) -> Result<(), String> {
+		Database::write(self, transaction)
+	}
+
+	fn flush(&self) -> Result<(), String> {
+		Database::flush(self)
+	}
+
+	fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
+		let unboxed = Database::iter(self, col);
+		Box::new(unboxed.into_iter().flat_map(|inner| inner))
+	}
+
+	fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
+		-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
+	{
+		let unboxed = Database::iter_from_prefix(self, col, prefix);
+		Box::new(unboxed.into_iter().flat_map(|inner| inner))
+	}
+
+	fn restore(&self, new_db: &str) -> Result<(), Error> {
+		Database::restore(self, new_db)
+	}
+}
+
+impl Drop for Database {
+	fn drop(&mut self) {
+		// write all buffered changes if we can.
+ let _ = self.flush(); + } +} + +#[cfg(test)] +mod tests { + use bigint::hash::H256; + use super::*; + use devtools::*; + use std::str::FromStr; + + fn test_db(config: &DatabaseConfig) { + let path = RandomTempPath::create_dir(); + let db = Database::open(config, path.as_path().to_str().unwrap()).unwrap(); + let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); + let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); + let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); + + let mut batch = db.transaction(); + batch.put(None, &key1, b"cat"); + batch.put(None, &key2, b"dog"); + db.write(batch).unwrap(); + + assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"cat"); + + let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect(); + assert_eq!(contents.len(), 2); + assert_eq!(&*contents[0].0, &*key1); + assert_eq!(&*contents[0].1, b"cat"); + assert_eq!(&*contents[1].0, &*key2); + assert_eq!(&*contents[1].1, b"dog"); + + let mut batch = db.transaction(); + batch.delete(None, &key1); + db.write(batch).unwrap(); + + assert!(db.get(None, &key1).unwrap().is_none()); + + let mut batch = db.transaction(); + batch.put(None, &key1, b"cat"); + db.write(batch).unwrap(); + + let mut transaction = db.transaction(); + transaction.put(None, &key3, b"elephant"); + transaction.delete(None, &key1); + db.write(transaction).unwrap(); + assert!(db.get(None, &key1).unwrap().is_none()); + assert_eq!(&*db.get(None, &key3).unwrap().unwrap(), b"elephant"); + + assert_eq!(&*db.get_by_prefix(None, &key3).unwrap(), b"elephant"); + assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog"); + + let mut transaction = db.transaction(); + transaction.put(None, &key1, b"horse"); + transaction.delete(None, &key3); + db.write_buffered(transaction); + assert!(db.get(None, &key3).unwrap().is_none()); + assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse"); + + db.flush().unwrap(); + assert!(db.get(None, &key3).unwrap().is_none()); + assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse"); + } + + #[test] + fn kvdb() { + let path = RandomTempPath::create_dir(); + let _ = Database::open_default(path.as_path().to_str().unwrap()).unwrap(); + test_db(&DatabaseConfig::default()); + } + + #[test] + #[cfg(target_os = "linux")] + fn df_to_rotational() { + use std::path::PathBuf; + // Example df output. + let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10]; + let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational")); + assert_eq!(rotational_from_df_output(example_df), expected_output); + } + + #[test] + fn add_columns() { + let config = DatabaseConfig::default(); + let config_5 = DatabaseConfig::with_columns(Some(5)); + + let path = RandomTempPath::create_dir(); + + // open empty, add 5. 
+		{
+			let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
+			assert_eq!(db.num_columns(), 0);
+
+			for i in 0..5 {
+				db.add_column().unwrap();
+				assert_eq!(db.num_columns(), i + 1);
+			}
+		}
+
+		// reopen as 5.
+		{
+			let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
+			assert_eq!(db.num_columns(), 5);
+		}
+	}
+
+	#[test]
+	fn drop_columns() {
+		let config = DatabaseConfig::default();
+		let config_5 = DatabaseConfig::with_columns(Some(5));
+
+		let path = RandomTempPath::create_dir();
+
+		// open 5, remove all.
+		{
+			let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
+			assert_eq!(db.num_columns(), 5);
+
+			for i in (0..5).rev() {
+				db.drop_column().unwrap();
+				assert_eq!(db.num_columns(), i);
+			}
+		}
+
+		// reopen as 0.
+		{
+			let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
+			assert_eq!(db.num_columns(), 0);
+		}
+	}
+}
diff --git a/util/kvdb/Cargo.toml b/util/kvdb/Cargo.toml
index 16c3cb86d..c3418a714 100644
--- a/util/kvdb/Cargo.toml
+++ b/util/kvdb/Cargo.toml
@@ -4,14 +4,6 @@ version = "0.1.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 
 [dependencies]
-log = "0.3"
-ethcore-bytes = { path = "../bytes" }
-ethcore-bigint = { path = "../bigint" }
-ethcore-devtools = { path = "../../devtools" }
 elastic-array = "0.9"
-hashdb = { path = "../hashdb" }
-parking_lot = "0.4"
-regex = "0.2"
-rlp = { path = "../rlp" }
-rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" }
 error-chain = "0.11.0-rc.2"
+ethcore-bytes = { path = "../bytes" }
diff --git a/util/kvdb/src/lib.rs b/util/kvdb/src/lib.rs
index d10d663ba..bf17c1460 100644
--- a/util/kvdb/src/lib.rs
+++ b/util/kvdb/src/lib.rs
@@ -16,48 +16,21 @@
 
 //! Key-Value store abstraction with `RocksDB` backend.
 
-#[macro_use]
-extern crate log;
 #[macro_use]
 extern crate error_chain;
-
-extern crate ethcore_bytes as bytes;
-extern crate ethcore_bigint as bigint;
-extern crate ethcore_devtools as devtools;
 extern crate elastic_array;
-extern crate hashdb;
-extern crate parking_lot;
-extern crate rlp;
-extern crate rocksdb;
-extern crate regex;
+extern crate ethcore_bytes as bytes;
 
-use std::{mem, fs, io};
-use std::collections::{HashMap, BTreeMap};
-use std::marker::PhantomData;
-use std::path::{PathBuf, Path};
-use parking_lot::{Mutex, MutexGuard, RwLock};
-
-use elastic_array::*;
-use hashdb::DBValue;
-use rlp::{UntrustedRlp, RlpType, Compressible};
-use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
-	Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column, ReadOptions};
+use std::io;
+use elastic_array::{ElasticArray128, ElasticArray32};
 use bytes::Bytes;
 
-#[cfg(target_os = "linux")]
-use regex::Regex;
-#[cfg(target_os = "linux")]
-use std::process::Command;
-#[cfg(target_os = "linux")]
-use std::fs::File;
-
-const DB_BACKGROUND_FLUSHES: i32 = 2;
-const DB_BACKGROUND_COMPACTIONS: i32 = 2;
-const DB_WRITE_BUFFER_SIZE: usize = 2048 * 1000;
-
 /// Required length of prefixes.
 pub const PREFIX_LEN: usize = 12;
 
+/// Database value.
+pub type DBValue = ElasticArray128<u8>;
+
 error_chain! {
 	types {
 		Error, ErrorKind, ResultExt;
@@ -71,11 +44,13 @@ error_chain! {
 /// Write transaction. Batches a sequence of put/delete operations for efficiency.
 #[derive(Default, Clone, PartialEq)]
 pub struct DBTransaction {
-	ops: Vec<DBOp>,
+	/// Database operations.
+	pub ops: Vec<DBOp>,
 }
 
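+// With `ops` public, out-of-crate backends (kvdb-rocksdb, kvdb-memorydb) can
+// consume a transaction directly, e.g. `for op in tr.ops { match op { ... } }`,
+// dispatching on the `DBOp` variants declared below.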
+/// Database operation.
 #[derive(Clone, PartialEq)]
-enum DBOp {
+pub enum DBOp {
 	Insert {
 		col: Option<u32>,
 		key: ElasticArray32<u8>,
@@ -150,12 +125,6 @@ impl DBTransaction {
 	}
 }
 
-enum KeyState {
-	Insert(DBValue),
-	InsertCompressed(DBValue),
-	Delete,
-}
-
 /// Generic key-value database.
 ///
 /// This makes a distinction between "buffered" and "flushed" values. Values which have been
@@ -206,847 +175,3 @@ pub trait KeyValueDB: Sync + Send {
 	/// Attempt to replace this database with a new one located at the given path.
 	fn restore(&self, new_db: &str) -> Result<(), Error>;
 }
-
-/// A key-value database fulfilling the `KeyValueDB` trait, living in memory.
-/// This is generally intended for tests and is not particularly optimized.
-pub struct InMemory {
-	columns: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, DBValue>>>,
-}
-
-/// Create an in-memory database with the given number of columns.
-/// Columns will be indexable by 0..`num_cols`
-pub fn in_memory(num_cols: u32) -> InMemory {
-	let mut cols = HashMap::new();
-	cols.insert(None, BTreeMap::new());
-
-	for idx in 0..num_cols {
-		cols.insert(Some(idx), BTreeMap::new());
-	}
-
-	InMemory {
-		columns: RwLock::new(cols)
-	}
-}
-
-impl KeyValueDB for InMemory {
-	fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
-		let columns = self.columns.read();
-		match columns.get(&col) {
-			None => Err(format!("No such column family: {:?}", col)),
-			Some(map) => Ok(map.get(key).cloned()),
-		}
-	}
-
-	fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
-		let columns = self.columns.read();
-		match columns.get(&col) {
-			None => None,
-			Some(map) =>
-				map.iter()
-					.find(|&(ref k, _)| k.starts_with(prefix))
-					.map(|(_, v)| v.to_vec().into_boxed_slice())
-		}
-	}
-
-	fn write_buffered(&self, transaction: DBTransaction) {
-		let mut columns = self.columns.write();
-		let ops = transaction.ops;
-		for op in ops {
-			match op {
-				DBOp::Insert { col, key, value } => {
-					if let Some(mut col) = columns.get_mut(&col) {
-						col.insert(key.into_vec(), value);
-					}
-				},
-				DBOp::InsertCompressed { col, key, value } => {
-					if let Some(mut col) = columns.get_mut(&col) {
-						let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
-						let mut value = DBValue::new();
-						value.append_slice(&compressed);
-						col.insert(key.into_vec(), value);
-					}
-				},
-				DBOp::Delete { col, key } => {
-					if let Some(mut col) = columns.get_mut(&col) {
-						col.remove(&*key);
-					}
-				},
-			}
-		}
-	}
-
-	fn flush(&self) -> Result<(), String> { Ok(()) }
-
-	fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
-		match self.columns.read().get(&col) {
-			Some(map) => Box::new( // TODO: worth optimizing at all?
- map.clone() - .into_iter() - .map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())) - ), - None => Box::new(None.into_iter()), - } - } - - fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a> - { - match self.columns.read().get(&col) { - Some(map) => Box::new( - map.clone() - .into_iter() - .skip_while(move |&(ref k, _)| !k.starts_with(prefix)) - .map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())) - ), - None => Box::new(None.into_iter()), - } - } - - fn restore(&self, _new_db: &str) -> Result<(), Error> { - Err("Attempted to restore in-memory database".into()) - } -} - -/// Compaction profile for the database settings -#[derive(Clone, Copy, PartialEq, Debug)] -pub struct CompactionProfile { - /// L0-L1 target file size - pub initial_file_size: u64, - /// L2-LN target file size multiplier - pub file_size_multiplier: i32, - /// rate limiter for background flushes and compactions, bytes/sec, if any - pub write_rate_limit: Option, -} - -impl Default for CompactionProfile { - /// Default profile suitable for most storage - fn default() -> CompactionProfile { - CompactionProfile::ssd() - } -} - -/// Given output of df command return Linux rotational flag file path. -#[cfg(target_os = "linux")] -pub fn rotational_from_df_output(df_out: Vec) -> Option { - use std::str; - str::from_utf8(df_out.as_slice()) - .ok() - // Get the drive name. - .and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})") - .ok() - .and_then(|re| re.captures(df_str)) - .and_then(|captures| captures.get(1))) - // Generate path e.g. /sys/block/sda/queue/rotational - .map(|drive_path| { - let mut p = PathBuf::from("/sys/block"); - p.push(drive_path.as_str()); - p.push("queue/rotational"); - p - }) -} - -impl CompactionProfile { - /// Attempt to determine the best profile automatically, only Linux for now. - #[cfg(target_os = "linux")] - pub fn auto(db_path: &Path) -> CompactionProfile { - use std::io::Read; - let hdd_check_file = db_path - .to_str() - .and_then(|path_str| Command::new("df").arg(path_str).output().ok()) - .and_then(|df_res| match df_res.status.success() { - true => Some(df_res.stdout), - false => None, - }) - .and_then(rotational_from_df_output); - // Read out the file and match compaction profile. - if let Some(hdd_check) = hdd_check_file { - if let Ok(mut file) = File::open(hdd_check.as_path()) { - let mut buffer = [0; 1]; - if file.read_exact(&mut buffer).is_ok() { - // 0 means not rotational. - if buffer == [48] { return Self::ssd(); } - // 1 means rotational. - if buffer == [49] { return Self::hdd(); } - } - } - } - // Fallback if drive type was not determined. - Self::default() - } - - /// Just default for other platforms. - #[cfg(not(target_os = "linux"))] - pub fn auto(_db_path: &Path) -> CompactionProfile { - Self::default() - } - - /// Default profile suitable for SSD storage - pub fn ssd() -> CompactionProfile { - CompactionProfile { - initial_file_size: 32 * 1024 * 1024, - file_size_multiplier: 2, - write_rate_limit: None, - } - } - - /// Slow HDD compaction profile - pub fn hdd() -> CompactionProfile { - CompactionProfile { - initial_file_size: 192 * 1024 * 1024, - file_size_multiplier: 1, - write_rate_limit: Some(8 * 1024 * 1024), - } - } -} - -/// Database configuration -#[derive(Clone)] -pub struct DatabaseConfig { - /// Max number of open files. - pub max_open_files: i32, - /// Cache sizes (in MiB) for specific columns. 
- pub cache_sizes: HashMap, usize>, - /// Compaction profile - pub compaction: CompactionProfile, - /// Set number of columns - pub columns: Option, - /// Should we keep WAL enabled? - pub wal: bool, -} - -impl DatabaseConfig { - /// Create new `DatabaseConfig` with default parameters and specified set of columns. - /// Note that cache sizes must be explicitly set. - pub fn with_columns(columns: Option) -> Self { - let mut config = Self::default(); - config.columns = columns; - config - } - - /// Set the column cache size in MiB. - pub fn set_cache(&mut self, col: Option, size: usize) { - self.cache_sizes.insert(col, size); - } -} - -impl Default for DatabaseConfig { - fn default() -> DatabaseConfig { - DatabaseConfig { - cache_sizes: HashMap::new(), - max_open_files: 512, - compaction: CompactionProfile::default(), - columns: None, - wal: true, - } - } -} - -/// Database iterator (for flushed data only) -// The compromise of holding only a virtual borrow vs. holding a lock on the -// inner DB (to prevent closing via restoration) may be re-evaluated in the future. -// -pub struct DatabaseIterator<'a> { - iter: DBIterator, - _marker: PhantomData<&'a Database>, -} - -impl<'a> Iterator for DatabaseIterator<'a> { - type Item = (Box<[u8]>, Box<[u8]>); - - fn next(&mut self) -> Option { - self.iter.next() - } -} - -struct DBAndColumns { - db: DB, - cfs: Vec, -} - -// get column family configuration from database config. -fn col_config(col: u32, config: &DatabaseConfig) -> Options { - // default cache size for columns not specified. - const DEFAULT_CACHE: usize = 2; - - let mut opts = Options::new(); - opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction); - opts.set_target_file_size_base(config.compaction.initial_file_size); - opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier); - opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE); - - let col_opt = config.columns.map(|_| col); - - { - let cache_size = config.cache_sizes.get(&col_opt).cloned().unwrap_or(DEFAULT_CACHE); - let mut block_opts = BlockBasedOptions::new(); - // all goes to read cache. - block_opts.set_cache(Cache::new(cache_size * 1024 * 1024)); - opts.set_block_based_table_factory(&block_opts); - } - - opts -} - -/// Key-Value database. -pub struct Database { - db: RwLock>, - config: DatabaseConfig, - write_opts: WriteOptions, - read_opts: ReadOptions, - path: String, - // Dirty values added with `write_buffered`. Cleaned on `flush`. - overlay: RwLock, KeyState>>>, - // Values currently being flushed. Cleared when `flush` completes. - flushing: RwLock, KeyState>>>, - // Prevents concurrent flushes. - // Value indicates if a flush is in progress. - flushing_lock: Mutex, -} - -impl Database { - /// Open database with default settings. - pub fn open_default(path: &str) -> Result { - Database::open(&DatabaseConfig::default(), path) - } - - /// Open database file. Creates if it does not exist. 
- pub fn open(config: &DatabaseConfig, path: &str) -> Result { - let mut opts = Options::new(); - if let Some(rate_limit) = config.compaction.write_rate_limit { - opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit))?; - } - opts.set_parsed_options(&format!("max_total_wal_size={}", 64 * 1024 * 1024))?; - opts.set_parsed_options("verify_checksums_in_compaction=0")?; - opts.set_parsed_options("keep_log_file_num=1")?; - opts.set_max_open_files(config.max_open_files); - opts.create_if_missing(true); - opts.set_use_fsync(false); - opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE); - - opts.set_max_background_flushes(DB_BACKGROUND_FLUSHES); - opts.set_max_background_compactions(DB_BACKGROUND_COMPACTIONS); - - // compaction settings - opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction); - opts.set_target_file_size_base(config.compaction.initial_file_size); - opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier); - - let mut cf_options = Vec::with_capacity(config.columns.unwrap_or(0) as usize); - let cfnames: Vec<_> = (0..config.columns.unwrap_or(0)).map(|c| format!("col{}", c)).collect(); - let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect(); - - for col in 0 .. config.columns.unwrap_or(0) { - cf_options.push(col_config(col, &config)); - } - - let mut write_opts = WriteOptions::new(); - if !config.wal { - write_opts.disable_wal(true); - } - let mut read_opts = ReadOptions::new(); - read_opts.set_verify_checksums(false); - - let mut cfs: Vec = Vec::new(); - let db = match config.columns { - Some(columns) => { - match DB::open_cf(&opts, path, &cfnames, &cf_options) { - Ok(db) => { - cfs = cfnames.iter().map(|n| db.cf_handle(n) - .expect("rocksdb opens a cf_handle for each cfname; qed")).collect(); - assert!(cfs.len() == columns as usize); - Ok(db) - } - Err(_) => { - // retry and create CFs - match DB::open_cf(&opts, path, &[], &[]) { - Ok(mut db) => { - cfs = cfnames.iter().enumerate().map(|(i, n)| db.create_cf(n, &cf_options[i])).collect::>()?; - Ok(db) - }, - err @ Err(_) => err, - } - } - } - }, - None => DB::open(&opts, path) - }; - - let db = match db { - Ok(db) => db, - Err(ref s) if s.starts_with("Corruption:") => { - info!("{}", s); - info!("Attempting DB repair for {}", path); - DB::repair(&opts, path)?; - - match cfnames.is_empty() { - true => DB::open(&opts, path)?, - false => DB::open_cf(&opts, path, &cfnames, &cf_options)? - } - }, - Err(s) => { return Err(s); } - }; - let num_cols = cfs.len(); - Ok(Database { - db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })), - config: config.clone(), - write_opts: write_opts, - overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), - flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), - flushing_lock: Mutex::new((false)), - path: path.to_owned(), - read_opts: read_opts, - }) - } - - /// Helper to create new transaction for this database. - pub fn transaction(&self) -> DBTransaction { - DBTransaction::new() - } - - - fn to_overlay_column(col: Option) -> usize { - col.map_or(0, |c| (c + 1) as usize) - } - - /// Commit transaction to database. 
- pub fn write_buffered(&self, tr: DBTransaction) { - let mut overlay = self.overlay.write(); - let ops = tr.ops; - for op in ops { - match op { - DBOp::Insert { col, key, value } => { - let c = Self::to_overlay_column(col); - overlay[c].insert(key, KeyState::Insert(value)); - }, - DBOp::InsertCompressed { col, key, value } => { - let c = Self::to_overlay_column(col); - overlay[c].insert(key, KeyState::InsertCompressed(value)); - }, - DBOp::Delete { col, key } => { - let c = Self::to_overlay_column(col); - overlay[c].insert(key, KeyState::Delete); - }, - } - }; - } - - /// Commit buffered changes to database. Must be called under `flush_lock` - fn write_flushing_with_lock(&self, _lock: &mut MutexGuard) -> Result<(), String> { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let batch = WriteBatch::new(); - mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write()); - { - for (c, column) in self.flushing.read().iter().enumerate() { - for (ref key, ref state) in column.iter() { - match **state { - KeyState::Delete => { - if c > 0 { - batch.delete_cf(cfs[c - 1], &key)?; - } else { - batch.delete(&key)?; - } - }, - KeyState::Insert(ref value) => { - if c > 0 { - batch.put_cf(cfs[c - 1], &key, value)?; - } else { - batch.put(&key, &value)?; - } - }, - KeyState::InsertCompressed(ref value) => { - let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); - if c > 0 { - batch.put_cf(cfs[c - 1], &key, &compressed)?; - } else { - batch.put(&key, &value)?; - } - } - } - } - } - } - db.write_opt(batch, &self.write_opts)?; - for column in self.flushing.write().iter_mut() { - column.clear(); - column.shrink_to_fit(); - } - Ok(()) - }, - None => Err("Database is closed".to_owned()) - } - } - - /// Commit buffered changes to database. - pub fn flush(&self) -> Result<(), String> { - let mut lock = self.flushing_lock.lock(); - // If RocksDB batch allocation fails the thread gets terminated and the lock is released. - // The value inside the lock is used to detect that. - if *lock { - // This can only happen if another flushing thread is terminated unexpectedly. - return Err("Database write failure. Running low on memory perhaps?".to_owned()); - } - *lock = true; - let result = self.write_flushing_with_lock(&mut lock); - *lock = false; - result - } - - /// Commit transaction to database. - pub fn write(&self, tr: DBTransaction) -> Result<(), String> { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let batch = WriteBatch::new(); - let ops = tr.ops; - for op in ops { - match op { - DBOp::Insert { col, key, value } => { - col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(cfs[c as usize], &key, &value))? - }, - DBOp::InsertCompressed { col, key, value } => { - let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks); - col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(cfs[c as usize], &key, &compressed))? - }, - DBOp::Delete { col, key } => { - col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(cfs[c as usize], &key))? - }, - } - } - db.write_opt(batch, &self.write_opts) - }, - None => Err("Database is closed".to_owned()) - } - } - - /// Get value by key. 
- pub fn get(&self, col: Option, key: &[u8]) -> Result, String> { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; - match overlay.get(key) { - Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())), - Some(&KeyState::Delete) => Ok(None), - None => { - let flushing = &self.flushing.read()[Self::to_overlay_column(col)]; - match flushing.get(key) { - Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())), - Some(&KeyState::Delete) => Ok(None), - None => { - col.map_or_else( - || db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))), - |c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v)))) - }, - } - }, - } - }, - None => Ok(None), - } - } - - /// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values. - // TODO: support prefix seek for unflushed data - pub fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { - self.iter_from_prefix(col, prefix).and_then(|mut iter| { - match iter.next() { - // TODO: use prefix_same_as_start read option (not availabele in C API currently) - Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None }, - _ => None - } - }) - } - - /// Get database iterator for flushed data. - pub fn iter(&self, col: Option) -> Option { - //TODO: iterate over overlay - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let iter = col.map_or_else( - || db.iterator_opt(IteratorMode::Start, &self.read_opts), - |c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts) - .expect("iterator params are valid; qed") - ); - - Some(DatabaseIterator { - iter: iter, - _marker: PhantomData, - }) - }, - None => None, - } - } - - fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Option { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts), - |c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts) - .expect("iterator params are valid; qed")); - - Some(DatabaseIterator { - iter: iter, - _marker: PhantomData, - }) - }, - None => None, - } - } - - /// Close the database - fn close(&self) { - *self.db.write() = None; - self.overlay.write().clear(); - self.flushing.write().clear(); - } - - /// Restore the database from a copy at given path. - pub fn restore(&self, new_db: &str) -> Result<(), Error> { - self.close(); - - let mut backup_db = PathBuf::from(&self.path); - backup_db.pop(); - backup_db.push("backup_db"); - - let existed = match fs::rename(&self.path, &backup_db) { - Ok(_) => true, - Err(e) => if let io::ErrorKind::NotFound = e.kind() { - false - } else { - return Err(e.into()); - } - }; - - match fs::rename(&new_db, &self.path) { - Ok(_) => { - // clean up the backup. - if existed { - fs::remove_dir_all(&backup_db)?; - } - } - Err(e) => { - // restore the backup. 
- if existed { - fs::rename(&backup_db, &self.path)?; - } - return Err(e.into()) - } - } - - // reopen the database and steal handles into self - let db = Self::open(&self.config, &self.path)?; - *self.db.write() = mem::replace(&mut *db.db.write(), None); - *self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new()); - *self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new()); - Ok(()) - } - - /// The number of non-default column families. - pub fn num_columns(&self) -> u32 { - self.db.read().as_ref() - .and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) } ) - .map(|n| n as u32) - .unwrap_or(0) - } - - /// Drop a column family. - pub fn drop_column(&self) -> Result<(), String> { - match *self.db.write() { - Some(DBAndColumns { ref mut db, ref mut cfs }) => { - if let Some(col) = cfs.pop() { - let name = format!("col{}", cfs.len()); - drop(col); - db.drop_cf(&name)?; - } - Ok(()) - }, - None => Ok(()), - } - } - - /// Add a column family. - pub fn add_column(&self) -> Result<(), String> { - match *self.db.write() { - Some(DBAndColumns { ref mut db, ref mut cfs }) => { - let col = cfs.len() as u32; - let name = format!("col{}", col); - cfs.push(db.create_cf(&name, &col_config(col, &self.config))?); - Ok(()) - }, - None => Ok(()), - } - } -} - -// duplicate declaration of methods here to avoid trait import in certain existing cases -// at time of addition. -impl KeyValueDB for Database { - fn get(&self, col: Option, key: &[u8]) -> Result, String> { - Database::get(self, col, key) - } - - fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { - Database::get_by_prefix(self, col, prefix) - } - - fn write_buffered(&self, transaction: DBTransaction) { - Database::write_buffered(self, transaction) - } - - fn write(&self, transaction: DBTransaction) -> Result<(), String> { - Database::write(self, transaction) - } - - fn flush(&self) -> Result<(), String> { - Database::flush(self) - } - - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { - let unboxed = Database::iter(self, col); - Box::new(unboxed.into_iter().flat_map(|inner| inner)) - } - - fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a> - { - let unboxed = Database::iter_from_prefix(self, col, prefix); - Box::new(unboxed.into_iter().flat_map(|inner| inner)) - } - - fn restore(&self, new_db: &str) -> Result<(), Error> { - Database::restore(self, new_db) - } -} - -impl Drop for Database { - fn drop(&mut self) { - // write all buffered changes if we can. 
- let _ = self.flush(); - } -} - -#[cfg(test)] -mod tests { - use bigint::hash::H256; - use super::*; - use devtools::*; - use std::str::FromStr; - - fn test_db(config: &DatabaseConfig) { - let path = RandomTempPath::create_dir(); - let db = Database::open(config, path.as_path().to_str().unwrap()).unwrap(); - let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - - let mut batch = db.transaction(); - batch.put(None, &key1, b"cat"); - batch.put(None, &key2, b"dog"); - db.write(batch).unwrap(); - - assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"cat"); - - let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect(); - assert_eq!(contents.len(), 2); - assert_eq!(&*contents[0].0, &*key1); - assert_eq!(&*contents[0].1, b"cat"); - assert_eq!(&*contents[1].0, &*key2); - assert_eq!(&*contents[1].1, b"dog"); - - let mut batch = db.transaction(); - batch.delete(None, &key1); - db.write(batch).unwrap(); - - assert!(db.get(None, &key1).unwrap().is_none()); - - let mut batch = db.transaction(); - batch.put(None, &key1, b"cat"); - db.write(batch).unwrap(); - - let mut transaction = db.transaction(); - transaction.put(None, &key3, b"elephant"); - transaction.delete(None, &key1); - db.write(transaction).unwrap(); - assert!(db.get(None, &key1).unwrap().is_none()); - assert_eq!(&*db.get(None, &key3).unwrap().unwrap(), b"elephant"); - - assert_eq!(&*db.get_by_prefix(None, &key3).unwrap(), b"elephant"); - assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog"); - - let mut transaction = db.transaction(); - transaction.put(None, &key1, b"horse"); - transaction.delete(None, &key3); - db.write_buffered(transaction); - assert!(db.get(None, &key3).unwrap().is_none()); - assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse"); - - db.flush().unwrap(); - assert!(db.get(None, &key3).unwrap().is_none()); - assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse"); - } - - #[test] - fn kvdb() { - let path = RandomTempPath::create_dir(); - let _ = Database::open_default(path.as_path().to_str().unwrap()).unwrap(); - test_db(&DatabaseConfig::default()); - } - - #[test] - #[cfg(target_os = "linux")] - fn df_to_rotational() { - use std::path::PathBuf; - // Example df output. - let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10]; - let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational")); - assert_eq!(rotational_from_df_output(example_df), expected_output); - } - - #[test] - fn add_columns() { - let config = DatabaseConfig::default(); - let config_5 = DatabaseConfig::with_columns(Some(5)); - - let path = RandomTempPath::create_dir(); - - // open empty, add 5. 
- { - let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap(); - assert_eq!(db.num_columns(), 0); - - for i in 0..5 { - db.add_column().unwrap(); - assert_eq!(db.num_columns(), i + 1); - } - } - - // reopen as 5. - { - let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap(); - assert_eq!(db.num_columns(), 5); - } - } - - #[test] - fn drop_columns() { - let config = DatabaseConfig::default(); - let config_5 = DatabaseConfig::with_columns(Some(5)); - - let path = RandomTempPath::create_dir(); - - // open 5, remove all. - { - let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap(); - assert_eq!(db.num_columns(), 5); - - for i in (0..5).rev() { - db.drop_column().unwrap(); - assert_eq!(db.num_columns(), i); - } - } - - // reopen as 0. - { - let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap(); - assert_eq!(db.num_columns(), 0); - } - } -} diff --git a/util/migration/Cargo.toml b/util/migration/Cargo.toml index 927ea232d..d16264198 100644 --- a/util/migration/Cargo.toml +++ b/util/migration/Cargo.toml @@ -7,4 +7,5 @@ authors = ["Parity Technologies "] log = "0.3" macros = { path = "../macros" } kvdb = { path = "../kvdb" } +kvdb-rocksdb = { path = "../kvdb-rocksdb" } ethcore-devtools = { path = "../../devtools" } diff --git a/util/migration/src/lib.rs b/util/migration/src/lib.rs index e854c12f2..f832db865 100644 --- a/util/migration/src/lib.rs +++ b/util/migration/src/lib.rs @@ -25,14 +25,15 @@ extern crate macros; extern crate ethcore_devtools as devtools; extern crate kvdb; +extern crate kvdb_rocksdb; use std::collections::BTreeMap; -use std::fs; -use std::fmt; use std::path::{Path, PathBuf}; use std::sync::Arc; +use std::{fs, fmt}; -use kvdb::{CompactionProfile, Database, DatabaseConfig, DBTransaction}; +use kvdb::DBTransaction; +use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig}; /// Migration config. #[derive(Clone)] @@ -274,7 +275,7 @@ impl Manager { trace!(target: "migration", "Expecting database to contain {:?} columns", columns); let mut db_config = DatabaseConfig { max_open_files: 64, - cache_sizes: Default::default(), + memory_budget: None, compaction: config.compaction_profile, columns: columns, wal: true, diff --git a/util/migration/src/tests.rs b/util/migration/src/tests.rs index 6445d58f7..1f712262f 100644 --- a/util/migration/src/tests.rs +++ b/util/migration/src/tests.rs @@ -22,7 +22,7 @@ use std::collections::BTreeMap; use std::sync::Arc; use std::path::{Path, PathBuf}; use {Batch, Config, Error, SimpleMigration, Migration, Manager, ChangeColumns}; -use kvdb::Database; +use kvdb_rocksdb::Database; use devtools::RandomTempPath; fn db_path(path: &Path) -> PathBuf { @@ -229,7 +229,7 @@ fn pre_columns() { #[test] fn change_columns() { - use kvdb::DatabaseConfig; + use kvdb_rocksdb::DatabaseConfig; let mut manager = Manager::new(Config::default()); manager.add_migration(ChangeColumns { diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 52f80a39f..446a5459c 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -55,13 +55,6 @@ impl ArchiveDB { } } - /// Create a new instance with an anonymous temporary database. - #[cfg(test)] - fn new_temp() -> ArchiveDB { - let backing = Arc::new(::kvdb::in_memory(0)); - Self::new(backing, None) - } - fn payload(&self, key: &H256) -> Option { self.backing.get(self.column, key).expect("Low-level database error. 
Some issue with your hard disk?") } @@ -206,18 +199,16 @@ mod tests { #![cfg_attr(feature="dev", allow(blacklisted_name))] #![cfg_attr(feature="dev", allow(similar_names))] - use std::path::Path; use keccak::keccak; use hashdb::{HashDB, DBValue}; use super::*; use journaldb::traits::JournalDB; - use kvdb::Database; - use bigint::hash::H32; + use kvdb_memorydb; #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = ArchiveDB::new_temp(); + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let x = jdb.insert(b"X"); jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); @@ -239,7 +230,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = ArchiveDB::new_temp(); + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let h = jdb.insert(b"foo"); jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h)); @@ -257,7 +248,7 @@ mod tests { #[test] #[should_panic] fn multiple_owed_removal_not_allowed() { - let mut jdb = ArchiveDB::new_temp(); + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let h = jdb.insert(b"foo"); jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h)); @@ -271,7 +262,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = ArchiveDB::new_temp(); + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -303,7 +294,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = ArchiveDB::new_temp(); + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -329,7 +320,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = ArchiveDB::new_temp(); + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let foo = jdb.insert(b"foo"); jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); @@ -348,7 +339,7 @@ mod tests { #[test] fn fork_same_key() { // history is 1 - let mut jdb = ArchiveDB::new_temp(); + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); let foo = jdb.insert(b"foo"); @@ -362,19 +353,13 @@ mod tests { assert!(jdb.contains(&foo)); } - fn new_db(dir: &Path) -> ArchiveDB { - let db = Database::open_default(dir.to_str().unwrap()).unwrap(); - ArchiveDB::new(Arc::new(db), None) - } - #[test] fn reopen() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); + let shared_db = Arc::new(kvdb_memorydb::create(0)); let bar = H256::random(); let foo = { - let mut jdb = new_db(&dir); + let mut jdb = ArchiveDB::new(shared_db.clone(), None); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), DBValue::from_slice(b"bar")); @@ -383,13 +368,13 @@ mod tests { }; { - let mut jdb = new_db(&dir); + let mut jdb = ArchiveDB::new(shared_db.clone(), None); jdb.remove(&foo); jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); } { - let mut jdb = new_db(&dir); + let mut jdb = ArchiveDB::new(shared_db, None); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); @@ -398,11 +383,10 @@ mod tests { #[test] fn reopen_remove() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); + let shared_db = Arc::new(kvdb_memorydb::create(0)); let foo = { - let mut jdb = new_db(&dir); + let mut jdb = ArchiveDB::new(shared_db.clone(), None); // history is 
1 let foo = jdb.insert(b"foo"); jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); @@ -416,7 +400,7 @@ mod tests { }; { - let mut jdb = new_db(&dir); + let mut jdb = ArchiveDB::new(shared_db, None); jdb.remove(&foo); jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo)); @@ -428,10 +412,9 @@ mod tests { #[test] fn reopen_fork() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); + let shared_db = Arc::new(kvdb_memorydb::create(0)); let (foo, _, _) = { - let mut jdb = new_db(&dir); + let mut jdb = ArchiveDB::new(shared_db.clone(), None); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -446,7 +429,7 @@ mod tests { }; { - let mut jdb = new_db(&dir); + let mut jdb = ArchiveDB::new(shared_db, None); jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo)); } @@ -454,17 +437,17 @@ mod tests { #[test] fn returns_state() { - let temp = ::devtools::RandomTempPath::new(); + let shared_db = Arc::new(kvdb_memorydb::create(0)); let key = { - let mut jdb = new_db(temp.as_path().as_path()); + let mut jdb = ArchiveDB::new(shared_db.clone(), None); let key = jdb.insert(b"foo"); jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); key }; { - let jdb = new_db(temp.as_path().as_path()); + let jdb = ArchiveDB::new(shared_db, None); let state = jdb.state(&key); assert!(state.is_some()); } @@ -472,9 +455,7 @@ mod tests { #[test] fn inject() { - let temp = ::devtools::RandomTempPath::new(); - - let mut jdb = new_db(temp.as_path().as_path()); + let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let key = jdb.insert(b"dog"); jdb.inject_batch().unwrap(); diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index a21a6eedb..5ca023cb0 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -140,13 +140,6 @@ impl EarlyMergeDB { } } - /// Create a new instance with an anonymous temporary database. 
-	#[cfg(test)]
-	fn new_temp() -> EarlyMergeDB {
-		let backing = Arc::new(::kvdb::in_memory(0));
-		Self::new(backing, None)
-	}
-
 	fn morph_key(key: &H256, index: u8) -> Bytes {
 		let mut ret = (&**key).to_owned();
 		ret.push(index);
@@ -554,19 +547,17 @@ mod tests {
 	#![cfg_attr(feature="dev", allow(blacklisted_name))]
 	#![cfg_attr(feature="dev", allow(similar_names))]
 
-	use std::path::Path;
 	use keccak::keccak;
 	use hashdb::{HashDB, DBValue};
 	use super::*;
 	use super::super::traits::JournalDB;
 	use ethcore_logger::init_log;
-	use kvdb::{DatabaseConfig};
-	use bigint::hash::H32;
+	use kvdb_memorydb;
 
 	#[test]
 	fn insert_same_in_fork() {
 		// history is 1
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 
 		let x = jdb.insert(b"X");
 		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
@@ -595,7 +586,7 @@ mod tests {
 
 	#[test]
 	fn insert_older_era() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0a"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@@ -616,7 +607,7 @@ mod tests {
 	#[test]
 	fn long_history() {
 		// history is 3
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@@ -639,7 +630,7 @@ mod tests {
 	#[test]
 	fn complex() {
 		// history is 1
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 
 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@@ -682,7 +673,7 @@ mod tests {
 	#[test]
 	fn fork() {
 		// history is 1
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 
 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@@ -714,7 +705,7 @@ mod tests {
 	#[test]
 	fn overwrite() {
 		// history is 1
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 
 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@@ -737,7 +728,7 @@ mod tests {
 
 	#[test]
 	fn fork_same_key_one() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
 
@@ -762,7 +753,7 @@ mod tests {
 
 	#[test]
 	fn fork_same_key_other() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
 
@@ -787,7 +778,7 @@ mod tests {
 
 	#[test]
 	fn fork_ins_del_ins() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
 
@@ -818,20 +809,18 @@ mod tests {
 		assert!(jdb.can_reconstruct_refs());
 	}
 
-	fn new_db(path: &Path) -> EarlyMergeDB {
-		let config = DatabaseConfig::with_columns(Some(1));
-		let backing = Arc::new(::kvdb::Database::open(&config, path.to_str().unwrap()).unwrap());
-		EarlyMergeDB::new(backing, Some(0))
+	fn new_db() -> EarlyMergeDB {
+		let backing = Arc::new(kvdb_memorydb::create(0));
+		EarlyMergeDB::new(backing, None)
 	}
 
 	#[test]
 	fn reopen() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
 
 		let bar = H256::random();
 		let foo = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
@@ -841,14 +830,14 @@ mod tests {
 		};
 
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			jdb.remove(&foo);
 			jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 		}
 
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db, None);
 			assert!(jdb.contains(&foo));
 			assert!(jdb.contains(&bar));
 			jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@@ -861,7 +850,7 @@ mod tests {
 	fn insert_delete_insert_delete_insert_expunge() {
 		init_log();
 
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 
 		// history is 4
 		let foo = jdb.insert(b"foo");
@@ -887,7 +876,7 @@ mod tests {
 	#[test]
 	fn forked_insert_delete_insert_delete_insert_expunge() {
 		init_log();
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 
 		// history is 4
 		let foo = jdb.insert(b"foo");
@@ -933,7 +922,7 @@ mod tests {
 
 	#[test]
 	fn broken_assert() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 
 		// history is 1
 		let foo = jdb.insert(b"foo");
@@ -962,7 +951,7 @@ mod tests {
 
 	#[test]
 	fn reopen_test() {
-		let mut jdb = EarlyMergeDB::new_temp();
+		let mut jdb = new_db();
 
 		// history is 4
 		let foo = jdb.insert(b"foo");
@@ -997,13 +986,11 @@ mod tests {
 	fn reopen_remove_three() {
 		init_log();
 
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
-
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
 		let foo = keccak(b"foo");
 
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			// history is 1
 			jdb.insert(b"foo");
 			jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@@ -1025,7 +1012,7 @@ mod tests {
 			// incantation to reopen the db
 		};
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			jdb.remove(&foo);
 			jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
 
@@ -1034,7 +1021,7 @@ mod tests {
 			// incantation to reopen the db
 		};
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 
@@ -1042,7 +1029,7 @@ mod tests {
 			// incantation to reopen the db
 		};
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db, None);
 			jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 
@@ -1052,10 +1039,10 @@ mod tests {
 
 	#[test]
 	fn reopen_fork() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
+
 		let (foo, bar, baz) = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			let bar = jdb.insert(b"bar");
@@ -1073,7 +1060,7 @@ mod tests {
 		};
 
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = EarlyMergeDB::new(shared_db, None);
 			jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 			assert!(jdb.contains(&foo));
@@ -1084,9 +1071,7 @@ mod tests {
 
 	#[test]
 	fn inject() {
-		let temp = ::devtools::RandomTempPath::new();
-
-		let mut jdb = new_db(temp.as_path().as_path());
+		let mut jdb = new_db();
 		let key = jdb.insert(b"dog");
 		jdb.inject_batch().unwrap();
 
diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs
index 687333d67..71ce05696 100644
--- a/util/src/journaldb/overlayrecentdb.rs
+++ b/util/src/journaldb/overlayrecentdb.rs
@@ -117,13 +117,6 @@ impl OverlayRecentDB {
 		}
 	}
 
-	/// Create a new instance with an anonymous temporary database.
-	#[cfg(test)]
-	pub fn new_temp() -> OverlayRecentDB {
-		let backing = Arc::new(::kvdb::in_memory(0));
-		Self::new(backing, None)
-	}
-
 	#[cfg(test)]
 	fn can_reconstruct_refs(&self) -> bool {
 		let reconstructed = Self::read_overlay(&*self.backing, self.column);
@@ -462,24 +455,22 @@ mod tests {
 	#![cfg_attr(feature="dev", allow(blacklisted_name))]
 	#![cfg_attr(feature="dev", allow(similar_names))]
 
-	use std::path::Path;
 	use keccak::keccak;
 	use super::*;
 	use hashdb::{HashDB, DBValue};
 	use ethcore_logger::init_log;
 	use journaldb::JournalDB;
-	use kvdb::Database;
-	use bigint::hash::H32;
+	use kvdb_memorydb;
 
-	fn new_db(path: &Path) -> OverlayRecentDB {
-		let backing = Arc::new(Database::open_default(path.to_str().unwrap()).unwrap());
+	fn new_db() -> OverlayRecentDB {
+		let backing = Arc::new(kvdb_memorydb::create(0));
 		OverlayRecentDB::new(backing, None)
 	}
 
 	#[test]
 	fn insert_same_in_fork() {
 		// history is 1
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 
 		let x = jdb.insert(b"X");
 		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
@@ -509,7 +500,7 @@ mod tests {
 	#[test]
 	fn long_history() {
 		// history is 3
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@@ -532,7 +523,7 @@ mod tests {
 	#[test]
 	fn complex() {
 		// history is 1
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 
 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@@ -575,7 +566,7 @@ mod tests {
 	#[test]
 	fn fork() {
 		// history is 1
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 
 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@@ -607,7 +598,7 @@ mod tests {
 	#[test]
 	fn overwrite() {
 		// history is 1
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 
 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@@ -629,7 +620,7 @@ mod tests {
 
 	#[test]
 	fn fork_same_key_one() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
 
@@ -654,7 +645,7 @@ mod tests {
 
 	#[test]
 	fn fork_same_key_other() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
 
@@ -680,7 +671,7 @@ mod tests {
 
 	#[test]
 	fn fork_ins_del_ins() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
 
@@ -714,12 +705,11 @@ mod tests {
 
 	#[test]
 	fn reopen() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
 
 		let bar = H256::random();
 		let foo = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
@@ -729,14 +719,14 @@ mod tests {
 		};
 
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			jdb.remove(&foo);
 			jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 		}
 
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			assert!(jdb.contains(&foo));
 			assert!(jdb.contains(&bar));
 			jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@@ -748,7 +738,7 @@ mod tests {
 	#[test]
 	fn insert_delete_insert_delete_insert_expunge() {
 		init_log();
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 
 		// history is 4
 		let foo = jdb.insert(b"foo");
@@ -774,7 +764,7 @@ mod tests {
 	#[test]
 	fn forked_insert_delete_insert_delete_insert_expunge() {
 		init_log();
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 
 		// history is 4
 		let foo = jdb.insert(b"foo");
@@ -820,7 +810,7 @@ mod tests {
 
 	#[test]
 	fn broken_assert() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 
 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
@@ -848,7 +838,7 @@ mod tests {
 
 	#[test]
 	fn reopen_test() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		// history is 4
 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@@ -882,13 +872,11 @@ mod tests {
 	fn reopen_remove_three() {
 		init_log();
 
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
-
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
 		let foo = keccak(b"foo");
 
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			// history is 1
 			jdb.insert(b"foo");
 			jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@@ -910,7 +898,7 @@ mod tests {
 			// incantation to reopen the db
 		};
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			jdb.remove(&foo);
 			jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
 
@@ -919,7 +907,7 @@ mod tests {
 			// incantation to reopen the db
 		};
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 
@@ -927,7 +915,7 @@ mod tests {
 			// incantation to reopen the db
 		};
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db, None);
 			jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 
@@ -937,10 +925,10 @@ mod tests {
 
 	#[test]
 	fn reopen_fork() {
-		let mut dir = ::std::env::temp_dir();
-		dir.push(H32::random().hex());
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
+
 		let (foo, bar, baz) = {
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			let bar = jdb.insert(b"bar");
@@ -958,7 +946,7 @@ mod tests {
 		};
 
 		{
-			let mut jdb = new_db(&dir);
+			let mut jdb = OverlayRecentDB::new(shared_db, None);
 			jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 			assert!(jdb.contains(&foo));
@@ -969,7 +957,7 @@ mod tests {
 
 	#[test]
 	fn insert_older_era() {
-		let mut jdb = OverlayRecentDB::new_temp();
+		let mut jdb = new_db();
 		let foo = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0a"), None).unwrap();
 		assert!(jdb.can_reconstruct_refs());
@@ -989,9 +977,7 @@ mod tests {
 
 	#[test]
 	fn inject() {
-		let temp = ::devtools::RandomTempPath::new();
-
-		let mut jdb = new_db(temp.as_path().as_path());
+		let mut jdb = new_db();
 		let key = jdb.insert(b"dog");
 		jdb.inject_batch().unwrap();
 
@@ -1004,10 +990,10 @@ mod tests {
 
 	#[test]
 	fn earliest_era() {
-		let temp = ::devtools::RandomTempPath::new();
+		let shared_db = Arc::new(kvdb_memorydb::create(0));
 
 		// empty DB
-		let mut jdb = new_db(temp.as_path().as_path());
+		let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
 		assert!(jdb.earliest_era().is_none());
 
 		// single journalled era.
@@ -1041,7 +1027,7 @@ mod tests {
 
 		// reconstructed: no journal entries.
 		drop(jdb);
-		let jdb = new_db(temp.as_path().as_path());
+		let jdb = OverlayRecentDB::new(shared_db, None);
 		assert_eq!(jdb.earliest_era(), None);
 	}
 }
diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs
index eeca11085..b97940321 100644
--- a/util/src/journaldb/refcounteddb.rs
+++ b/util/src/journaldb/refcounteddb.rs
@@ -75,13 +75,6 @@ impl RefCountedDB {
 			column: col,
 		}
 	}
-
-	/// Create a new instance with an anonymous temporary database.
-	#[cfg(test)]
-	fn new_temp() -> RefCountedDB {
-		let backing = Arc::new(::kvdb::in_memory(0));
-		Self::new(backing, None)
-	}
 }
 
 impl HashDB for RefCountedDB {
@@ -217,13 +210,19 @@ mod tests {
 
 	use keccak::keccak;
 	use hashdb::{HashDB, DBValue};
+	use kvdb_memorydb;
 	use super::*;
 	use super::super::traits::JournalDB;
 
+	fn new_db() -> RefCountedDB {
+		let backing = Arc::new(kvdb_memorydb::create(0));
+		RefCountedDB::new(backing, None)
+	}
+
 	#[test]
 	fn long_history() {
 		// history is 3
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
 		assert!(jdb.contains(&h));
@@ -241,7 +240,7 @@ mod tests {
 	#[test]
 	fn latest_era_should_work() {
 		// history is 3
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();
 		assert_eq!(jdb.latest_era(), None);
 		let h = jdb.insert(b"foo");
 		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@@ -260,7 +259,7 @@ mod tests {
 	#[test]
 	fn complex() {
 		// history is 1
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();
 
 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@@ -298,7 +297,7 @@ mod tests {
 	#[test]
 	fn fork() {
 		// history is 1
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();
 
 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@@ -325,7 +324,7 @@ mod tests {
 
 	#[test]
 	fn inject() {
-		let mut jdb = RefCountedDB::new_temp();
+		let mut jdb = new_db();
 		let key = jdb.insert(b"dog");
 		jdb.inject_batch().unwrap();
 
diff --git a/util/src/lib.rs b/util/src/lib.rs
index 863f811c4..4c50013a5 100644
--- a/util/src/lib.rs
+++ b/util/src/lib.rs
@@ -110,6 +110,9 @@ extern crate patricia_trie as trie;
 extern crate kvdb;
 extern crate util_error as error;
 
+#[cfg(test)]
+extern crate kvdb_memorydb;
+
 #[macro_use]
 extern crate log as rlog;
 
diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs
index b4c0beb25..6844eb801 100644
--- a/util/src/overlaydb.rs
+++ b/util/src/overlaydb.rs
@@ -50,7 +50,7 @@ impl OverlayDB {
 	/// Create a new instance of OverlayDB with an anonymous temporary database.
 	#[cfg(test)]
 	pub fn new_temp() -> OverlayDB {
-		let backing = Arc::new(::kvdb::in_memory(0));
+		let backing = Arc::new(::kvdb_memorydb::create(0));
 		Self::new(backing, None)
 	}