Merge pull request #6720 from paritytech/kvdb_split
separated kvdb into 3 crates: kvdb, kvdb-memorydb && kvdb-rocksdb
This commit is contained in:
parent
54bae9a0f2
commit
61c3e1a2d6
35
Cargo.lock
generated
35
Cargo.lock
generated
@ -587,6 +587,8 @@ dependencies = [
|
|||||||
"hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
|
"hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
|
||||||
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"kvdb 0.1.0",
|
"kvdb 0.1.0",
|
||||||
|
"kvdb-memorydb 0.1.0",
|
||||||
|
"kvdb-rocksdb 0.1.0",
|
||||||
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
"linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -744,6 +746,8 @@ dependencies = [
|
|||||||
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"kvdb 0.1.0",
|
"kvdb 0.1.0",
|
||||||
|
"kvdb-memorydb 0.1.0",
|
||||||
|
"kvdb-rocksdb 0.1.0",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"memorydb 0.1.0",
|
"memorydb 0.1.0",
|
||||||
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -830,6 +834,7 @@ dependencies = [
|
|||||||
"hash 0.1.0",
|
"hash 0.1.0",
|
||||||
"hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)",
|
"hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"kvdb 0.1.0",
|
"kvdb 0.1.0",
|
||||||
|
"kvdb-rocksdb 0.1.0",
|
||||||
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"native-contracts 0.1.0",
|
"native-contracts 0.1.0",
|
||||||
@ -885,6 +890,7 @@ dependencies = [
|
|||||||
"hashdb 0.1.0",
|
"hashdb 0.1.0",
|
||||||
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"kvdb 0.1.0",
|
"kvdb 0.1.0",
|
||||||
|
"kvdb-memorydb 0.1.0",
|
||||||
"libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
|
"libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
"lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -1010,6 +1016,7 @@ dependencies = [
|
|||||||
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
"ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"kvdb 0.1.0",
|
"kvdb 0.1.0",
|
||||||
|
"kvdb-memorydb 0.1.0",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"macros 0.1.0",
|
"macros 0.1.0",
|
||||||
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -1500,10 +1507,26 @@ version = "0.1.0"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
"elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
"error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"ethcore-bigint 0.1.3",
|
|
||||||
"ethcore-bytes 0.1.0",
|
"ethcore-bytes 0.1.0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "kvdb-memorydb"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"kvdb 0.1.0",
|
||||||
|
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
|
"rlp 0.2.0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "kvdb-rocksdb"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
|
"ethcore-bigint 0.1.3",
|
||||||
"ethcore-devtools 1.8.0",
|
"ethcore-devtools 1.8.0",
|
||||||
"hashdb 0.1.0",
|
"kvdb 0.1.0",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
"regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -1656,6 +1679,7 @@ version = "0.1.0"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"ethcore-devtools 1.8.0",
|
"ethcore-devtools 1.8.0",
|
||||||
"kvdb 0.1.0",
|
"kvdb 0.1.0",
|
||||||
|
"kvdb-rocksdb 0.1.0",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"macros 0.1.0",
|
"macros 0.1.0",
|
||||||
]
|
]
|
||||||
@ -1845,7 +1869,7 @@ dependencies = [
|
|||||||
"ethcore-network 1.8.0",
|
"ethcore-network 1.8.0",
|
||||||
"ethcore-util 1.8.5",
|
"ethcore-util 1.8.5",
|
||||||
"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"kvdb 0.1.0",
|
"kvdb-memorydb 0.1.0",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"native-contracts 0.1.0",
|
"native-contracts 0.1.0",
|
||||||
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -2076,7 +2100,7 @@ dependencies = [
|
|||||||
"ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
"ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"isatty 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
"isatty 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
|
"jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
|
||||||
"kvdb 0.1.0",
|
"kvdb-rocksdb 0.1.0",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"migration 0.1.0",
|
"migration 0.1.0",
|
||||||
"node-filter 1.8.0",
|
"node-filter 1.8.0",
|
||||||
@ -2223,6 +2247,7 @@ dependencies = [
|
|||||||
"ethcore-util 1.8.5",
|
"ethcore-util 1.8.5",
|
||||||
"ethkey 0.2.0",
|
"ethkey 0.2.0",
|
||||||
"kvdb 0.1.0",
|
"kvdb 0.1.0",
|
||||||
|
"kvdb-memorydb 0.1.0",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"rlp 0.2.0",
|
"rlp 0.2.0",
|
||||||
"serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
|
"serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
@ -2280,7 +2305,7 @@ dependencies = [
|
|||||||
"jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
|
"jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
|
||||||
"jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
|
"jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
|
||||||
"jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
|
"jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
|
||||||
"kvdb 0.1.0",
|
"kvdb-memorydb 0.1.0",
|
||||||
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
"macros 0.1.0",
|
"macros 0.1.0",
|
||||||
"multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
"multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||||
|
@ -63,7 +63,7 @@ path = { path = "util/path" }
|
|||||||
panic_hook = { path = "panic_hook" }
|
panic_hook = { path = "panic_hook" }
|
||||||
hash = { path = "util/hash" }
|
hash = { path = "util/hash" }
|
||||||
migration = { path = "util/migration" }
|
migration = { path = "util/migration" }
|
||||||
kvdb = { path = "util/kvdb" }
|
kvdb-rocksdb = { path = "util/kvdb-rocksdb" }
|
||||||
|
|
||||||
parity-dapps = { path = "dapps", optional = true }
|
parity-dapps = { path = "dapps", optional = true }
|
||||||
clippy = { version = "0.0.103", optional = true}
|
clippy = { version = "0.0.103", optional = true}
|
||||||
|
@ -56,6 +56,8 @@ rand = "0.3"
|
|||||||
rlp = { path = "../util/rlp" }
|
rlp = { path = "../util/rlp" }
|
||||||
rlp_derive = { path = "../util/rlp_derive" }
|
rlp_derive = { path = "../util/rlp_derive" }
|
||||||
kvdb = { path = "../util/kvdb" }
|
kvdb = { path = "../util/kvdb" }
|
||||||
|
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
|
||||||
|
kvdb-memorydb = { path = "../util/kvdb-memorydb" }
|
||||||
util-error = { path = "../util/error" }
|
util-error = { path = "../util/error" }
|
||||||
snappy = { path = "../util/snappy" }
|
snappy = { path = "../util/snappy" }
|
||||||
migration = { path = "../util/migration" }
|
migration = { path = "../util/migration" }
|
||||||
|
@ -40,6 +40,8 @@ stats = { path = "../../util/stats" }
|
|||||||
hash = { path = "../../util/hash" }
|
hash = { path = "../../util/hash" }
|
||||||
triehash = { path = "../../util/triehash" }
|
triehash = { path = "../../util/triehash" }
|
||||||
kvdb = { path = "../../util/kvdb" }
|
kvdb = { path = "../../util/kvdb" }
|
||||||
|
kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
|
||||||
|
kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = []
|
default = []
|
||||||
|
@ -728,13 +728,14 @@ mod tests {
|
|||||||
use ethcore::header::Header;
|
use ethcore::header::Header;
|
||||||
use ethcore::spec::Spec;
|
use ethcore::spec::Spec;
|
||||||
use cache::Cache;
|
use cache::Cache;
|
||||||
use kvdb::{in_memory, KeyValueDB};
|
use kvdb::KeyValueDB;
|
||||||
|
use kvdb_memorydb;
|
||||||
|
|
||||||
use time::Duration;
|
use time::Duration;
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
|
|
||||||
fn make_db() -> Arc<KeyValueDB> {
|
fn make_db() -> Arc<KeyValueDB> {
|
||||||
Arc::new(in_memory(0))
|
Arc::new(kvdb_memorydb::create(0))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
@ -36,7 +36,8 @@ use bigint::prelude::U256;
|
|||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use futures::{IntoFuture, Future};
|
use futures::{IntoFuture, Future};
|
||||||
|
|
||||||
use kvdb::{KeyValueDB, CompactionProfile};
|
use kvdb::KeyValueDB;
|
||||||
|
use kvdb_rocksdb::CompactionProfile;
|
||||||
|
|
||||||
use self::fetch::ChainDataFetcher;
|
use self::fetch::ChainDataFetcher;
|
||||||
use self::header_chain::{AncestryIter, HeaderChain};
|
use self::header_chain::{AncestryIter, HeaderChain};
|
||||||
@ -214,7 +215,7 @@ impl<T: ChainDataFetcher> Client<T> {
|
|||||||
io_channel: IoChannel<ClientIoMessage>,
|
io_channel: IoChannel<ClientIoMessage>,
|
||||||
cache: Arc<Mutex<Cache>>
|
cache: Arc<Mutex<Cache>>
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let db = ::kvdb::in_memory(0);
|
let db = ::kvdb_memorydb::create(0);
|
||||||
|
|
||||||
Client::new(
|
Client::new(
|
||||||
config,
|
config,
|
||||||
|
@ -25,7 +25,7 @@ use ethcore::db;
|
|||||||
use ethcore::service::ClientIoMessage;
|
use ethcore::service::ClientIoMessage;
|
||||||
use ethcore::spec::Spec;
|
use ethcore::spec::Spec;
|
||||||
use io::{IoContext, IoError, IoHandler, IoService};
|
use io::{IoContext, IoError, IoHandler, IoService};
|
||||||
use kvdb::{Database, DatabaseConfig};
|
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||||
|
|
||||||
use cache::Cache;
|
use cache::Cache;
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
|
@ -92,6 +92,8 @@ extern crate vm;
|
|||||||
extern crate hash;
|
extern crate hash;
|
||||||
extern crate triehash;
|
extern crate triehash;
|
||||||
extern crate kvdb;
|
extern crate kvdb;
|
||||||
|
extern crate kvdb_memorydb;
|
||||||
|
extern crate kvdb_rocksdb;
|
||||||
|
|
||||||
#[cfg(feature = "ipc")]
|
#[cfg(feature = "ipc")]
|
||||||
extern crate ethcore_ipc as ipc;
|
extern crate ethcore_ipc as ipc;
|
||||||
|
@ -11,10 +11,12 @@ ethcore = { path = ".."}
|
|||||||
ethcore-util = { path = "../../util" }
|
ethcore-util = { path = "../../util" }
|
||||||
ethcore-bigint = { path = "../../util/bigint" }
|
ethcore-bigint = { path = "../../util/bigint" }
|
||||||
ethcore-bytes = { path = "../../util/bytes" }
|
ethcore-bytes = { path = "../../util/bytes" }
|
||||||
ethcore-io = { path = "../../util/io" }
|
|
||||||
ethcore-network = { path = "../../util/network" }
|
ethcore-network = { path = "../../util/network" }
|
||||||
kvdb = { path = "../../util/kvdb" }
|
|
||||||
native-contracts = { path = "../native_contracts" }
|
native-contracts = { path = "../native_contracts" }
|
||||||
futures = "0.1"
|
futures = "0.1"
|
||||||
log = "0.3"
|
log = "0.3"
|
||||||
parking_lot = "0.4"
|
parking_lot = "0.4"
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
|
||||||
|
ethcore-io = { path = "../../util/io" }
|
||||||
|
@ -24,9 +24,14 @@ extern crate ethcore_network as network;
|
|||||||
extern crate native_contracts;
|
extern crate native_contracts;
|
||||||
extern crate futures;
|
extern crate futures;
|
||||||
extern crate parking_lot;
|
extern crate parking_lot;
|
||||||
extern crate kvdb;
|
|
||||||
#[cfg(test)] extern crate ethcore_io as io;
|
#[macro_use]
|
||||||
#[macro_use] extern crate log;
|
extern crate log;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
extern crate kvdb_memorydb;
|
||||||
|
#[cfg(test)]
|
||||||
|
extern crate ethcore_io as io;
|
||||||
|
|
||||||
use std::sync::Weak;
|
use std::sync::Weak;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
@ -135,7 +140,7 @@ mod test {
|
|||||||
let contract_addr = Address::from_str("0000000000000000000000000000000000000005").unwrap();
|
let contract_addr = Address::from_str("0000000000000000000000000000000000000005").unwrap();
|
||||||
let data = include_bytes!("../res/node_filter.json");
|
let data = include_bytes!("../res/node_filter.json");
|
||||||
let spec = Spec::load(&::std::env::temp_dir(), &data[..]).unwrap();
|
let spec = Spec::load(&::std::env::temp_dir(), &data[..]).unwrap();
|
||||||
let client_db = Arc::new(::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));
|
let client_db = Arc::new(::kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));
|
||||||
|
|
||||||
let client = Client::new(
|
let client = Client::new(
|
||||||
ClientConfig::default(),
|
ClientConfig::default(),
|
||||||
|
@ -1479,7 +1479,8 @@ mod tests {
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use rustc_hex::FromHex;
|
use rustc_hex::FromHex;
|
||||||
use hash::keccak;
|
use hash::keccak;
|
||||||
use kvdb::{in_memory, KeyValueDB};
|
use kvdb::KeyValueDB;
|
||||||
|
use kvdb_memorydb;
|
||||||
use bigint::hash::*;
|
use bigint::hash::*;
|
||||||
use receipt::{Receipt, TransactionOutcome};
|
use receipt::{Receipt, TransactionOutcome};
|
||||||
use blockchain::{BlockProvider, BlockChain, Config, ImportRoute};
|
use blockchain::{BlockProvider, BlockChain, Config, ImportRoute};
|
||||||
@ -1493,7 +1494,7 @@ mod tests {
|
|||||||
use header::BlockNumber;
|
use header::BlockNumber;
|
||||||
|
|
||||||
fn new_db() -> Arc<KeyValueDB> {
|
fn new_db() -> Arc<KeyValueDB> {
|
||||||
Arc::new(in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
|
Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_chain(genesis: &[u8], db: Arc<KeyValueDB>) -> BlockChain {
|
fn new_chain(genesis: &[u8], db: Arc<KeyValueDB>) -> BlockChain {
|
||||||
|
@ -21,7 +21,7 @@ use std::fmt::{Display, Formatter, Error as FmtError};
|
|||||||
use mode::Mode as IpcMode;
|
use mode::Mode as IpcMode;
|
||||||
use verification::{VerifierType, QueueConfig};
|
use verification::{VerifierType, QueueConfig};
|
||||||
use util::journaldb;
|
use util::journaldb;
|
||||||
use kvdb::CompactionProfile;
|
use kvdb_rocksdb::CompactionProfile;
|
||||||
|
|
||||||
pub use std::time::Duration;
|
pub use std::time::Duration;
|
||||||
pub use blockchain::Config as BlockChainConfig;
|
pub use blockchain::Config as BlockChainConfig;
|
||||||
|
@ -21,8 +21,7 @@ use std::sync::Arc;
|
|||||||
use bigint::prelude::U256;
|
use bigint::prelude::U256;
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use util::journaldb;
|
use util::journaldb;
|
||||||
use trie;
|
use {trie, kvdb_memorydb, bytes};
|
||||||
use bytes;
|
|
||||||
use kvdb::{self, KeyValueDB};
|
use kvdb::{self, KeyValueDB};
|
||||||
use {state, state_db, client, executive, trace, transaction, db, spec, pod_state};
|
use {state, state_db, client, executive, trace, transaction, db, spec, pod_state};
|
||||||
use factory::Factories;
|
use factory::Factories;
|
||||||
@ -128,7 +127,7 @@ impl<'a> EvmTestClient<'a> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn state_from_spec(spec: &'a spec::Spec, factories: &Factories) -> Result<state::State<state_db::StateDB>, EvmTestError> {
|
fn state_from_spec(spec: &'a spec::Spec, factories: &Factories) -> Result<state::State<state_db::StateDB>, EvmTestError> {
|
||||||
let db = Arc::new(kvdb::in_memory(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
|
let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
|
||||||
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
|
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
|
||||||
let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
|
let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
|
||||||
state_db = spec.ensure_db_good(state_db, factories)?;
|
state_db = spec.ensure_db_good(state_db, factories)?;
|
||||||
@ -150,7 +149,7 @@ impl<'a> EvmTestClient<'a> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn state_from_pod(spec: &'a spec::Spec, factories: &Factories, pod_state: pod_state::PodState) -> Result<state::State<state_db::StateDB>, EvmTestError> {
|
fn state_from_pod(spec: &'a spec::Spec, factories: &Factories, pod_state: pod_state::PodState) -> Result<state::State<state_db::StateDB>, EvmTestError> {
|
||||||
let db = Arc::new(kvdb::in_memory(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
|
let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
|
||||||
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
|
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
|
||||||
let state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
|
let state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
|
||||||
let mut state = state::State::new(
|
let mut state = state::State::new(
|
||||||
|
@ -27,7 +27,7 @@ use bigint::prelude::U256;
|
|||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use parking_lot::RwLock;
|
use parking_lot::RwLock;
|
||||||
use util::*;
|
use util::*;
|
||||||
use kvdb::{Database, DatabaseConfig};
|
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use rlp::*;
|
use rlp::*;
|
||||||
use ethkey::{Generator, Random};
|
use ethkey::{Generator, Random};
|
||||||
|
@ -57,7 +57,7 @@ pub fn json_chain_test(json_data: &[u8]) -> Vec<String> {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let db = Arc::new(::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
|
let db = Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
|
||||||
let mut config = ClientConfig::default();
|
let mut config = ClientConfig::default();
|
||||||
config.history = 8;
|
config.history = 8;
|
||||||
let client = Client::new(
|
let client = Client::new(
|
||||||
|
@ -113,6 +113,8 @@ extern crate ansi_term;
|
|||||||
extern crate semantic_version;
|
extern crate semantic_version;
|
||||||
extern crate unexpected;
|
extern crate unexpected;
|
||||||
extern crate kvdb;
|
extern crate kvdb;
|
||||||
|
extern crate kvdb_rocksdb;
|
||||||
|
extern crate kvdb_memorydb;
|
||||||
extern crate util_error;
|
extern crate util_error;
|
||||||
extern crate snappy;
|
extern crate snappy;
|
||||||
extern crate migration;
|
extern crate migration;
|
||||||
|
@ -22,7 +22,7 @@ use std::collections::HashMap;
|
|||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use util::Address;
|
use util::Address;
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use kvdb::Database;
|
use kvdb_rocksdb::Database;
|
||||||
use migration::{Batch, Config, Error, Migration, SimpleMigration, Progress};
|
use migration::{Batch, Config, Error, Migration, SimpleMigration, Progress};
|
||||||
use hash::keccak;
|
use hash::keccak;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
@ -26,7 +26,8 @@ use migration::{Error, Migration, Progress, Batch, Config};
|
|||||||
use util::journaldb;
|
use util::journaldb;
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use trie::Trie;
|
use trie::Trie;
|
||||||
use kvdb::{Database, DBTransaction};
|
use kvdb::DBTransaction;
|
||||||
|
use kvdb_rocksdb::Database;
|
||||||
|
|
||||||
/// Account bloom upgrade routine. If bloom already present, does nothing.
|
/// Account bloom upgrade routine. If bloom already present, does nothing.
|
||||||
/// If database empty (no best block), does nothing.
|
/// If database empty (no best block), does nothing.
|
||||||
|
@ -18,7 +18,7 @@
|
|||||||
//! This migration consolidates all databases into single one using Column Families.
|
//! This migration consolidates all databases into single one using Column Families.
|
||||||
|
|
||||||
use rlp::{Rlp, RlpStream};
|
use rlp::{Rlp, RlpStream};
|
||||||
use kvdb::Database;
|
use kvdb_rocksdb::Database;
|
||||||
use migration::{Batch, Config, Error, Migration, Progress};
|
use migration::{Batch, Config, Error, Migration, Progress};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
@ -19,7 +19,8 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use kvdb::{Database, DatabaseConfig, KeyValueDB};
|
use kvdb::KeyValueDB;
|
||||||
|
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use io::*;
|
use io::*;
|
||||||
use spec::Spec;
|
use spec::Spec;
|
||||||
|
@ -40,7 +40,7 @@ use parking_lot::{Mutex, RwLock, RwLockReadGuard};
|
|||||||
use util_error::UtilError;
|
use util_error::UtilError;
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
use util::journaldb::Algorithm;
|
use util::journaldb::Algorithm;
|
||||||
use kvdb::{Database, DatabaseConfig};
|
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||||
use snappy;
|
use snappy;
|
||||||
|
|
||||||
/// Helper for removing directories in case of error.
|
/// Helper for removing directories in case of error.
|
||||||
@ -682,7 +682,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn cannot_finish_with_invalid_chunks() {
|
fn cannot_finish_with_invalid_chunks() {
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use kvdb::DatabaseConfig;
|
use kvdb_rocksdb::DatabaseConfig;
|
||||||
|
|
||||||
let spec = get_test_spec();
|
let spec = get_test_spec();
|
||||||
let dir = RandomTempPath::new();
|
let dir = RandomTempPath::new();
|
||||||
|
@ -31,7 +31,7 @@ use tests::helpers;
|
|||||||
use transaction::{Transaction, Action, SignedTransaction};
|
use transaction::{Transaction, Action, SignedTransaction};
|
||||||
|
|
||||||
use util::Address;
|
use util::Address;
|
||||||
use kvdb;
|
use kvdb_memorydb;
|
||||||
|
|
||||||
const PASS: &'static str = "";
|
const PASS: &'static str = "";
|
||||||
const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes activated.
|
const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes activated.
|
||||||
@ -238,7 +238,7 @@ fn fixed_to_contract_only() {
|
|||||||
assert_eq!(client.chain_info().best_block_number, 11);
|
assert_eq!(client.chain_info().best_block_number, 11);
|
||||||
let reader = snapshot_helpers::snap(&*client);
|
let reader = snapshot_helpers::snap(&*client);
|
||||||
|
|
||||||
let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0));
|
let new_db = kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0));
|
||||||
let spec = spec_fixed_to_contract();
|
let spec = spec_fixed_to_contract();
|
||||||
|
|
||||||
// ensure fresh engine's step matches.
|
// ensure fresh engine's step matches.
|
||||||
@ -270,7 +270,7 @@ fn fixed_to_contract_to_contract() {
|
|||||||
|
|
||||||
assert_eq!(client.chain_info().best_block_number, 16);
|
assert_eq!(client.chain_info().best_block_number, 16);
|
||||||
let reader = snapshot_helpers::snap(&*client);
|
let reader = snapshot_helpers::snap(&*client);
|
||||||
let new_db = kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0));
|
let new_db = kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0));
|
||||||
let spec = spec_fixed_to_contract();
|
let spec = spec_fixed_to_contract();
|
||||||
|
|
||||||
for _ in 0..16 { spec.engine.step() }
|
for _ in 0..16 { spec.engine.step() }
|
||||||
|
@ -26,7 +26,8 @@ use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
|
|||||||
|
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
use snappy;
|
use snappy;
|
||||||
use kvdb::{self, KeyValueDB, DBTransaction};
|
use kvdb::{KeyValueDB, DBTransaction};
|
||||||
|
use kvdb_memorydb;
|
||||||
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::sync::atomic::AtomicBool;
|
use std::sync::atomic::AtomicBool;
|
||||||
@ -43,7 +44,7 @@ fn chunk_and_restore(amount: u64) {
|
|||||||
let mut snapshot_path = new_path.as_path().to_owned();
|
let mut snapshot_path = new_path.as_path().to_owned();
|
||||||
snapshot_path.push("SNAP");
|
snapshot_path.push("SNAP");
|
||||||
|
|
||||||
let old_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
|
let old_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
|
||||||
let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
|
let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
|
||||||
|
|
||||||
// build the blockchain.
|
// build the blockchain.
|
||||||
@ -80,7 +81,7 @@ fn chunk_and_restore(amount: u64) {
|
|||||||
writer.into_inner().finish(manifest.clone()).unwrap();
|
writer.into_inner().finish(manifest.clone()).unwrap();
|
||||||
|
|
||||||
// restore it.
|
// restore it.
|
||||||
let new_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
|
let new_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
|
||||||
let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
|
let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
|
||||||
let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap();
|
let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap();
|
||||||
|
|
||||||
@ -127,7 +128,7 @@ fn checks_flag() {
|
|||||||
|
|
||||||
let chunk = stream.out();
|
let chunk = stream.out();
|
||||||
|
|
||||||
let db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
|
let db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
|
||||||
let engine = ::spec::Spec::new_test().engine;
|
let engine = ::spec::Spec::new_test().engine;
|
||||||
let chain = BlockChain::new(Default::default(), &genesis, db.clone());
|
let chain = BlockChain::new(Default::default(), &genesis, db.clone());
|
||||||
|
|
||||||
|
@ -27,7 +27,7 @@ use tests::helpers::generate_dummy_client_with_spec_and_data;
|
|||||||
|
|
||||||
use devtools::RandomTempPath;
|
use devtools::RandomTempPath;
|
||||||
use io::IoChannel;
|
use io::IoChannel;
|
||||||
use kvdb::{Database, DatabaseConfig};
|
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||||
|
|
||||||
struct NoopDBRestore;
|
struct NoopDBRestore;
|
||||||
|
|
||||||
|
@ -27,7 +27,7 @@ use error::Error;
|
|||||||
use rand::{XorShiftRng, SeedableRng};
|
use rand::{XorShiftRng, SeedableRng};
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use util::journaldb::{self, Algorithm};
|
use util::journaldb::{self, Algorithm};
|
||||||
use kvdb::{Database, DatabaseConfig};
|
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||||
use memorydb::MemoryDB;
|
use memorydb::MemoryDB;
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
use devtools::RandomTempPath;
|
use devtools::RandomTempPath;
|
||||||
|
@ -672,13 +672,13 @@ impl Spec {
|
|||||||
pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> {
|
pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> {
|
||||||
use transaction::{Action, Transaction};
|
use transaction::{Action, Transaction};
|
||||||
use util::journaldb;
|
use util::journaldb;
|
||||||
use kvdb;
|
use kvdb_memorydb;
|
||||||
|
|
||||||
let genesis = self.genesis_header();
|
let genesis = self.genesis_header();
|
||||||
|
|
||||||
let factories = Default::default();
|
let factories = Default::default();
|
||||||
let mut db = journaldb::new(
|
let mut db = journaldb::new(
|
||||||
Arc::new(kvdb::in_memory(0)),
|
Arc::new(kvdb_memorydb::create(0)),
|
||||||
journaldb::Algorithm::Archive,
|
journaldb::Algorithm::Archive,
|
||||||
None,
|
None,
|
||||||
);
|
);
|
||||||
|
@ -27,7 +27,7 @@ use tests::helpers::*;
|
|||||||
use types::filter::Filter;
|
use types::filter::Filter;
|
||||||
use bigint::prelude::U256;
|
use bigint::prelude::U256;
|
||||||
use util::*;
|
use util::*;
|
||||||
use kvdb::{Database, DatabaseConfig};
|
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||||
use devtools::*;
|
use devtools::*;
|
||||||
use miner::Miner;
|
use miner::Miner;
|
||||||
use spec::Spec;
|
use spec::Spec;
|
||||||
|
@ -232,7 +232,7 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn new_db() -> Arc<::kvdb::KeyValueDB> {
|
fn new_db() -> Arc<::kvdb::KeyValueDB> {
|
||||||
Arc::new(::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
|
Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {
|
pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {
|
||||||
|
@ -27,7 +27,7 @@ use client::*;
|
|||||||
use tests::helpers::*;
|
use tests::helpers::*;
|
||||||
use devtools::RandomTempPath;
|
use devtools::RandomTempPath;
|
||||||
use client::{BlockChainClient, Client, ClientConfig};
|
use client::{BlockChainClient, Client, ClientConfig};
|
||||||
use kvdb::{Database, DatabaseConfig};
|
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use header::Header;
|
use header::Header;
|
||||||
use miner::Miner;
|
use miner::Miner;
|
||||||
|
@ -416,7 +416,8 @@ mod tests {
|
|||||||
use bigint::prelude::U256;
|
use bigint::prelude::U256;
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use util::Address;
|
use util::Address;
|
||||||
use kvdb::{DBTransaction, in_memory, KeyValueDB};
|
use kvdb::{DBTransaction, KeyValueDB};
|
||||||
|
use kvdb_memorydb;
|
||||||
use header::BlockNumber;
|
use header::BlockNumber;
|
||||||
use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
|
use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
|
||||||
use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError};
|
use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError};
|
||||||
@ -467,7 +468,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn new_db() -> Arc<KeyValueDB> {
|
fn new_db() -> Arc<KeyValueDB> {
|
||||||
Arc::new(in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
|
Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
@ -178,7 +178,7 @@ mod test {
|
|||||||
"#;
|
"#;
|
||||||
|
|
||||||
let spec = Spec::load(&::std::env::temp_dir(), spec_data.as_bytes()).unwrap();
|
let spec = Spec::load(&::std::env::temp_dir(), spec_data.as_bytes()).unwrap();
|
||||||
let client_db = Arc::new(::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
|
let client_db = Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
|
||||||
|
|
||||||
let client = Client::new(
|
let client = Client::new(
|
||||||
ClientConfig::default(),
|
ClientConfig::default(),
|
||||||
|
@ -14,4 +14,7 @@ serde = "1.0"
|
|||||||
serde_derive = "1.0"
|
serde_derive = "1.0"
|
||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
log = "0.3"
|
log = "0.3"
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
ethkey = { path = "../ethkey" }
|
ethkey = { path = "../ethkey" }
|
||||||
|
kvdb-memorydb = { path = "../util/kvdb-memorydb" }
|
||||||
|
@ -44,6 +44,8 @@ extern crate log;
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
extern crate ethkey;
|
extern crate ethkey;
|
||||||
|
#[cfg(test)]
|
||||||
|
extern crate kvdb_memorydb;
|
||||||
|
|
||||||
const LOCAL_TRANSACTIONS_KEY: &'static [u8] = &*b"LOCAL_TXS";
|
const LOCAL_TRANSACTIONS_KEY: &'static [u8] = &*b"LOCAL_TXS";
|
||||||
|
|
||||||
@ -243,7 +245,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn twice_empty() {
|
fn twice_empty() {
|
||||||
let db = Arc::new(::kvdb::in_memory(0));
|
let db = Arc::new(::kvdb_memorydb::create(0));
|
||||||
|
|
||||||
{
|
{
|
||||||
let store = super::create(db.clone(), None, Dummy(vec![]));
|
let store = super::create(db.clone(), None, Dummy(vec![]));
|
||||||
@ -272,7 +274,7 @@ mod tests {
|
|||||||
PendingTransaction::new(signed, condition)
|
PendingTransaction::new(signed, condition)
|
||||||
}).collect();
|
}).collect();
|
||||||
|
|
||||||
let db = Arc::new(::kvdb::in_memory(0));
|
let db = Arc::new(::kvdb_memorydb::create(0));
|
||||||
|
|
||||||
{
|
{
|
||||||
// nothing written yet, will write pending.
|
// nothing written yet, will write pending.
|
||||||
@ -311,7 +313,7 @@ mod tests {
|
|||||||
PendingTransaction::new(signed, None)
|
PendingTransaction::new(signed, None)
|
||||||
});
|
});
|
||||||
|
|
||||||
let db = Arc::new(::kvdb::in_memory(0));
|
let db = Arc::new(::kvdb_memorydb::create(0));
|
||||||
{
|
{
|
||||||
// nothing written, will write bad.
|
// nothing written, will write bad.
|
||||||
let store = super::create(db.clone(), None, Dummy(transactions.clone()));
|
let store = super::create(db.clone(), None, Dummy(transactions.clone()));
|
||||||
|
@ -21,7 +21,7 @@ use std::fs::File;
|
|||||||
use bigint::prelude::U256;
|
use bigint::prelude::U256;
|
||||||
use bigint::hash::clean_0x;
|
use bigint::hash::clean_0x;
|
||||||
use util::Address;
|
use util::Address;
|
||||||
use kvdb::CompactionProfile;
|
use kvdb_rocksdb::CompactionProfile;
|
||||||
use util::journaldb::Algorithm;
|
use util::journaldb::Algorithm;
|
||||||
use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType};
|
use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType};
|
||||||
use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy};
|
use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy};
|
||||||
|
@ -62,7 +62,7 @@ extern crate ethcore_bigint as bigint;
|
|||||||
extern crate ethcore_bytes as bytes;
|
extern crate ethcore_bytes as bytes;
|
||||||
extern crate ethcore_network as network;
|
extern crate ethcore_network as network;
|
||||||
extern crate migration as migr;
|
extern crate migration as migr;
|
||||||
extern crate kvdb;
|
extern crate kvdb_rocksdb;
|
||||||
extern crate ethkey;
|
extern crate ethkey;
|
||||||
extern crate ethsync;
|
extern crate ethsync;
|
||||||
extern crate node_health;
|
extern crate node_health;
|
||||||
|
@ -22,7 +22,7 @@ use std::fmt::{Display, Formatter, Error as FmtError};
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use util::journaldb::Algorithm;
|
use util::journaldb::Algorithm;
|
||||||
use migr::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration};
|
use migr::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration};
|
||||||
use kvdb::{CompactionProfile, Database, DatabaseConfig};
|
use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
|
||||||
use ethcore::migrations;
|
use ethcore::migrations;
|
||||||
use ethcore::db;
|
use ethcore::db;
|
||||||
use ethcore::migrations::Extract;
|
use ethcore::migrations::Extract;
|
||||||
@ -283,7 +283,7 @@ mod legacy {
|
|||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use util::journaldb::Algorithm;
|
use util::journaldb::Algorithm;
|
||||||
use migr::{Manager as MigrationManager};
|
use migr::{Manager as MigrationManager};
|
||||||
use kvdb::CompactionProfile;
|
use kvdb_rocksdb::CompactionProfile;
|
||||||
use ethcore::migrations;
|
use ethcore::migrations;
|
||||||
|
|
||||||
/// Blocks database path.
|
/// Blocks database path.
|
||||||
|
@ -57,7 +57,6 @@ rlp = { path = "../util/rlp" }
|
|||||||
stats = { path = "../util/stats" }
|
stats = { path = "../util/stats" }
|
||||||
vm = { path = "../ethcore/vm" }
|
vm = { path = "../ethcore/vm" }
|
||||||
hash = { path = "../util/hash" }
|
hash = { path = "../util/hash" }
|
||||||
kvdb = { path = "../util/kvdb" }
|
|
||||||
hardware-wallet = { path = "../hw" }
|
hardware-wallet = { path = "../hw" }
|
||||||
|
|
||||||
clippy = { version = "0.0.103", optional = true}
|
clippy = { version = "0.0.103", optional = true}
|
||||||
@ -66,6 +65,7 @@ pretty_assertions = "0.1"
|
|||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
macros = { path = "../util/macros" }
|
macros = { path = "../util/macros" }
|
||||||
ethcore-network = { path = "../util/network" }
|
ethcore-network = { path = "../util/network" }
|
||||||
|
kvdb-memorydb = { path = "../util/kvdb-memorydb" }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"]
|
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"]
|
||||||
|
@ -65,7 +65,6 @@ extern crate rlp;
|
|||||||
extern crate stats;
|
extern crate stats;
|
||||||
extern crate hash;
|
extern crate hash;
|
||||||
extern crate hardware_wallet;
|
extern crate hardware_wallet;
|
||||||
extern crate kvdb;
|
|
||||||
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate log;
|
extern crate log;
|
||||||
@ -85,6 +84,9 @@ extern crate pretty_assertions;
|
|||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate macros;
|
extern crate macros;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
extern crate kvdb_memorydb;
|
||||||
|
|
||||||
pub extern crate jsonrpc_ws_server as ws;
|
pub extern crate jsonrpc_ws_server as ws;
|
||||||
|
|
||||||
mod authcodes;
|
mod authcodes;
|
||||||
|
@ -33,7 +33,7 @@ use io::IoChannel;
|
|||||||
use bigint::prelude::U256;
|
use bigint::prelude::U256;
|
||||||
use bigint::hash::H256;
|
use bigint::hash::H256;
|
||||||
use util::Address;
|
use util::Address;
|
||||||
use kvdb::in_memory;
|
use kvdb_memorydb;
|
||||||
|
|
||||||
use jsonrpc_core::IoHandler;
|
use jsonrpc_core::IoHandler;
|
||||||
use v1::impls::{EthClient, SigningUnsafeClient};
|
use v1::impls::{EthClient, SigningUnsafeClient};
|
||||||
@ -131,7 +131,7 @@ impl EthTester {
|
|||||||
let client = Client::new(
|
let client = Client::new(
|
||||||
ClientConfig::default(),
|
ClientConfig::default(),
|
||||||
&spec,
|
&spec,
|
||||||
Arc::new(in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
|
Arc::new(kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
|
||||||
miner_service.clone(),
|
miner_service.clone(),
|
||||||
IoChannel::disconnected(),
|
IoChannel::disconnected(),
|
||||||
).unwrap();
|
).unwrap();
|
||||||
|
@ -33,6 +33,7 @@ ethcore-devtools = { path = "../devtools" }
|
|||||||
ethcore-util = { path = "../util" }
|
ethcore-util = { path = "../util" }
|
||||||
ethcore-bigint = { path = "../util/bigint" }
|
ethcore-bigint = { path = "../util/bigint" }
|
||||||
kvdb = { path = "../util/kvdb" }
|
kvdb = { path = "../util/kvdb" }
|
||||||
|
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
|
||||||
hash = { path = "../util/hash" }
|
hash = { path = "../util/hash" }
|
||||||
ethcore-ipc = { path = "../ipc/rpc" }
|
ethcore-ipc = { path = "../ipc/rpc" }
|
||||||
ethcore-ipc-nano = { path = "../ipc/nano" }
|
ethcore-ipc-nano = { path = "../ipc/nano" }
|
||||||
|
@ -18,7 +18,7 @@ use std::path::PathBuf;
|
|||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use serde_json;
|
use serde_json;
|
||||||
use ethkey::{Secret, Public};
|
use ethkey::{Secret, Public};
|
||||||
use kvdb::{Database, DatabaseIterator};
|
use kvdb_rocksdb::{Database, DatabaseIterator};
|
||||||
use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId};
|
use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId};
|
||||||
use serialization::{SerializablePublic, SerializableSecret};
|
use serialization::{SerializablePublic, SerializableSecret};
|
||||||
|
|
||||||
@ -293,7 +293,7 @@ pub mod tests {
|
|||||||
use serde_json;
|
use serde_json;
|
||||||
use devtools::RandomTempPath;
|
use devtools::RandomTempPath;
|
||||||
use ethkey::{Random, Generator, Public, Secret};
|
use ethkey::{Random, Generator, Public, Secret};
|
||||||
use kvdb::Database;
|
use kvdb_rocksdb::Database;
|
||||||
use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId};
|
use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId};
|
||||||
use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare,
|
use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare,
|
||||||
SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1,
|
SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1,
|
||||||
|
@ -49,6 +49,7 @@ extern crate ethkey;
|
|||||||
extern crate native_contracts;
|
extern crate native_contracts;
|
||||||
extern crate hash;
|
extern crate hash;
|
||||||
extern crate kvdb;
|
extern crate kvdb;
|
||||||
|
extern crate kvdb_rocksdb;
|
||||||
|
|
||||||
mod key_server_cluster;
|
mod key_server_cluster;
|
||||||
mod types;
|
mod types;
|
||||||
|
@ -34,11 +34,14 @@ ethcore-ipc = { path = "../ipc/rpc" }
|
|||||||
semver = "0.6"
|
semver = "0.6"
|
||||||
smallvec = { version = "0.4", features = ["heapsizeof"] }
|
smallvec = { version = "0.4", features = ["heapsizeof"] }
|
||||||
ethcore-ipc-nano = { path = "../ipc/nano" }
|
ethcore-ipc-nano = { path = "../ipc/nano" }
|
||||||
ethcore-devtools = { path = "../devtools" }
|
|
||||||
ethkey = { path = "../ethkey" }
|
|
||||||
parking_lot = "0.4"
|
parking_lot = "0.4"
|
||||||
ipnetwork = "0.12.6"
|
ipnetwork = "0.12.6"
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
ethkey = { path = "../ethkey" }
|
||||||
|
kvdb-memorydb = { path = "../util/kvdb-memorydb" }
|
||||||
|
ethcore-devtools = { path = "../devtools" }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = []
|
default = []
|
||||||
dev = ["clippy", "ethcore/dev", "ethcore-util/dev"]
|
dev = ["clippy", "ethcore/dev", "ethcore-util/dev"]
|
||||||
|
@ -48,6 +48,7 @@ extern crate ethcore_light as light;
|
|||||||
|
|
||||||
#[cfg(test)] extern crate ethcore_devtools as devtools;
|
#[cfg(test)] extern crate ethcore_devtools as devtools;
|
||||||
#[cfg(test)] extern crate ethkey;
|
#[cfg(test)] extern crate ethkey;
|
||||||
|
#[cfg(test)] extern crate kvdb_memorydb;
|
||||||
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate macros;
|
extern crate macros;
|
||||||
|
@ -291,7 +291,7 @@ impl TestNet<EthPeer<EthcoreClient>> {
|
|||||||
let client = EthcoreClient::new(
|
let client = EthcoreClient::new(
|
||||||
ClientConfig::default(),
|
ClientConfig::default(),
|
||||||
&spec,
|
&spec,
|
||||||
Arc::new(::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
|
Arc::new(::kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
|
||||||
Arc::new(Miner::with_spec_and_accounts(&spec, accounts)),
|
Arc::new(Miner::with_spec_and_accounts(&spec, accounts)),
|
||||||
IoChannel::disconnected(),
|
IoChannel::disconnected(),
|
||||||
).unwrap();
|
).unwrap();
|
||||||
|
@ -35,6 +35,9 @@ memorydb = { path = "memorydb" }
|
|||||||
util-error = { path = "error" }
|
util-error = { path = "error" }
|
||||||
kvdb = { path = "kvdb" }
|
kvdb = { path = "kvdb" }
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
kvdb-memorydb = { path = "kvdb-memorydb" }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = []
|
default = []
|
||||||
dev = ["clippy"]
|
dev = ["clippy"]
|
||||||
|
9
util/kvdb-memorydb/Cargo.toml
Normal file
9
util/kvdb-memorydb/Cargo.toml
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
[package]
|
||||||
|
name = "kvdb-memorydb"
|
||||||
|
version = "0.1.0"
|
||||||
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
parking_lot = "0.4"
|
||||||
|
rlp = { path = "../rlp" }
|
||||||
|
kvdb = { path = "../kvdb" }
|
124
util/kvdb-memorydb/src/lib.rs
Normal file
124
util/kvdb-memorydb/src/lib.rs
Normal file
@ -0,0 +1,124 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
extern crate parking_lot;
|
||||||
|
extern crate kvdb;
|
||||||
|
extern crate rlp;
|
||||||
|
|
||||||
|
use std::collections::{BTreeMap, HashMap};
|
||||||
|
use parking_lot::RwLock;
|
||||||
|
use kvdb::{DBValue, Error, DBTransaction, KeyValueDB, DBOp};
|
||||||
|
use rlp::{RlpType, UntrustedRlp, Compressible};
|
||||||
|
|
||||||
|
/// A key-value database fulfilling the `KeyValueDB` trait, living in memory.
|
||||||
|
/// This is generally intended for tests and is not particularly optimized.
|
||||||
|
#[derive(Default)]
|
||||||
|
pub struct InMemory {
|
||||||
|
columns: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, DBValue>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create an in-memory database with the given number of columns.
|
||||||
|
/// Columns will be indexable by 0..`num_cols`
|
||||||
|
pub fn create(num_cols: u32) -> InMemory {
|
||||||
|
let mut cols = HashMap::new();
|
||||||
|
cols.insert(None, BTreeMap::new());
|
||||||
|
|
||||||
|
for idx in 0..num_cols {
|
||||||
|
cols.insert(Some(idx), BTreeMap::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
InMemory {
|
||||||
|
columns: RwLock::new(cols)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KeyValueDB for InMemory {
|
||||||
|
fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
|
||||||
|
let columns = self.columns.read();
|
||||||
|
match columns.get(&col) {
|
||||||
|
None => Err(format!("No such column family: {:?}", col)),
|
||||||
|
Some(map) => Ok(map.get(key).cloned()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
|
||||||
|
let columns = self.columns.read();
|
||||||
|
match columns.get(&col) {
|
||||||
|
None => None,
|
||||||
|
Some(map) =>
|
||||||
|
map.iter()
|
||||||
|
.find(|&(ref k ,_)| k.starts_with(prefix))
|
||||||
|
.map(|(_, v)| v.to_vec().into_boxed_slice())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write_buffered(&self, transaction: DBTransaction) {
|
||||||
|
let mut columns = self.columns.write();
|
||||||
|
let ops = transaction.ops;
|
||||||
|
for op in ops {
|
||||||
|
match op {
|
||||||
|
DBOp::Insert { col, key, value } => {
|
||||||
|
if let Some(col) = columns.get_mut(&col) {
|
||||||
|
col.insert(key.into_vec(), value);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
DBOp::InsertCompressed { col, key, value } => {
|
||||||
|
if let Some(col) = columns.get_mut(&col) {
|
||||||
|
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
|
||||||
|
let mut value = DBValue::new();
|
||||||
|
value.append_slice(&compressed);
|
||||||
|
col.insert(key.into_vec(), value);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
DBOp::Delete { col, key } => {
|
||||||
|
if let Some(col) = columns.get_mut(&col) {
|
||||||
|
col.remove(&*key);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn flush(&self) -> Result<(), String> { Ok(()) }
|
||||||
|
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
|
||||||
|
match self.columns.read().get(&col) {
|
||||||
|
Some(map) => Box::new( // TODO: worth optimizing at all?
|
||||||
|
map.clone()
|
||||||
|
.into_iter()
|
||||||
|
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
|
||||||
|
),
|
||||||
|
None => Box::new(None.into_iter()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
|
||||||
|
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
|
||||||
|
{
|
||||||
|
match self.columns.read().get(&col) {
|
||||||
|
Some(map) => Box::new(
|
||||||
|
map.clone()
|
||||||
|
.into_iter()
|
||||||
|
.skip_while(move |&(ref k, _)| !k.starts_with(prefix))
|
||||||
|
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
|
||||||
|
),
|
||||||
|
None => Box::new(None.into_iter()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn restore(&self, _new_db: &str) -> Result<(), Error> {
|
||||||
|
Err("Attempted to restore in-memory database".into())
|
||||||
|
}
|
||||||
|
}
|
15
util/kvdb-rocksdb/Cargo.toml
Normal file
15
util/kvdb-rocksdb/Cargo.toml
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
[package]
|
||||||
|
name = "kvdb-rocksdb"
|
||||||
|
version = "0.1.0"
|
||||||
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
log = "0.3"
|
||||||
|
elastic-array = "0.9"
|
||||||
|
ethcore-bigint = { path = "../bigint" }
|
||||||
|
ethcore-devtools = { path = "../../devtools" }
|
||||||
|
parking_lot = "0.4"
|
||||||
|
regex = "0.2"
|
||||||
|
rlp = { path = "../rlp" }
|
||||||
|
rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" }
|
||||||
|
kvdb = { path = "../kvdb" }
|
805
util/kvdb-rocksdb/src/lib.rs
Normal file
805
util/kvdb-rocksdb/src/lib.rs
Normal file
@ -0,0 +1,805 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
#[macro_use]
|
||||||
|
extern crate log;
|
||||||
|
|
||||||
|
extern crate elastic_array;
|
||||||
|
extern crate parking_lot;
|
||||||
|
extern crate regex;
|
||||||
|
extern crate rocksdb;
|
||||||
|
|
||||||
|
extern crate ethcore_bigint as bigint;
|
||||||
|
extern crate ethcore_devtools as devtools;
|
||||||
|
extern crate kvdb;
|
||||||
|
extern crate rlp;
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::marker::PhantomData;
|
||||||
|
use std::path::{PathBuf, Path};
|
||||||
|
use std::{mem, fs, io};
|
||||||
|
|
||||||
|
use parking_lot::{Mutex, MutexGuard, RwLock};
|
||||||
|
use rocksdb::{
|
||||||
|
DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
|
||||||
|
Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column, ReadOptions
|
||||||
|
};
|
||||||
|
|
||||||
|
use elastic_array::ElasticArray32;
|
||||||
|
use rlp::{UntrustedRlp, RlpType, Compressible};
|
||||||
|
use kvdb::{KeyValueDB, DBTransaction, DBValue, Error, DBOp};
|
||||||
|
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
use regex::Regex;
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
use std::process::Command;
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
use std::fs::File;
|
||||||
|
|
||||||
|
const DB_BACKGROUND_FLUSHES: i32 = 2;
|
||||||
|
const DB_BACKGROUND_COMPACTIONS: i32 = 2;
|
||||||
|
const DB_WRITE_BUFFER_SIZE: usize = 2048 * 1000;
|
||||||
|
|
||||||
|
enum KeyState {
|
||||||
|
Insert(DBValue),
|
||||||
|
InsertCompressed(DBValue),
|
||||||
|
Delete,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Compaction profile for the database settings
|
||||||
|
#[derive(Clone, Copy, PartialEq, Debug)]
|
||||||
|
pub struct CompactionProfile {
|
||||||
|
/// L0-L1 target file size
|
||||||
|
pub initial_file_size: u64,
|
||||||
|
/// L2-LN target file size multiplier
|
||||||
|
pub file_size_multiplier: i32,
|
||||||
|
/// rate limiter for background flushes and compactions, bytes/sec, if any
|
||||||
|
pub write_rate_limit: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for CompactionProfile {
|
||||||
|
/// Default profile suitable for most storage
|
||||||
|
fn default() -> CompactionProfile {
|
||||||
|
CompactionProfile::ssd()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Given output of df command return Linux rotational flag file path.
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
pub fn rotational_from_df_output(df_out: Vec<u8>) -> Option<PathBuf> {
|
||||||
|
use std::str;
|
||||||
|
str::from_utf8(df_out.as_slice())
|
||||||
|
.ok()
|
||||||
|
// Get the drive name.
|
||||||
|
.and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})")
|
||||||
|
.ok()
|
||||||
|
.and_then(|re| re.captures(df_str))
|
||||||
|
.and_then(|captures| captures.get(1)))
|
||||||
|
// Generate path e.g. /sys/block/sda/queue/rotational
|
||||||
|
.map(|drive_path| {
|
||||||
|
let mut p = PathBuf::from("/sys/block");
|
||||||
|
p.push(drive_path.as_str());
|
||||||
|
p.push("queue/rotational");
|
||||||
|
p
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CompactionProfile {
|
||||||
|
/// Attempt to determine the best profile automatically, only Linux for now.
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
pub fn auto(db_path: &Path) -> CompactionProfile {
|
||||||
|
use std::io::Read;
|
||||||
|
let hdd_check_file = db_path
|
||||||
|
.to_str()
|
||||||
|
.and_then(|path_str| Command::new("df").arg(path_str).output().ok())
|
||||||
|
.and_then(|df_res| match df_res.status.success() {
|
||||||
|
true => Some(df_res.stdout),
|
||||||
|
false => None,
|
||||||
|
})
|
||||||
|
.and_then(rotational_from_df_output);
|
||||||
|
// Read out the file and match compaction profile.
|
||||||
|
if let Some(hdd_check) = hdd_check_file {
|
||||||
|
if let Ok(mut file) = File::open(hdd_check.as_path()) {
|
||||||
|
let mut buffer = [0; 1];
|
||||||
|
if file.read_exact(&mut buffer).is_ok() {
|
||||||
|
// 0 means not rotational.
|
||||||
|
if buffer == [48] { return Self::ssd(); }
|
||||||
|
// 1 means rotational.
|
||||||
|
if buffer == [49] { return Self::hdd(); }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Fallback if drive type was not determined.
|
||||||
|
Self::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Just default for other platforms.
|
||||||
|
#[cfg(not(target_os = "linux"))]
|
||||||
|
pub fn auto(_db_path: &Path) -> CompactionProfile {
|
||||||
|
Self::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Default profile suitable for SSD storage
|
||||||
|
pub fn ssd() -> CompactionProfile {
|
||||||
|
CompactionProfile {
|
||||||
|
initial_file_size: 32 * 1024 * 1024,
|
||||||
|
file_size_multiplier: 2,
|
||||||
|
write_rate_limit: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Slow HDD compaction profile
|
||||||
|
pub fn hdd() -> CompactionProfile {
|
||||||
|
CompactionProfile {
|
||||||
|
initial_file_size: 192 * 1024 * 1024,
|
||||||
|
file_size_multiplier: 1,
|
||||||
|
write_rate_limit: Some(8 * 1024 * 1024),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Database configuration
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct DatabaseConfig {
|
||||||
|
/// Max number of open files.
|
||||||
|
pub max_open_files: i32,
|
||||||
|
/// Cache sizes (in MiB) for specific columns.
|
||||||
|
pub cache_sizes: HashMap<Option<u32>, usize>,
|
||||||
|
/// Compaction profile
|
||||||
|
pub compaction: CompactionProfile,
|
||||||
|
/// Set number of columns
|
||||||
|
pub columns: Option<u32>,
|
||||||
|
/// Should we keep WAL enabled?
|
||||||
|
pub wal: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DatabaseConfig {
|
||||||
|
/// Create new `DatabaseConfig` with default parameters and specified set of columns.
|
||||||
|
/// Note that cache sizes must be explicitly set.
|
||||||
|
pub fn with_columns(columns: Option<u32>) -> Self {
|
||||||
|
let mut config = Self::default();
|
||||||
|
config.columns = columns;
|
||||||
|
config
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set the column cache size in MiB.
|
||||||
|
pub fn set_cache(&mut self, col: Option<u32>, size: usize) {
|
||||||
|
self.cache_sizes.insert(col, size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for DatabaseConfig {
|
||||||
|
fn default() -> DatabaseConfig {
|
||||||
|
DatabaseConfig {
|
||||||
|
cache_sizes: HashMap::new(),
|
||||||
|
max_open_files: 512,
|
||||||
|
compaction: CompactionProfile::default(),
|
||||||
|
columns: None,
|
||||||
|
wal: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Database iterator (for flushed data only)
|
||||||
|
// The compromise of holding only a virtual borrow vs. holding a lock on the
|
||||||
|
// inner DB (to prevent closing via restoration) may be re-evaluated in the future.
|
||||||
|
//
|
||||||
|
pub struct DatabaseIterator<'a> {
|
||||||
|
iter: DBIterator,
|
||||||
|
_marker: PhantomData<&'a Database>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> Iterator for DatabaseIterator<'a> {
|
||||||
|
type Item = (Box<[u8]>, Box<[u8]>);
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
|
self.iter.next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct DBAndColumns {
|
||||||
|
db: DB,
|
||||||
|
cfs: Vec<Column>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// get column family configuration from database config.
|
||||||
|
fn col_config(col: u32, config: &DatabaseConfig) -> Options {
|
||||||
|
// default cache size for columns not specified.
|
||||||
|
const DEFAULT_CACHE: usize = 2;
|
||||||
|
|
||||||
|
let mut opts = Options::new();
|
||||||
|
opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
|
||||||
|
opts.set_target_file_size_base(config.compaction.initial_file_size);
|
||||||
|
opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);
|
||||||
|
opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);
|
||||||
|
|
||||||
|
let col_opt = config.columns.map(|_| col);
|
||||||
|
|
||||||
|
{
|
||||||
|
let cache_size = config.cache_sizes.get(&col_opt).cloned().unwrap_or(DEFAULT_CACHE);
|
||||||
|
let mut block_opts = BlockBasedOptions::new();
|
||||||
|
// all goes to read cache.
|
||||||
|
block_opts.set_cache(Cache::new(cache_size * 1024 * 1024));
|
||||||
|
opts.set_block_based_table_factory(&block_opts);
|
||||||
|
}
|
||||||
|
|
||||||
|
opts
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Key-Value database.
|
||||||
|
pub struct Database {
|
||||||
|
db: RwLock<Option<DBAndColumns>>,
|
||||||
|
config: DatabaseConfig,
|
||||||
|
write_opts: WriteOptions,
|
||||||
|
read_opts: ReadOptions,
|
||||||
|
path: String,
|
||||||
|
// Dirty values added with `write_buffered`. Cleaned on `flush`.
|
||||||
|
overlay: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
|
||||||
|
// Values currently being flushed. Cleared when `flush` completes.
|
||||||
|
flushing: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
|
||||||
|
// Prevents concurrent flushes.
|
||||||
|
// Value indicates if a flush is in progress.
|
||||||
|
flushing_lock: Mutex<bool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Database {
|
||||||
|
/// Open database with default settings.
|
||||||
|
pub fn open_default(path: &str) -> Result<Database, String> {
|
||||||
|
Database::open(&DatabaseConfig::default(), path)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Open database file. Creates if it does not exist.
|
||||||
|
pub fn open(config: &DatabaseConfig, path: &str) -> Result<Database, String> {
|
||||||
|
let mut opts = Options::new();
|
||||||
|
if let Some(rate_limit) = config.compaction.write_rate_limit {
|
||||||
|
opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit))?;
|
||||||
|
}
|
||||||
|
opts.set_parsed_options(&format!("max_total_wal_size={}", 64 * 1024 * 1024))?;
|
||||||
|
opts.set_parsed_options("verify_checksums_in_compaction=0")?;
|
||||||
|
opts.set_parsed_options("keep_log_file_num=1")?;
|
||||||
|
opts.set_max_open_files(config.max_open_files);
|
||||||
|
opts.create_if_missing(true);
|
||||||
|
opts.set_use_fsync(false);
|
||||||
|
opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);
|
||||||
|
|
||||||
|
opts.set_max_background_flushes(DB_BACKGROUND_FLUSHES);
|
||||||
|
opts.set_max_background_compactions(DB_BACKGROUND_COMPACTIONS);
|
||||||
|
|
||||||
|
// compaction settings
|
||||||
|
opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
|
||||||
|
opts.set_target_file_size_base(config.compaction.initial_file_size);
|
||||||
|
opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);
|
||||||
|
|
||||||
|
let mut cf_options = Vec::with_capacity(config.columns.unwrap_or(0) as usize);
|
||||||
|
let cfnames: Vec<_> = (0..config.columns.unwrap_or(0)).map(|c| format!("col{}", c)).collect();
|
||||||
|
let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect();
|
||||||
|
|
||||||
|
for col in 0 .. config.columns.unwrap_or(0) {
|
||||||
|
cf_options.push(col_config(col, &config));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut write_opts = WriteOptions::new();
|
||||||
|
if !config.wal {
|
||||||
|
write_opts.disable_wal(true);
|
||||||
|
}
|
||||||
|
let mut read_opts = ReadOptions::new();
|
||||||
|
read_opts.set_verify_checksums(false);
|
||||||
|
|
||||||
|
let mut cfs: Vec<Column> = Vec::new();
|
||||||
|
let db = match config.columns {
|
||||||
|
Some(columns) => {
|
||||||
|
match DB::open_cf(&opts, path, &cfnames, &cf_options) {
|
||||||
|
Ok(db) => {
|
||||||
|
cfs = cfnames.iter().map(|n| db.cf_handle(n)
|
||||||
|
.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
|
||||||
|
assert!(cfs.len() == columns as usize);
|
||||||
|
Ok(db)
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
// retry and create CFs
|
||||||
|
match DB::open_cf(&opts, path, &[], &[]) {
|
||||||
|
Ok(mut db) => {
|
||||||
|
cfs = cfnames.iter().enumerate().map(|(i, n)| db.create_cf(n, &cf_options[i])).collect::<Result<_, _>>()?;
|
||||||
|
Ok(db)
|
||||||
|
},
|
||||||
|
err @ Err(_) => err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
None => DB::open(&opts, path)
|
||||||
|
};
|
||||||
|
|
||||||
|
let db = match db {
|
||||||
|
Ok(db) => db,
|
||||||
|
Err(ref s) if s.starts_with("Corruption:") => {
|
||||||
|
info!("{}", s);
|
||||||
|
info!("Attempting DB repair for {}", path);
|
||||||
|
DB::repair(&opts, path)?;
|
||||||
|
|
||||||
|
match cfnames.is_empty() {
|
||||||
|
true => DB::open(&opts, path)?,
|
||||||
|
false => DB::open_cf(&opts, path, &cfnames, &cf_options)?
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(s) => { return Err(s); }
|
||||||
|
};
|
||||||
|
let num_cols = cfs.len();
|
||||||
|
Ok(Database {
|
||||||
|
db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })),
|
||||||
|
config: config.clone(),
|
||||||
|
write_opts: write_opts,
|
||||||
|
overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
|
||||||
|
flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
|
||||||
|
flushing_lock: Mutex::new((false)),
|
||||||
|
path: path.to_owned(),
|
||||||
|
read_opts: read_opts,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper to create new transaction for this database.
|
||||||
|
pub fn transaction(&self) -> DBTransaction {
|
||||||
|
DBTransaction::new()
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
fn to_overlay_column(col: Option<u32>) -> usize {
|
||||||
|
col.map_or(0, |c| (c + 1) as usize)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Commit transaction to database.
|
||||||
|
pub fn write_buffered(&self, tr: DBTransaction) {
|
||||||
|
let mut overlay = self.overlay.write();
|
||||||
|
let ops = tr.ops;
|
||||||
|
for op in ops {
|
||||||
|
match op {
|
||||||
|
DBOp::Insert { col, key, value } => {
|
||||||
|
let c = Self::to_overlay_column(col);
|
||||||
|
overlay[c].insert(key, KeyState::Insert(value));
|
||||||
|
},
|
||||||
|
DBOp::InsertCompressed { col, key, value } => {
|
||||||
|
let c = Self::to_overlay_column(col);
|
||||||
|
overlay[c].insert(key, KeyState::InsertCompressed(value));
|
||||||
|
},
|
||||||
|
DBOp::Delete { col, key } => {
|
||||||
|
let c = Self::to_overlay_column(col);
|
||||||
|
overlay[c].insert(key, KeyState::Delete);
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Commit buffered changes to database. Must be called under `flush_lock`
|
||||||
|
fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<bool>) -> Result<(), String> {
|
||||||
|
match *self.db.read() {
|
||||||
|
Some(DBAndColumns { ref db, ref cfs }) => {
|
||||||
|
let batch = WriteBatch::new();
|
||||||
|
mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write());
|
||||||
|
{
|
||||||
|
for (c, column) in self.flushing.read().iter().enumerate() {
|
||||||
|
for (ref key, ref state) in column.iter() {
|
||||||
|
match **state {
|
||||||
|
KeyState::Delete => {
|
||||||
|
if c > 0 {
|
||||||
|
batch.delete_cf(cfs[c - 1], &key)?;
|
||||||
|
} else {
|
||||||
|
batch.delete(&key)?;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
KeyState::Insert(ref value) => {
|
||||||
|
if c > 0 {
|
||||||
|
batch.put_cf(cfs[c - 1], &key, value)?;
|
||||||
|
} else {
|
||||||
|
batch.put(&key, &value)?;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
KeyState::InsertCompressed(ref value) => {
|
||||||
|
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
|
||||||
|
if c > 0 {
|
||||||
|
batch.put_cf(cfs[c - 1], &key, &compressed)?;
|
||||||
|
} else {
|
||||||
|
batch.put(&key, &value)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
db.write_opt(batch, &self.write_opts)?;
|
||||||
|
for column in self.flushing.write().iter_mut() {
|
||||||
|
column.clear();
|
||||||
|
column.shrink_to_fit();
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
None => Err("Database is closed".to_owned())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Commit buffered changes to database.
|
||||||
|
pub fn flush(&self) -> Result<(), String> {
|
||||||
|
let mut lock = self.flushing_lock.lock();
|
||||||
|
// If RocksDB batch allocation fails the thread gets terminated and the lock is released.
|
||||||
|
// The value inside the lock is used to detect that.
|
||||||
|
if *lock {
|
||||||
|
// This can only happen if another flushing thread is terminated unexpectedly.
|
||||||
|
return Err("Database write failure. Running low on memory perhaps?".to_owned());
|
||||||
|
}
|
||||||
|
*lock = true;
|
||||||
|
let result = self.write_flushing_with_lock(&mut lock);
|
||||||
|
*lock = false;
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Commit transaction to database.
|
||||||
|
pub fn write(&self, tr: DBTransaction) -> Result<(), String> {
|
||||||
|
match *self.db.read() {
|
||||||
|
Some(DBAndColumns { ref db, ref cfs }) => {
|
||||||
|
let batch = WriteBatch::new();
|
||||||
|
let ops = tr.ops;
|
||||||
|
for op in ops {
|
||||||
|
match op {
|
||||||
|
DBOp::Insert { col, key, value } => {
|
||||||
|
col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(cfs[c as usize], &key, &value))?
|
||||||
|
},
|
||||||
|
DBOp::InsertCompressed { col, key, value } => {
|
||||||
|
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
|
||||||
|
col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(cfs[c as usize], &key, &compressed))?
|
||||||
|
},
|
||||||
|
DBOp::Delete { col, key } => {
|
||||||
|
col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(cfs[c as usize], &key))?
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
db.write_opt(batch, &self.write_opts)
|
||||||
|
},
|
||||||
|
None => Err("Database is closed".to_owned())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get value by key.
|
||||||
|
pub fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
|
||||||
|
match *self.db.read() {
|
||||||
|
Some(DBAndColumns { ref db, ref cfs }) => {
|
||||||
|
let overlay = &self.overlay.read()[Self::to_overlay_column(col)];
|
||||||
|
match overlay.get(key) {
|
||||||
|
Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
|
||||||
|
Some(&KeyState::Delete) => Ok(None),
|
||||||
|
None => {
|
||||||
|
let flushing = &self.flushing.read()[Self::to_overlay_column(col)];
|
||||||
|
match flushing.get(key) {
|
||||||
|
Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
|
||||||
|
Some(&KeyState::Delete) => Ok(None),
|
||||||
|
None => {
|
||||||
|
col.map_or_else(
|
||||||
|
|| db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))),
|
||||||
|
|c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))))
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
None => Ok(None),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values.
|
||||||
|
// TODO: support prefix seek for unflushed data
|
||||||
|
pub fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
|
||||||
|
self.iter_from_prefix(col, prefix).and_then(|mut iter| {
|
||||||
|
match iter.next() {
|
||||||
|
// TODO: use prefix_same_as_start read option (not availabele in C API currently)
|
||||||
|
Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None },
|
||||||
|
_ => None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get database iterator for flushed data.
|
||||||
|
pub fn iter(&self, col: Option<u32>) -> Option<DatabaseIterator> {
|
||||||
|
//TODO: iterate over overlay
|
||||||
|
match *self.db.read() {
|
||||||
|
Some(DBAndColumns { ref db, ref cfs }) => {
|
||||||
|
let iter = col.map_or_else(
|
||||||
|
|| db.iterator_opt(IteratorMode::Start, &self.read_opts),
|
||||||
|
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts)
|
||||||
|
.expect("iterator params are valid; qed")
|
||||||
|
);
|
||||||
|
|
||||||
|
Some(DatabaseIterator {
|
||||||
|
iter: iter,
|
||||||
|
_marker: PhantomData,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
None => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn iter_from_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<DatabaseIterator> {
|
||||||
|
match *self.db.read() {
|
||||||
|
Some(DBAndColumns { ref db, ref cfs }) => {
|
||||||
|
let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts),
|
||||||
|
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts)
|
||||||
|
.expect("iterator params are valid; qed"));
|
||||||
|
|
||||||
|
Some(DatabaseIterator {
|
||||||
|
iter: iter,
|
||||||
|
_marker: PhantomData,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
None => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Close the database
|
||||||
|
fn close(&self) {
|
||||||
|
*self.db.write() = None;
|
||||||
|
self.overlay.write().clear();
|
||||||
|
self.flushing.write().clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Restore the database from a copy at given path.
|
||||||
|
pub fn restore(&self, new_db: &str) -> Result<(), Error> {
|
||||||
|
self.close();
|
||||||
|
|
||||||
|
let mut backup_db = PathBuf::from(&self.path);
|
||||||
|
backup_db.pop();
|
||||||
|
backup_db.push("backup_db");
|
||||||
|
|
||||||
|
let existed = match fs::rename(&self.path, &backup_db) {
|
||||||
|
Ok(_) => true,
|
||||||
|
Err(e) => if let io::ErrorKind::NotFound = e.kind() {
|
||||||
|
false
|
||||||
|
} else {
|
||||||
|
return Err(e.into());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match fs::rename(&new_db, &self.path) {
|
||||||
|
Ok(_) => {
|
||||||
|
// clean up the backup.
|
||||||
|
if existed {
|
||||||
|
fs::remove_dir_all(&backup_db)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
// restore the backup.
|
||||||
|
if existed {
|
||||||
|
fs::rename(&backup_db, &self.path)?;
|
||||||
|
}
|
||||||
|
return Err(e.into())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// reopen the database and steal handles into self
|
||||||
|
let db = Self::open(&self.config, &self.path)?;
|
||||||
|
*self.db.write() = mem::replace(&mut *db.db.write(), None);
|
||||||
|
*self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new());
|
||||||
|
*self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The number of non-default column families.
|
||||||
|
pub fn num_columns(&self) -> u32 {
|
||||||
|
self.db.read().as_ref()
|
||||||
|
.and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) } )
|
||||||
|
.map(|n| n as u32)
|
||||||
|
.unwrap_or(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Drop a column family.
|
||||||
|
pub fn drop_column(&self) -> Result<(), String> {
|
||||||
|
match *self.db.write() {
|
||||||
|
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
|
||||||
|
if let Some(col) = cfs.pop() {
|
||||||
|
let name = format!("col{}", cfs.len());
|
||||||
|
drop(col);
|
||||||
|
db.drop_cf(&name)?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
None => Ok(()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Add a column family.
|
||||||
|
pub fn add_column(&self) -> Result<(), String> {
|
||||||
|
match *self.db.write() {
|
||||||
|
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
|
||||||
|
let col = cfs.len() as u32;
|
||||||
|
let name = format!("col{}", col);
|
||||||
|
cfs.push(db.create_cf(&name, &col_config(col, &self.config))?);
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
None => Ok(()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// duplicate declaration of methods here to avoid trait import in certain existing cases
|
||||||
|
// at time of addition.
|
||||||
|
impl KeyValueDB for Database {
|
||||||
|
fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
|
||||||
|
Database::get(self, col, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
|
||||||
|
Database::get_by_prefix(self, col, prefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write_buffered(&self, transaction: DBTransaction) {
|
||||||
|
Database::write_buffered(self, transaction)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn write(&self, transaction: DBTransaction) -> Result<(), String> {
|
||||||
|
Database::write(self, transaction)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn flush(&self) -> Result<(), String> {
|
||||||
|
Database::flush(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
|
||||||
|
let unboxed = Database::iter(self, col);
|
||||||
|
Box::new(unboxed.into_iter().flat_map(|inner| inner))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
|
||||||
|
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
|
||||||
|
{
|
||||||
|
let unboxed = Database::iter_from_prefix(self, col, prefix);
|
||||||
|
Box::new(unboxed.into_iter().flat_map(|inner| inner))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn restore(&self, new_db: &str) -> Result<(), Error> {
|
||||||
|
Database::restore(self, new_db)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for Database {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
// write all buffered changes if we can.
|
||||||
|
let _ = self.flush();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
	use bigint::hash::H256;
	use super::*;
	use devtools::*;
	use std::str::FromStr;

	// Exercises the full Database surface (write/read/delete/iterate/prefix
	// lookup/buffered writes/flush) against a temporary on-disk instance.
	fn test_db(config: &DatabaseConfig) {
		let path = RandomTempPath::create_dir();
		let db = Database::open(config, path.as_path().to_str().unwrap()).unwrap();
		// Keys chosen so lexicographic iteration order is key3 < key1 < key2
		// (leading bytes 0x01, 0x02, 0x03 respectively).
		let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
		let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
		let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();

		let mut batch = db.transaction();
		batch.put(None, &key1, b"cat");
		batch.put(None, &key2, b"dog");
		db.write(batch).unwrap();

		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"cat");

		// Direct (non-buffered) writes must be visible to the iterator, in key order.
		let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect();
		assert_eq!(contents.len(), 2);
		assert_eq!(&*contents[0].0, &*key1);
		assert_eq!(&*contents[0].1, b"cat");
		assert_eq!(&*contents[1].0, &*key2);
		assert_eq!(&*contents[1].1, b"dog");

		let mut batch = db.transaction();
		batch.delete(None, &key1);
		db.write(batch).unwrap();

		assert!(db.get(None, &key1).unwrap().is_none());

		let mut batch = db.transaction();
		batch.put(None, &key1, b"cat");
		db.write(batch).unwrap();

		// A single transaction may mix inserts and deletes.
		let mut transaction = db.transaction();
		transaction.put(None, &key3, b"elephant");
		transaction.delete(None, &key1);
		db.write(transaction).unwrap();
		assert!(db.get(None, &key1).unwrap().is_none());
		assert_eq!(&*db.get(None, &key3).unwrap().unwrap(), b"elephant");

		assert_eq!(&*db.get_by_prefix(None, &key3).unwrap(), b"elephant");
		assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog");

		// Buffered writes must be visible to `get` before any flush…
		let mut transaction = db.transaction();
		transaction.put(None, &key1, b"horse");
		transaction.delete(None, &key3);
		db.write_buffered(transaction);
		assert!(db.get(None, &key3).unwrap().is_none());
		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");

		// …and remain visible (now from disk) after the flush.
		db.flush().unwrap();
		assert!(db.get(None, &key3).unwrap().is_none());
		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
	}

	#[test]
	fn kvdb() {
		let path = RandomTempPath::create_dir();
		let _ = Database::open_default(path.as_path().to_str().unwrap()).unwrap();
		test_db(&DatabaseConfig::default());
	}

	#[test]
	#[cfg(target_os = "linux")]
	fn df_to_rotational() {
		use std::path::PathBuf;
		// Example df output.
		let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10];
		// The "/dev/sda1" mount above should map to sda's rotational flag file.
		let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational"));
		assert_eq!(rotational_from_df_output(example_df), expected_output);
	}

	#[test]
	fn add_columns() {
		let config = DatabaseConfig::default();
		let config_5 = DatabaseConfig::with_columns(Some(5));

		let path = RandomTempPath::create_dir();

		// open empty, add 5.
		{
			let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 0);

			for i in 0..5 {
				db.add_column().unwrap();
				assert_eq!(db.num_columns(), i + 1);
			}
		}

		// reopen as 5.
		{
			let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 5);
		}
	}

	#[test]
	fn drop_columns() {
		let config = DatabaseConfig::default();
		let config_5 = DatabaseConfig::with_columns(Some(5));

		let path = RandomTempPath::create_dir();

		// open 5, remove all.
		{
			let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 5);

			for i in (0..5).rev() {
				db.drop_column().unwrap();
				assert_eq!(db.num_columns(), i);
			}
		}

		// reopen as 0.
		{
			let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 0);
		}
	}
}
|
@ -4,14 +4,6 @@ version = "0.1.0"
|
|||||||
authors = ["Parity Technologies <admin@parity.io>"]
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
log = "0.3"
|
|
||||||
ethcore-bytes = { path = "../bytes" }
|
|
||||||
ethcore-bigint = { path = "../bigint" }
|
|
||||||
ethcore-devtools = { path = "../../devtools" }
|
|
||||||
elastic-array = "0.9"
|
elastic-array = "0.9"
|
||||||
hashdb = { path = "../hashdb" }
|
|
||||||
parking_lot = "0.4"
|
|
||||||
regex = "0.2"
|
|
||||||
rlp = { path = "../rlp" }
|
|
||||||
rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" }
|
|
||||||
error-chain = "0.11.0-rc.2"
|
error-chain = "0.11.0-rc.2"
|
||||||
|
ethcore-bytes = { path = "../bytes" }
|
||||||
|
@ -16,48 +16,21 @@
|
|||||||
|
|
||||||
//! Key-Value store abstraction with `RocksDB` backend.
|
//! Key-Value store abstraction with `RocksDB` backend.
|
||||||
|
|
||||||
#[macro_use]
|
|
||||||
extern crate log;
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate error_chain;
|
extern crate error_chain;
|
||||||
|
|
||||||
extern crate ethcore_bytes as bytes;
|
|
||||||
extern crate ethcore_bigint as bigint;
|
|
||||||
extern crate ethcore_devtools as devtools;
|
|
||||||
extern crate elastic_array;
|
extern crate elastic_array;
|
||||||
extern crate hashdb;
|
extern crate ethcore_bytes as bytes;
|
||||||
extern crate parking_lot;
|
|
||||||
extern crate rlp;
|
|
||||||
extern crate rocksdb;
|
|
||||||
extern crate regex;
|
|
||||||
|
|
||||||
use std::{mem, fs, io};
|
use std::io;
|
||||||
use std::collections::{HashMap, BTreeMap};
|
use elastic_array::{ElasticArray128, ElasticArray32};
|
||||||
use std::marker::PhantomData;
|
|
||||||
use std::path::{PathBuf, Path};
|
|
||||||
use parking_lot::{Mutex, MutexGuard, RwLock};
|
|
||||||
|
|
||||||
use elastic_array::*;
|
|
||||||
use hashdb::DBValue;
|
|
||||||
use rlp::{UntrustedRlp, RlpType, Compressible};
|
|
||||||
use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
|
|
||||||
Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column, ReadOptions};
|
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
use regex::Regex;
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
use std::process::Command;
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
use std::fs::File;
|
|
||||||
|
|
||||||
const DB_BACKGROUND_FLUSHES: i32 = 2;
|
|
||||||
const DB_BACKGROUND_COMPACTIONS: i32 = 2;
|
|
||||||
const DB_WRITE_BUFFER_SIZE: usize = 2048 * 1000;
|
|
||||||
|
|
||||||
/// Required length of prefixes.
|
/// Required length of prefixes.
|
||||||
pub const PREFIX_LEN: usize = 12;
|
pub const PREFIX_LEN: usize = 12;
|
||||||
|
|
||||||
|
/// Database value.
|
||||||
|
pub type DBValue = ElasticArray128<u8>;
|
||||||
|
|
||||||
error_chain! {
|
error_chain! {
|
||||||
types {
|
types {
|
||||||
Error, ErrorKind, ResultExt;
|
Error, ErrorKind, ResultExt;
|
||||||
@ -71,11 +44,13 @@ error_chain! {
|
|||||||
/// Write transaction. Batches a sequence of put/delete operations for efficiency.
|
/// Write transaction. Batches a sequence of put/delete operations for efficiency.
|
||||||
#[derive(Default, Clone, PartialEq)]
|
#[derive(Default, Clone, PartialEq)]
|
||||||
pub struct DBTransaction {
|
pub struct DBTransaction {
|
||||||
ops: Vec<DBOp>,
|
/// Database operations.
|
||||||
|
pub ops: Vec<DBOp>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Database operation.
|
||||||
#[derive(Clone, PartialEq)]
|
#[derive(Clone, PartialEq)]
|
||||||
enum DBOp {
|
pub enum DBOp {
|
||||||
Insert {
|
Insert {
|
||||||
col: Option<u32>,
|
col: Option<u32>,
|
||||||
key: ElasticArray32<u8>,
|
key: ElasticArray32<u8>,
|
||||||
@ -150,12 +125,6 @@ impl DBTransaction {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
enum KeyState {
|
|
||||||
Insert(DBValue),
|
|
||||||
InsertCompressed(DBValue),
|
|
||||||
Delete,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Generic key-value database.
|
/// Generic key-value database.
|
||||||
///
|
///
|
||||||
/// This makes a distinction between "buffered" and "flushed" values. Values which have been
|
/// This makes a distinction between "buffered" and "flushed" values. Values which have been
|
||||||
@ -206,847 +175,3 @@ pub trait KeyValueDB: Sync + Send {
|
|||||||
/// Attempt to replace this database with a new one located at the given path.
|
/// Attempt to replace this database with a new one located at the given path.
|
||||||
fn restore(&self, new_db: &str) -> Result<(), Error>;
|
fn restore(&self, new_db: &str) -> Result<(), Error>;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A key-value database fulfilling the `KeyValueDB` trait, living in memory.
|
|
||||||
/// This is generally intended for tests and is not particularly optimized.
|
|
||||||
pub struct InMemory {
|
|
||||||
columns: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, DBValue>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create an in-memory database with the given number of columns.
|
|
||||||
/// Columns will be indexable by 0..`num_cols`
|
|
||||||
pub fn in_memory(num_cols: u32) -> InMemory {
|
|
||||||
let mut cols = HashMap::new();
|
|
||||||
cols.insert(None, BTreeMap::new());
|
|
||||||
|
|
||||||
for idx in 0..num_cols {
|
|
||||||
cols.insert(Some(idx), BTreeMap::new());
|
|
||||||
}
|
|
||||||
|
|
||||||
InMemory {
|
|
||||||
columns: RwLock::new(cols)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KeyValueDB for InMemory {
|
|
||||||
fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
|
|
||||||
let columns = self.columns.read();
|
|
||||||
match columns.get(&col) {
|
|
||||||
None => Err(format!("No such column family: {:?}", col)),
|
|
||||||
Some(map) => Ok(map.get(key).cloned()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
|
|
||||||
let columns = self.columns.read();
|
|
||||||
match columns.get(&col) {
|
|
||||||
None => None,
|
|
||||||
Some(map) =>
|
|
||||||
map.iter()
|
|
||||||
.find(|&(ref k ,_)| k.starts_with(prefix))
|
|
||||||
.map(|(_, v)| v.to_vec().into_boxed_slice())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn write_buffered(&self, transaction: DBTransaction) {
|
|
||||||
let mut columns = self.columns.write();
|
|
||||||
let ops = transaction.ops;
|
|
||||||
for op in ops {
|
|
||||||
match op {
|
|
||||||
DBOp::Insert { col, key, value } => {
|
|
||||||
if let Some(mut col) = columns.get_mut(&col) {
|
|
||||||
col.insert(key.into_vec(), value);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
DBOp::InsertCompressed { col, key, value } => {
|
|
||||||
if let Some(mut col) = columns.get_mut(&col) {
|
|
||||||
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
|
|
||||||
let mut value = DBValue::new();
|
|
||||||
value.append_slice(&compressed);
|
|
||||||
col.insert(key.into_vec(), value);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
DBOp::Delete { col, key } => {
|
|
||||||
if let Some(mut col) = columns.get_mut(&col) {
|
|
||||||
col.remove(&*key);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn flush(&self) -> Result<(), String> { Ok(()) }
|
|
||||||
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
|
|
||||||
match self.columns.read().get(&col) {
|
|
||||||
Some(map) => Box::new( // TODO: worth optimizing at all?
|
|
||||||
map.clone()
|
|
||||||
.into_iter()
|
|
||||||
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
|
|
||||||
),
|
|
||||||
None => Box::new(None.into_iter()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
|
|
||||||
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
|
|
||||||
{
|
|
||||||
match self.columns.read().get(&col) {
|
|
||||||
Some(map) => Box::new(
|
|
||||||
map.clone()
|
|
||||||
.into_iter()
|
|
||||||
.skip_while(move |&(ref k, _)| !k.starts_with(prefix))
|
|
||||||
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
|
|
||||||
),
|
|
||||||
None => Box::new(None.into_iter()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn restore(&self, _new_db: &str) -> Result<(), Error> {
|
|
||||||
Err("Attempted to restore in-memory database".into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Compaction profile for the database settings
|
|
||||||
#[derive(Clone, Copy, PartialEq, Debug)]
|
|
||||||
pub struct CompactionProfile {
|
|
||||||
/// L0-L1 target file size
|
|
||||||
pub initial_file_size: u64,
|
|
||||||
/// L2-LN target file size multiplier
|
|
||||||
pub file_size_multiplier: i32,
|
|
||||||
/// rate limiter for background flushes and compactions, bytes/sec, if any
|
|
||||||
pub write_rate_limit: Option<u64>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for CompactionProfile {
|
|
||||||
/// Default profile suitable for most storage
|
|
||||||
fn default() -> CompactionProfile {
|
|
||||||
CompactionProfile::ssd()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Given output of df command return Linux rotational flag file path.
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
pub fn rotational_from_df_output(df_out: Vec<u8>) -> Option<PathBuf> {
|
|
||||||
use std::str;
|
|
||||||
str::from_utf8(df_out.as_slice())
|
|
||||||
.ok()
|
|
||||||
// Get the drive name.
|
|
||||||
.and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})")
|
|
||||||
.ok()
|
|
||||||
.and_then(|re| re.captures(df_str))
|
|
||||||
.and_then(|captures| captures.get(1)))
|
|
||||||
// Generate path e.g. /sys/block/sda/queue/rotational
|
|
||||||
.map(|drive_path| {
|
|
||||||
let mut p = PathBuf::from("/sys/block");
|
|
||||||
p.push(drive_path.as_str());
|
|
||||||
p.push("queue/rotational");
|
|
||||||
p
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CompactionProfile {
|
|
||||||
/// Attempt to determine the best profile automatically, only Linux for now.
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
pub fn auto(db_path: &Path) -> CompactionProfile {
|
|
||||||
use std::io::Read;
|
|
||||||
let hdd_check_file = db_path
|
|
||||||
.to_str()
|
|
||||||
.and_then(|path_str| Command::new("df").arg(path_str).output().ok())
|
|
||||||
.and_then(|df_res| match df_res.status.success() {
|
|
||||||
true => Some(df_res.stdout),
|
|
||||||
false => None,
|
|
||||||
})
|
|
||||||
.and_then(rotational_from_df_output);
|
|
||||||
// Read out the file and match compaction profile.
|
|
||||||
if let Some(hdd_check) = hdd_check_file {
|
|
||||||
if let Ok(mut file) = File::open(hdd_check.as_path()) {
|
|
||||||
let mut buffer = [0; 1];
|
|
||||||
if file.read_exact(&mut buffer).is_ok() {
|
|
||||||
// 0 means not rotational.
|
|
||||||
if buffer == [48] { return Self::ssd(); }
|
|
||||||
// 1 means rotational.
|
|
||||||
if buffer == [49] { return Self::hdd(); }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Fallback if drive type was not determined.
|
|
||||||
Self::default()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Just default for other platforms.
|
|
||||||
#[cfg(not(target_os = "linux"))]
|
|
||||||
pub fn auto(_db_path: &Path) -> CompactionProfile {
|
|
||||||
Self::default()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Default profile suitable for SSD storage
|
|
||||||
pub fn ssd() -> CompactionProfile {
|
|
||||||
CompactionProfile {
|
|
||||||
initial_file_size: 32 * 1024 * 1024,
|
|
||||||
file_size_multiplier: 2,
|
|
||||||
write_rate_limit: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Slow HDD compaction profile
|
|
||||||
pub fn hdd() -> CompactionProfile {
|
|
||||||
CompactionProfile {
|
|
||||||
initial_file_size: 192 * 1024 * 1024,
|
|
||||||
file_size_multiplier: 1,
|
|
||||||
write_rate_limit: Some(8 * 1024 * 1024),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Database configuration
#[derive(Clone)]
pub struct DatabaseConfig {
	/// Max number of open files.
	pub max_open_files: i32,
	/// Cache sizes (in MiB) for specific columns.
	// Key `None` addresses the default column; `Some(c)` addresses column `c`.
	pub cache_sizes: HashMap<Option<u32>, usize>,
	/// Compaction profile
	pub compaction: CompactionProfile,
	/// Set number of columns
	// `None` opens the database without any column families at all.
	pub columns: Option<u32>,
	/// Should we keep WAL enabled?
	// Disabling trades crash-durability for write throughput.
	pub wal: bool,
}
|
|
||||||
|
|
||||||
impl DatabaseConfig {
|
|
||||||
/// Create new `DatabaseConfig` with default parameters and specified set of columns.
|
|
||||||
/// Note that cache sizes must be explicitly set.
|
|
||||||
pub fn with_columns(columns: Option<u32>) -> Self {
|
|
||||||
let mut config = Self::default();
|
|
||||||
config.columns = columns;
|
|
||||||
config
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the column cache size in MiB.
|
|
||||||
pub fn set_cache(&mut self, col: Option<u32>, size: usize) {
|
|
||||||
self.cache_sizes.insert(col, size);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for DatabaseConfig {
|
|
||||||
fn default() -> DatabaseConfig {
|
|
||||||
DatabaseConfig {
|
|
||||||
cache_sizes: HashMap::new(),
|
|
||||||
max_open_files: 512,
|
|
||||||
compaction: CompactionProfile::default(),
|
|
||||||
columns: None,
|
|
||||||
wal: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Database iterator (for flushed data only)
// The compromise of holding only a virtual borrow vs. holding a lock on the
// inner DB (to prevent closing via restoration) may be re-evaluated in the future.
//
pub struct DatabaseIterator<'a> {
	// Underlying RocksDB iterator; sees only data already flushed to disk.
	iter: DBIterator,
	// Ties the iterator's lifetime to the `Database` it was created from
	// without actually holding a reference (see note above).
	_marker: PhantomData<&'a Database>,
}
|
|
||||||
|
|
||||||
impl<'a> Iterator for DatabaseIterator<'a> {
|
|
||||||
type Item = (Box<[u8]>, Box<[u8]>);
|
|
||||||
|
|
||||||
fn next(&mut self) -> Option<Self::Item> {
|
|
||||||
self.iter.next()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pairs an open RocksDB handle with the handles of its column families,
// so both can be dropped together when the database is closed.
struct DBAndColumns {
	db: DB,
	// cfs[i] is the handle for the column family named "col{i}".
	cfs: Vec<Column>,
}
|
|
||||||
|
|
||||||
// get column family configuration from database config.
|
|
||||||
fn col_config(col: u32, config: &DatabaseConfig) -> Options {
|
|
||||||
// default cache size for columns not specified.
|
|
||||||
const DEFAULT_CACHE: usize = 2;
|
|
||||||
|
|
||||||
let mut opts = Options::new();
|
|
||||||
opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
|
|
||||||
opts.set_target_file_size_base(config.compaction.initial_file_size);
|
|
||||||
opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);
|
|
||||||
opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);
|
|
||||||
|
|
||||||
let col_opt = config.columns.map(|_| col);
|
|
||||||
|
|
||||||
{
|
|
||||||
let cache_size = config.cache_sizes.get(&col_opt).cloned().unwrap_or(DEFAULT_CACHE);
|
|
||||||
let mut block_opts = BlockBasedOptions::new();
|
|
||||||
// all goes to read cache.
|
|
||||||
block_opts.set_cache(Cache::new(cache_size * 1024 * 1024));
|
|
||||||
opts.set_block_based_table_factory(&block_opts);
|
|
||||||
}
|
|
||||||
|
|
||||||
opts
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Key-Value database.
pub struct Database {
	// `None` once `close()` has been called; all operations then no-op or error.
	db: RwLock<Option<DBAndColumns>>,
	// Kept so `restore()` can reopen with identical settings.
	config: DatabaseConfig,
	write_opts: WriteOptions,
	read_opts: ReadOptions,
	// Filesystem location of the database; used by `restore()`.
	path: String,
	// Dirty values added with `write_buffered`. Cleaned on `flush`.
	// Index 0 is the default column; index c + 1 is column c.
	overlay: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
	// Values currently being flushed. Cleared when `flush` completes.
	flushing: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
	// Prevents concurrent flushes.
	// Value indicates if a flush is in progress.
	flushing_lock: Mutex<bool>,
}
|
|
||||||
|
|
||||||
impl Database {
|
|
||||||
/// Open database with default settings.
|
|
||||||
pub fn open_default(path: &str) -> Result<Database, String> {
|
|
||||||
Database::open(&DatabaseConfig::default(), path)
|
|
||||||
}
|
|
||||||
|
|
||||||
	/// Open database file. Creates if it does not exist.
	pub fn open(config: &DatabaseConfig, path: &str) -> Result<Database, String> {
		let mut opts = Options::new();
		// Optional write throttle, e.g. for slow HDDs (see CompactionProfile::hdd).
		if let Some(rate_limit) = config.compaction.write_rate_limit {
			opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit))?;
		}
		opts.set_parsed_options(&format!("max_total_wal_size={}", 64 * 1024 * 1024))?;
		opts.set_parsed_options("verify_checksums_in_compaction=0")?;
		opts.set_parsed_options("keep_log_file_num=1")?;
		opts.set_max_open_files(config.max_open_files);
		opts.create_if_missing(true);
		opts.set_use_fsync(false);
		opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);

		opts.set_max_background_flushes(DB_BACKGROUND_FLUSHES);
		opts.set_max_background_compactions(DB_BACKGROUND_COMPACTIONS);

		// compaction settings
		opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
		opts.set_target_file_size_base(config.compaction.initial_file_size);
		opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);

		// Column families are named "col0".."col{N-1}"; each gets its own options.
		let mut cf_options = Vec::with_capacity(config.columns.unwrap_or(0) as usize);
		let cfnames: Vec<_> = (0..config.columns.unwrap_or(0)).map(|c| format!("col{}", c)).collect();
		let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect();

		for col in 0 .. config.columns.unwrap_or(0) {
			cf_options.push(col_config(col, &config));
		}

		let mut write_opts = WriteOptions::new();
		if !config.wal {
			write_opts.disable_wal(true);
		}
		let mut read_opts = ReadOptions::new();
		read_opts.set_verify_checksums(false);

		let mut cfs: Vec<Column> = Vec::new();
		let db = match config.columns {
			Some(columns) => {
				// Try opening with the expected column families first.
				match DB::open_cf(&opts, path, &cfnames, &cf_options) {
					Ok(db) => {
						cfs = cfnames.iter().map(|n| db.cf_handle(n)
							.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
						assert!(cfs.len() == columns as usize);
						Ok(db)
					}
					Err(_) => {
						// retry and create CFs
						// (e.g. a fresh database where the families don't exist yet)
						match DB::open_cf(&opts, path, &[], &[]) {
							Ok(mut db) => {
								cfs = cfnames.iter().enumerate().map(|(i, n)| db.create_cf(n, &cf_options[i])).collect::<Result<_, _>>()?;
								Ok(db)
							},
							err @ Err(_) => err,
						}
					}
				}
			},
			None => DB::open(&opts, path)
		};

		// On corruption, attempt a one-shot repair, then reopen once.
		let db = match db {
			Ok(db) => db,
			Err(ref s) if s.starts_with("Corruption:") => {
				info!("{}", s);
				info!("Attempting DB repair for {}", path);
				DB::repair(&opts, path)?;

				match cfnames.is_empty() {
					true => DB::open(&opts, path)?,
					false => DB::open_cf(&opts, path, &cfnames, &cf_options)?
				}
			},
			Err(s) => { return Err(s); }
		};
		// Overlay/flushing buffers: slot 0 is the default column,
		// slots 1..=num_cols map to the column families.
		let num_cols = cfs.len();
		Ok(Database {
			db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })),
			config: config.clone(),
			write_opts: write_opts,
			overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
			flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
			flushing_lock: Mutex::new((false)),
			path: path.to_owned(),
			read_opts: read_opts,
		})
	}
|
|
||||||
|
|
||||||
/// Helper to create new transaction for this database.
|
|
||||||
pub fn transaction(&self) -> DBTransaction {
|
|
||||||
DBTransaction::new()
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
fn to_overlay_column(col: Option<u32>) -> usize {
|
|
||||||
col.map_or(0, |c| (c + 1) as usize)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Commit transaction to database.
|
|
||||||
pub fn write_buffered(&self, tr: DBTransaction) {
|
|
||||||
let mut overlay = self.overlay.write();
|
|
||||||
let ops = tr.ops;
|
|
||||||
for op in ops {
|
|
||||||
match op {
|
|
||||||
DBOp::Insert { col, key, value } => {
|
|
||||||
let c = Self::to_overlay_column(col);
|
|
||||||
overlay[c].insert(key, KeyState::Insert(value));
|
|
||||||
},
|
|
||||||
DBOp::InsertCompressed { col, key, value } => {
|
|
||||||
let c = Self::to_overlay_column(col);
|
|
||||||
overlay[c].insert(key, KeyState::InsertCompressed(value));
|
|
||||||
},
|
|
||||||
DBOp::Delete { col, key } => {
|
|
||||||
let c = Self::to_overlay_column(col);
|
|
||||||
overlay[c].insert(key, KeyState::Delete);
|
|
||||||
},
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Commit buffered changes to database. Must be called under `flush_lock`
|
|
||||||
fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<bool>) -> Result<(), String> {
|
|
||||||
match *self.db.read() {
|
|
||||||
Some(DBAndColumns { ref db, ref cfs }) => {
|
|
||||||
let batch = WriteBatch::new();
|
|
||||||
mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write());
|
|
||||||
{
|
|
||||||
for (c, column) in self.flushing.read().iter().enumerate() {
|
|
||||||
for (ref key, ref state) in column.iter() {
|
|
||||||
match **state {
|
|
||||||
KeyState::Delete => {
|
|
||||||
if c > 0 {
|
|
||||||
batch.delete_cf(cfs[c - 1], &key)?;
|
|
||||||
} else {
|
|
||||||
batch.delete(&key)?;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
KeyState::Insert(ref value) => {
|
|
||||||
if c > 0 {
|
|
||||||
batch.put_cf(cfs[c - 1], &key, value)?;
|
|
||||||
} else {
|
|
||||||
batch.put(&key, &value)?;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
KeyState::InsertCompressed(ref value) => {
|
|
||||||
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
|
|
||||||
if c > 0 {
|
|
||||||
batch.put_cf(cfs[c - 1], &key, &compressed)?;
|
|
||||||
} else {
|
|
||||||
batch.put(&key, &value)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
db.write_opt(batch, &self.write_opts)?;
|
|
||||||
for column in self.flushing.write().iter_mut() {
|
|
||||||
column.clear();
|
|
||||||
column.shrink_to_fit();
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
},
|
|
||||||
None => Err("Database is closed".to_owned())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
	/// Commit buffered changes to database.
	// The boolean inside `flushing_lock` acts as a poison flag: it is true
	// exactly while `write_flushing_with_lock` is running, so if a previous
	// flushing thread died mid-write the next caller sees `true` and bails
	// out instead of operating on half-flushed state.
	pub fn flush(&self) -> Result<(), String> {
		let mut lock = self.flushing_lock.lock();
		// If RocksDB batch allocation fails the thread gets terminated and the lock is released.
		// The value inside the lock is used to detect that.
		if *lock {
			// This can only happen if another flushing thread is terminated unexpectedly.
			return Err("Database write failure. Running low on memory perhaps?".to_owned());
		}
		*lock = true;
		let result = self.write_flushing_with_lock(&mut lock);
		*lock = false;
		result
	}
|
|
||||||
|
|
||||||
/// Commit transaction to database.
|
|
||||||
pub fn write(&self, tr: DBTransaction) -> Result<(), String> {
|
|
||||||
match *self.db.read() {
|
|
||||||
Some(DBAndColumns { ref db, ref cfs }) => {
|
|
||||||
let batch = WriteBatch::new();
|
|
||||||
let ops = tr.ops;
|
|
||||||
for op in ops {
|
|
||||||
match op {
|
|
||||||
DBOp::Insert { col, key, value } => {
|
|
||||||
col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(cfs[c as usize], &key, &value))?
|
|
||||||
},
|
|
||||||
DBOp::InsertCompressed { col, key, value } => {
|
|
||||||
let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
|
|
||||||
col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(cfs[c as usize], &key, &compressed))?
|
|
||||||
},
|
|
||||||
DBOp::Delete { col, key } => {
|
|
||||||
col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(cfs[c as usize], &key))?
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
db.write_opt(batch, &self.write_opts)
|
|
||||||
},
|
|
||||||
None => Err("Database is closed".to_owned())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
	/// Get value by key.
	// Lookup order: unflushed overlay first, then the in-flight flushing
	// buffer, and only then the backing store — so buffered writes and
	// deletes are visible before `flush` completes.
	pub fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
		match *self.db.read() {
			Some(DBAndColumns { ref db, ref cfs }) => {
				let overlay = &self.overlay.read()[Self::to_overlay_column(col)];
				match overlay.get(key) {
					Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
					// A buffered delete masks any older value.
					Some(&KeyState::Delete) => Ok(None),
					None => {
						let flushing = &self.flushing.read()[Self::to_overlay_column(col)];
						match flushing.get(key) {
							Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
							Some(&KeyState::Delete) => Ok(None),
							None => {
								// Fall through to RocksDB (default column or cf).
								col.map_or_else(
									|| db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))),
									|c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))))
							},
						}
					},
				}
			},
			// Closed database reads as empty rather than erroring.
			None => Ok(None),
		}
	}
|
|
||||||
|
|
||||||
/// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values.
|
|
||||||
// TODO: support prefix seek for unflushed data
|
|
||||||
pub fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
|
|
||||||
self.iter_from_prefix(col, prefix).and_then(|mut iter| {
|
|
||||||
match iter.next() {
|
|
||||||
// TODO: use prefix_same_as_start read option (not availabele in C API currently)
|
|
||||||
Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None },
|
|
||||||
_ => None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get database iterator for flushed data.
|
|
||||||
pub fn iter(&self, col: Option<u32>) -> Option<DatabaseIterator> {
|
|
||||||
//TODO: iterate over overlay
|
|
||||||
match *self.db.read() {
|
|
||||||
Some(DBAndColumns { ref db, ref cfs }) => {
|
|
||||||
let iter = col.map_or_else(
|
|
||||||
|| db.iterator_opt(IteratorMode::Start, &self.read_opts),
|
|
||||||
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts)
|
|
||||||
.expect("iterator params are valid; qed")
|
|
||||||
);
|
|
||||||
|
|
||||||
Some(DatabaseIterator {
|
|
||||||
iter: iter,
|
|
||||||
_marker: PhantomData,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
None => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn iter_from_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<DatabaseIterator> {
|
|
||||||
match *self.db.read() {
|
|
||||||
Some(DBAndColumns { ref db, ref cfs }) => {
|
|
||||||
let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts),
|
|
||||||
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts)
|
|
||||||
.expect("iterator params are valid; qed"));
|
|
||||||
|
|
||||||
Some(DatabaseIterator {
|
|
||||||
iter: iter,
|
|
||||||
_marker: PhantomData,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
None => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Close the database
|
|
||||||
fn close(&self) {
|
|
||||||
*self.db.write() = None;
|
|
||||||
self.overlay.write().clear();
|
|
||||||
self.flushing.write().clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
	/// Restore the database from a copy at given path.
	// Sequence: close this database, move the current data aside to
	// "backup_db", move the new data into place, then reopen and adopt the
	// fresh handles. On failure the backup is moved back.
	pub fn restore(&self, new_db: &str) -> Result<(), Error> {
		self.close();

		let mut backup_db = PathBuf::from(&self.path);
		backup_db.pop();
		backup_db.push("backup_db");

		// `existed` tracks whether there was a database to back up at all;
		// a missing source directory is fine (fresh install), other rename
		// errors abort.
		let existed = match fs::rename(&self.path, &backup_db) {
			Ok(_) => true,
			Err(e) => if let io::ErrorKind::NotFound = e.kind() {
				false
			} else {
				return Err(e.into());
			}
		};

		match fs::rename(&new_db, &self.path) {
			Ok(_) => {
				// clean up the backup.
				if existed {
					fs::remove_dir_all(&backup_db)?;
				}
			}
			Err(e) => {
				// restore the backup.
				if existed {
					fs::rename(&backup_db, &self.path)?;
				}
				return Err(e.into())
			}
		}

		// reopen the database and steal handles into self
		let db = Self::open(&self.config, &self.path)?;
		*self.db.write() = mem::replace(&mut *db.db.write(), None);
		*self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new());
		*self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new());
		Ok(())
	}
|
|
||||||
|
|
||||||
/// The number of non-default column families.
|
|
||||||
pub fn num_columns(&self) -> u32 {
|
|
||||||
self.db.read().as_ref()
|
|
||||||
.and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) } )
|
|
||||||
.map(|n| n as u32)
|
|
||||||
.unwrap_or(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Drop a column family.
|
|
||||||
pub fn drop_column(&self) -> Result<(), String> {
|
|
||||||
match *self.db.write() {
|
|
||||||
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
|
|
||||||
if let Some(col) = cfs.pop() {
|
|
||||||
let name = format!("col{}", cfs.len());
|
|
||||||
drop(col);
|
|
||||||
db.drop_cf(&name)?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
},
|
|
||||||
None => Ok(()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a column family.
|
|
||||||
pub fn add_column(&self) -> Result<(), String> {
|
|
||||||
match *self.db.write() {
|
|
||||||
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
|
|
||||||
let col = cfs.len() as u32;
|
|
||||||
let name = format!("col{}", col);
|
|
||||||
cfs.push(db.create_cf(&name, &col_config(col, &self.config))?);
|
|
||||||
Ok(())
|
|
||||||
},
|
|
||||||
None => Ok(()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// duplicate declaration of methods here to avoid trait import in certain existing cases
// at time of addition.
// NOTE: the `Database::method(self, ...)` (UFCS) form is deliberate — it
// names the inherent method explicitly; `self.method(...)` here would
// resolve to the trait method and recurse.
impl KeyValueDB for Database {
	fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
		Database::get(self, col, key)
	}

	fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
		Database::get_by_prefix(self, col, prefix)
	}

	fn write_buffered(&self, transaction: DBTransaction) {
		Database::write_buffered(self, transaction)
	}

	fn write(&self, transaction: DBTransaction) -> Result<(), String> {
		Database::write(self, transaction)
	}

	fn flush(&self) -> Result<(), String> {
		Database::flush(self)
	}

	fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
		// The inherent `iter` returns Option<DatabaseIterator>; flat_map
		// turns `None` (closed database) into an empty boxed iterator.
		let unboxed = Database::iter(self, col);
		Box::new(unboxed.into_iter().flat_map(|inner| inner))
	}

	fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
		-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
	{
		let unboxed = Database::iter_from_prefix(self, col, prefix);
		Box::new(unboxed.into_iter().flat_map(|inner| inner))
	}

	fn restore(&self, new_db: &str) -> Result<(), Error> {
		Database::restore(self, new_db)
	}
}
|
|
||||||
|
|
||||||
impl Drop for Database {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
// write all buffered changes if we can.
|
|
||||||
let _ = self.flush();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
	use bigint::hash::H256;
	use super::*;
	use devtools::*;
	use std::str::FromStr;

	// Exercises the full write / read / delete / iterate / buffered-write /
	// flush cycle against a real temporary RocksDB instance.
	fn test_db(config: &DatabaseConfig) {
		let path = RandomTempPath::create_dir();
		let db = Database::open(config, path.as_path().to_str().unwrap()).unwrap();
		let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
		let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
		let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();

		let mut batch = db.transaction();
		batch.put(None, &key1, b"cat");
		batch.put(None, &key2, b"dog");
		db.write(batch).unwrap();

		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"cat");

		// Iteration over flushed data yields keys in sorted order.
		let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect();
		assert_eq!(contents.len(), 2);
		assert_eq!(&*contents[0].0, &*key1);
		assert_eq!(&*contents[0].1, b"cat");
		assert_eq!(&*contents[1].0, &*key2);
		assert_eq!(&*contents[1].1, b"dog");

		let mut batch = db.transaction();
		batch.delete(None, &key1);
		db.write(batch).unwrap();

		assert!(db.get(None, &key1).unwrap().is_none());

		let mut batch = db.transaction();
		batch.put(None, &key1, b"cat");
		db.write(batch).unwrap();

		let mut transaction = db.transaction();
		transaction.put(None, &key3, b"elephant");
		transaction.delete(None, &key1);
		db.write(transaction).unwrap();
		assert!(db.get(None, &key1).unwrap().is_none());
		assert_eq!(&*db.get(None, &key3).unwrap().unwrap(), b"elephant");

		assert_eq!(&*db.get_by_prefix(None, &key3).unwrap(), b"elephant");
		assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog");

		// Buffered writes must be visible before the flush...
		let mut transaction = db.transaction();
		transaction.put(None, &key1, b"horse");
		transaction.delete(None, &key3);
		db.write_buffered(transaction);
		assert!(db.get(None, &key3).unwrap().is_none());
		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");

		// ...and survive it.
		db.flush().unwrap();
		assert!(db.get(None, &key3).unwrap().is_none());
		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
	}

	#[test]
	fn kvdb() {
		let path = RandomTempPath::create_dir();
		let _ = Database::open_default(path.as_path().to_str().unwrap()).unwrap();
		test_db(&DatabaseConfig::default());
	}

	#[test]
	#[cfg(target_os = "linux")]
	fn df_to_rotational() {
		use std::path::PathBuf;
		// Example df output.
		// Decodes to a header line plus "/dev/sda1 ... /" — the drive name
		// "sda" should map to its sysfs rotational flag path.
		let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10];
		let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational"));
		assert_eq!(rotational_from_df_output(example_df), expected_output);
	}

	#[test]
	fn add_columns() {
		let config = DatabaseConfig::default();
		let config_5 = DatabaseConfig::with_columns(Some(5));

		let path = RandomTempPath::create_dir();

		// open empty, add 5.
		{
			let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 0);

			for i in 0..5 {
				db.add_column().unwrap();
				assert_eq!(db.num_columns(), i + 1);
			}
		}

		// reopen as 5.
		{
			let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 5);
		}
	}

	#[test]
	fn drop_columns() {
		let config = DatabaseConfig::default();
		let config_5 = DatabaseConfig::with_columns(Some(5));

		let path = RandomTempPath::create_dir();

		// open 5, remove all.
		{
			let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 5);

			for i in (0..5).rev() {
				db.drop_column().unwrap();
				assert_eq!(db.num_columns(), i);
			}
		}

		// reopen as 0.
		{
			let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 0);
		}
	}
}
|
|
||||||
|
@ -7,4 +7,5 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
|||||||
log = "0.3"
|
log = "0.3"
|
||||||
macros = { path = "../macros" }
|
macros = { path = "../macros" }
|
||||||
kvdb = { path = "../kvdb" }
|
kvdb = { path = "../kvdb" }
|
||||||
|
kvdb-rocksdb = { path = "../kvdb-rocksdb" }
|
||||||
ethcore-devtools = { path = "../../devtools" }
|
ethcore-devtools = { path = "../../devtools" }
|
||||||
|
@ -25,14 +25,15 @@ extern crate macros;
|
|||||||
|
|
||||||
extern crate ethcore_devtools as devtools;
|
extern crate ethcore_devtools as devtools;
|
||||||
extern crate kvdb;
|
extern crate kvdb;
|
||||||
|
extern crate kvdb_rocksdb;
|
||||||
|
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use std::fs;
|
|
||||||
use std::fmt;
|
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::{fs, fmt};
|
||||||
|
|
||||||
use kvdb::{CompactionProfile, Database, DatabaseConfig, DBTransaction};
|
use kvdb::DBTransaction;
|
||||||
|
use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
|
||||||
|
|
||||||
/// Migration config.
|
/// Migration config.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
|
@ -22,7 +22,7 @@ use std::collections::BTreeMap;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use {Batch, Config, Error, SimpleMigration, Migration, Manager, ChangeColumns};
|
use {Batch, Config, Error, SimpleMigration, Migration, Manager, ChangeColumns};
|
||||||
use kvdb::Database;
|
use kvdb_rocksdb::Database;
|
||||||
use devtools::RandomTempPath;
|
use devtools::RandomTempPath;
|
||||||
|
|
||||||
fn db_path(path: &Path) -> PathBuf {
|
fn db_path(path: &Path) -> PathBuf {
|
||||||
@ -229,7 +229,7 @@ fn pre_columns() {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn change_columns() {
|
fn change_columns() {
|
||||||
use kvdb::DatabaseConfig;
|
use kvdb_rocksdb::DatabaseConfig;
|
||||||
|
|
||||||
let mut manager = Manager::new(Config::default());
|
let mut manager = Manager::new(Config::default());
|
||||||
manager.add_migration(ChangeColumns {
|
manager.add_migration(ChangeColumns {
|
||||||
|
@ -55,13 +55,6 @@ impl ArchiveDB {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create a new instance with an anonymous temporary database.
|
|
||||||
#[cfg(test)]
|
|
||||||
fn new_temp() -> ArchiveDB {
|
|
||||||
let backing = Arc::new(::kvdb::in_memory(0));
|
|
||||||
Self::new(backing, None)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn payload(&self, key: &H256) -> Option<DBValue> {
|
fn payload(&self, key: &H256) -> Option<DBValue> {
|
||||||
self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?")
|
self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?")
|
||||||
}
|
}
|
||||||
@ -206,18 +199,16 @@ mod tests {
|
|||||||
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
||||||
#![cfg_attr(feature="dev", allow(similar_names))]
|
#![cfg_attr(feature="dev", allow(similar_names))]
|
||||||
|
|
||||||
use std::path::Path;
|
|
||||||
use keccak::keccak;
|
use keccak::keccak;
|
||||||
use hashdb::{HashDB, DBValue};
|
use hashdb::{HashDB, DBValue};
|
||||||
use super::*;
|
use super::*;
|
||||||
use journaldb::traits::JournalDB;
|
use journaldb::traits::JournalDB;
|
||||||
use kvdb::Database;
|
use kvdb_memorydb;
|
||||||
use bigint::hash::H32;
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn insert_same_in_fork() {
|
fn insert_same_in_fork() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new_temp();
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
||||||
|
|
||||||
let x = jdb.insert(b"X");
|
let x = jdb.insert(b"X");
|
||||||
jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
|
jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
|
||||||
@ -239,7 +230,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn long_history() {
|
fn long_history() {
|
||||||
// history is 3
|
// history is 3
|
||||||
let mut jdb = ArchiveDB::new_temp();
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
||||||
let h = jdb.insert(b"foo");
|
let h = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.contains(&h));
|
assert!(jdb.contains(&h));
|
||||||
@ -257,7 +248,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
#[should_panic]
|
#[should_panic]
|
||||||
fn multiple_owed_removal_not_allowed() {
|
fn multiple_owed_removal_not_allowed() {
|
||||||
let mut jdb = ArchiveDB::new_temp();
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
||||||
let h = jdb.insert(b"foo");
|
let h = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.contains(&h));
|
assert!(jdb.contains(&h));
|
||||||
@ -271,7 +262,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn complex() {
|
fn complex() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new_temp();
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -303,7 +294,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn fork() {
|
fn fork() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new_temp();
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -329,7 +320,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn overwrite() {
|
fn overwrite() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new_temp();
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
@ -348,7 +339,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn fork_same_key() {
|
fn fork_same_key() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new_temp();
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
@ -362,19 +353,13 @@ mod tests {
|
|||||||
assert!(jdb.contains(&foo));
|
assert!(jdb.contains(&foo));
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_db(dir: &Path) -> ArchiveDB {
|
|
||||||
let db = Database::open_default(dir.to_str().unwrap()).unwrap();
|
|
||||||
ArchiveDB::new(Arc::new(db), None)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen() {
|
fn reopen() {
|
||||||
let mut dir = ::std::env::temp_dir();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
dir.push(H32::random().hex());
|
|
||||||
let bar = H256::random();
|
let bar = H256::random();
|
||||||
|
|
||||||
let foo = {
|
let foo = {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
|
jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
|
||||||
@ -383,13 +368,13 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
||||||
jdb.remove(&foo);
|
jdb.remove(&foo);
|
||||||
jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = ArchiveDB::new(shared_db, None);
|
||||||
assert!(jdb.contains(&foo));
|
assert!(jdb.contains(&foo));
|
||||||
assert!(jdb.contains(&bar));
|
assert!(jdb.contains(&bar));
|
||||||
jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
|
jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
|
||||||
@ -398,11 +383,10 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen_remove() {
|
fn reopen_remove() {
|
||||||
let mut dir = ::std::env::temp_dir();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
dir.push(H32::random().hex());
|
|
||||||
|
|
||||||
let foo = {
|
let foo = {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
@ -416,7 +400,7 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = ArchiveDB::new(shared_db, None);
|
||||||
jdb.remove(&foo);
|
jdb.remove(&foo);
|
||||||
jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap();
|
jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap();
|
||||||
assert!(jdb.contains(&foo));
|
assert!(jdb.contains(&foo));
|
||||||
@ -428,10 +412,9 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen_fork() {
|
fn reopen_fork() {
|
||||||
let mut dir = ::std::env::temp_dir();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
dir.push(H32::random().hex());
|
|
||||||
let (foo, _, _) = {
|
let (foo, _, _) = {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -446,7 +429,7 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = ArchiveDB::new(shared_db, None);
|
||||||
jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
|
jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
|
||||||
assert!(jdb.contains(&foo));
|
assert!(jdb.contains(&foo));
|
||||||
}
|
}
|
||||||
@ -454,17 +437,17 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn returns_state() {
|
fn returns_state() {
|
||||||
let temp = ::devtools::RandomTempPath::new();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
|
|
||||||
let key = {
|
let key = {
|
||||||
let mut jdb = new_db(temp.as_path().as_path());
|
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
||||||
let key = jdb.insert(b"foo");
|
let key = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
key
|
key
|
||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let jdb = new_db(temp.as_path().as_path());
|
let jdb = ArchiveDB::new(shared_db, None);
|
||||||
let state = jdb.state(&key);
|
let state = jdb.state(&key);
|
||||||
assert!(state.is_some());
|
assert!(state.is_some());
|
||||||
}
|
}
|
||||||
@ -472,9 +455,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn inject() {
|
fn inject() {
|
||||||
let temp = ::devtools::RandomTempPath::new();
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
||||||
|
|
||||||
let mut jdb = new_db(temp.as_path().as_path());
|
|
||||||
let key = jdb.insert(b"dog");
|
let key = jdb.insert(b"dog");
|
||||||
jdb.inject_batch().unwrap();
|
jdb.inject_batch().unwrap();
|
||||||
|
|
||||||
|
@ -140,13 +140,6 @@ impl EarlyMergeDB {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create a new instance with an anonymous temporary database.
|
|
||||||
#[cfg(test)]
|
|
||||||
fn new_temp() -> EarlyMergeDB {
|
|
||||||
let backing = Arc::new(::kvdb::in_memory(0));
|
|
||||||
Self::new(backing, None)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn morph_key(key: &H256, index: u8) -> Bytes {
|
fn morph_key(key: &H256, index: u8) -> Bytes {
|
||||||
let mut ret = (&**key).to_owned();
|
let mut ret = (&**key).to_owned();
|
||||||
ret.push(index);
|
ret.push(index);
|
||||||
@ -554,19 +547,17 @@ mod tests {
|
|||||||
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
||||||
#![cfg_attr(feature="dev", allow(similar_names))]
|
#![cfg_attr(feature="dev", allow(similar_names))]
|
||||||
|
|
||||||
use std::path::Path;
|
|
||||||
use keccak::keccak;
|
use keccak::keccak;
|
||||||
use hashdb::{HashDB, DBValue};
|
use hashdb::{HashDB, DBValue};
|
||||||
use super::*;
|
use super::*;
|
||||||
use super::super::traits::JournalDB;
|
use super::super::traits::JournalDB;
|
||||||
use ethcore_logger::init_log;
|
use ethcore_logger::init_log;
|
||||||
use kvdb::{DatabaseConfig};
|
use kvdb_memorydb;
|
||||||
use bigint::hash::H32;
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn insert_same_in_fork() {
|
fn insert_same_in_fork() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let x = jdb.insert(b"X");
|
let x = jdb.insert(b"X");
|
||||||
jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
|
jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
|
||||||
@ -595,7 +586,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn insert_older_era() {
|
fn insert_older_era() {
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0a"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0a"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -616,7 +607,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn long_history() {
|
fn long_history() {
|
||||||
// history is 3
|
// history is 3
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
let h = jdb.insert(b"foo");
|
let h = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -639,7 +630,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn complex() {
|
fn complex() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -682,7 +673,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn fork() {
|
fn fork() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -714,7 +705,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn overwrite() {
|
fn overwrite() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
@ -737,7 +728,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn fork_same_key_one() {
|
fn fork_same_key_one() {
|
||||||
|
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
|
|
||||||
@ -762,7 +753,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn fork_same_key_other() {
|
fn fork_same_key_other() {
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
|
|
||||||
@ -787,7 +778,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn fork_ins_del_ins() {
|
fn fork_ins_del_ins() {
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
|
|
||||||
@ -818,20 +809,18 @@ mod tests {
|
|||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_db(path: &Path) -> EarlyMergeDB {
|
fn new_db() -> EarlyMergeDB {
|
||||||
let config = DatabaseConfig::with_columns(Some(1));
|
let backing = Arc::new(kvdb_memorydb::create(0));
|
||||||
let backing = Arc::new(::kvdb::Database::open(&config, path.to_str().unwrap()).unwrap());
|
EarlyMergeDB::new(backing, None)
|
||||||
EarlyMergeDB::new(backing, Some(0))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen() {
|
fn reopen() {
|
||||||
let mut dir = ::std::env::temp_dir();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
dir.push(H32::random().hex());
|
|
||||||
let bar = H256::random();
|
let bar = H256::random();
|
||||||
|
|
||||||
let foo = {
|
let foo = {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
|
jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
|
||||||
@ -841,14 +830,14 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
|
||||||
jdb.remove(&foo);
|
jdb.remove(&foo);
|
||||||
jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = EarlyMergeDB::new(shared_db, None);
|
||||||
assert!(jdb.contains(&foo));
|
assert!(jdb.contains(&foo));
|
||||||
assert!(jdb.contains(&bar));
|
assert!(jdb.contains(&bar));
|
||||||
jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
|
jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
|
||||||
@ -861,7 +850,7 @@ mod tests {
|
|||||||
fn insert_delete_insert_delete_insert_expunge() {
|
fn insert_delete_insert_delete_insert_expunge() {
|
||||||
init_log();
|
init_log();
|
||||||
|
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
// history is 4
|
// history is 4
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
@ -887,7 +876,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn forked_insert_delete_insert_delete_insert_expunge() {
|
fn forked_insert_delete_insert_delete_insert_expunge() {
|
||||||
init_log();
|
init_log();
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
// history is 4
|
// history is 4
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
@ -933,7 +922,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn broken_assert() {
|
fn broken_assert() {
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
@ -962,7 +951,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen_test() {
|
fn reopen_test() {
|
||||||
let mut jdb = EarlyMergeDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
// history is 4
|
// history is 4
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
@ -997,13 +986,11 @@ mod tests {
|
|||||||
fn reopen_remove_three() {
|
fn reopen_remove_three() {
|
||||||
init_log();
|
init_log();
|
||||||
|
|
||||||
let mut dir = ::std::env::temp_dir();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
dir.push(H32::random().hex());
|
|
||||||
|
|
||||||
let foo = keccak(b"foo");
|
let foo = keccak(b"foo");
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
|
||||||
// history is 1
|
// history is 1
|
||||||
jdb.insert(b"foo");
|
jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
@ -1025,7 +1012,7 @@ mod tests {
|
|||||||
|
|
||||||
// incantation to reopen the db
|
// incantation to reopen the db
|
||||||
}; {
|
}; {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
|
||||||
|
|
||||||
jdb.remove(&foo);
|
jdb.remove(&foo);
|
||||||
jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
|
jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
|
||||||
@ -1034,7 +1021,7 @@ mod tests {
|
|||||||
|
|
||||||
// incantation to reopen the db
|
// incantation to reopen the db
|
||||||
}; {
|
}; {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
|
||||||
|
|
||||||
jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
|
jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -1042,7 +1029,7 @@ mod tests {
|
|||||||
|
|
||||||
// incantation to reopen the db
|
// incantation to reopen the db
|
||||||
}; {
|
}; {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = EarlyMergeDB::new(shared_db, None);
|
||||||
|
|
||||||
jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
|
jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -1052,10 +1039,10 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen_fork() {
|
fn reopen_fork() {
|
||||||
let mut dir = ::std::env::temp_dir();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
dir.push(H32::random().hex());
|
|
||||||
let (foo, bar, baz) = {
|
let (foo, bar, baz) = {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -1073,7 +1060,7 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = EarlyMergeDB::new(shared_db, None);
|
||||||
jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
|
jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
assert!(jdb.contains(&foo));
|
assert!(jdb.contains(&foo));
|
||||||
@ -1084,9 +1071,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn inject() {
|
fn inject() {
|
||||||
let temp = ::devtools::RandomTempPath::new();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let mut jdb = new_db(temp.as_path().as_path());
|
|
||||||
let key = jdb.insert(b"dog");
|
let key = jdb.insert(b"dog");
|
||||||
jdb.inject_batch().unwrap();
|
jdb.inject_batch().unwrap();
|
||||||
|
|
||||||
|
@ -117,13 +117,6 @@ impl OverlayRecentDB {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create a new instance with an anonymous temporary database.
|
|
||||||
#[cfg(test)]
|
|
||||||
pub fn new_temp() -> OverlayRecentDB {
|
|
||||||
let backing = Arc::new(::kvdb::in_memory(0));
|
|
||||||
Self::new(backing, None)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn can_reconstruct_refs(&self) -> bool {
|
fn can_reconstruct_refs(&self) -> bool {
|
||||||
let reconstructed = Self::read_overlay(&*self.backing, self.column);
|
let reconstructed = Self::read_overlay(&*self.backing, self.column);
|
||||||
@ -462,24 +455,22 @@ mod tests {
|
|||||||
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
||||||
#![cfg_attr(feature="dev", allow(similar_names))]
|
#![cfg_attr(feature="dev", allow(similar_names))]
|
||||||
|
|
||||||
use std::path::Path;
|
|
||||||
use keccak::keccak;
|
use keccak::keccak;
|
||||||
use super::*;
|
use super::*;
|
||||||
use hashdb::{HashDB, DBValue};
|
use hashdb::{HashDB, DBValue};
|
||||||
use ethcore_logger::init_log;
|
use ethcore_logger::init_log;
|
||||||
use journaldb::JournalDB;
|
use journaldb::JournalDB;
|
||||||
use kvdb::Database;
|
use kvdb_memorydb;
|
||||||
use bigint::hash::H32;
|
|
||||||
|
|
||||||
fn new_db(path: &Path) -> OverlayRecentDB {
|
fn new_db() -> OverlayRecentDB {
|
||||||
let backing = Arc::new(Database::open_default(path.to_str().unwrap()).unwrap());
|
let backing = Arc::new(kvdb_memorydb::create(0));
|
||||||
OverlayRecentDB::new(backing, None)
|
OverlayRecentDB::new(backing, None)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn insert_same_in_fork() {
|
fn insert_same_in_fork() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let x = jdb.insert(b"X");
|
let x = jdb.insert(b"X");
|
||||||
jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
|
jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
|
||||||
@ -509,7 +500,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn long_history() {
|
fn long_history() {
|
||||||
// history is 3
|
// history is 3
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
let h = jdb.insert(b"foo");
|
let h = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -532,7 +523,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn complex() {
|
fn complex() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -575,7 +566,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn fork() {
|
fn fork() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -607,7 +598,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn overwrite() {
|
fn overwrite() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
@ -629,7 +620,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn fork_same_key_one() {
|
fn fork_same_key_one() {
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
|
|
||||||
@ -654,7 +645,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn fork_same_key_other() {
|
fn fork_same_key_other() {
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -680,7 +671,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn fork_ins_del_ins() {
|
fn fork_ins_del_ins() {
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -714,12 +705,11 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen() {
|
fn reopen() {
|
||||||
let mut dir = ::std::env::temp_dir();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
dir.push(H32::random().hex());
|
|
||||||
let bar = H256::random();
|
let bar = H256::random();
|
||||||
|
|
||||||
let foo = {
|
let foo = {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
|
jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
|
||||||
@ -729,14 +719,14 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
|
||||||
jdb.remove(&foo);
|
jdb.remove(&foo);
|
||||||
jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
|
||||||
assert!(jdb.contains(&foo));
|
assert!(jdb.contains(&foo));
|
||||||
assert!(jdb.contains(&bar));
|
assert!(jdb.contains(&bar));
|
||||||
jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
|
jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
|
||||||
@ -748,7 +738,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn insert_delete_insert_delete_insert_expunge() {
|
fn insert_delete_insert_delete_insert_expunge() {
|
||||||
init_log();
|
init_log();
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
// history is 4
|
// history is 4
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
@ -774,7 +764,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn forked_insert_delete_insert_delete_insert_expunge() {
|
fn forked_insert_delete_insert_delete_insert_expunge() {
|
||||||
init_log();
|
init_log();
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
// history is 4
|
// history is 4
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
@ -820,7 +810,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn broken_assert() {
|
fn broken_assert() {
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
||||||
@ -848,7 +838,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen_test() {
|
fn reopen_test() {
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
// history is 4
|
// history is 4
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
@ -882,13 +872,11 @@ mod tests {
|
|||||||
fn reopen_remove_three() {
|
fn reopen_remove_three() {
|
||||||
init_log();
|
init_log();
|
||||||
|
|
||||||
let mut dir = ::std::env::temp_dir();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
dir.push(H32::random().hex());
|
|
||||||
|
|
||||||
let foo = keccak(b"foo");
|
let foo = keccak(b"foo");
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
|
||||||
// history is 1
|
// history is 1
|
||||||
jdb.insert(b"foo");
|
jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
@ -910,7 +898,7 @@ mod tests {
|
|||||||
|
|
||||||
// incantation to reopen the db
|
// incantation to reopen the db
|
||||||
}; {
|
}; {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
|
||||||
|
|
||||||
jdb.remove(&foo);
|
jdb.remove(&foo);
|
||||||
jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
|
jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
|
||||||
@ -919,7 +907,7 @@ mod tests {
|
|||||||
|
|
||||||
// incantation to reopen the db
|
// incantation to reopen the db
|
||||||
}; {
|
}; {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
|
||||||
|
|
||||||
jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
|
jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -927,7 +915,7 @@ mod tests {
|
|||||||
|
|
||||||
// incantation to reopen the db
|
// incantation to reopen the db
|
||||||
}; {
|
}; {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = OverlayRecentDB::new(shared_db, None);
|
||||||
|
|
||||||
jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
|
jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -937,10 +925,10 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen_fork() {
|
fn reopen_fork() {
|
||||||
let mut dir = ::std::env::temp_dir();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
dir.push(H32::random().hex());
|
|
||||||
let (foo, bar, baz) = {
|
let (foo, bar, baz) = {
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -958,7 +946,7 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = new_db(&dir);
|
let mut jdb = OverlayRecentDB::new(shared_db, None);
|
||||||
jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
|
jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
assert!(jdb.contains(&foo));
|
assert!(jdb.contains(&foo));
|
||||||
@ -969,7 +957,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn insert_older_era() {
|
fn insert_older_era() {
|
||||||
let mut jdb = OverlayRecentDB::new_temp();
|
let mut jdb = new_db();
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0a"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0a"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
@ -989,9 +977,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn inject() {
|
fn inject() {
|
||||||
let temp = ::devtools::RandomTempPath::new();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let mut jdb = new_db(temp.as_path().as_path());
|
|
||||||
let key = jdb.insert(b"dog");
|
let key = jdb.insert(b"dog");
|
||||||
jdb.inject_batch().unwrap();
|
jdb.inject_batch().unwrap();
|
||||||
|
|
||||||
@ -1004,10 +990,10 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn earliest_era() {
|
fn earliest_era() {
|
||||||
let temp = ::devtools::RandomTempPath::new();
|
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
||||||
|
|
||||||
// empty DB
|
// empty DB
|
||||||
let mut jdb = new_db(temp.as_path().as_path());
|
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
|
||||||
assert!(jdb.earliest_era().is_none());
|
assert!(jdb.earliest_era().is_none());
|
||||||
|
|
||||||
// single journalled era.
|
// single journalled era.
|
||||||
@ -1041,7 +1027,7 @@ mod tests {
|
|||||||
|
|
||||||
// reconstructed: no journal entries.
|
// reconstructed: no journal entries.
|
||||||
drop(jdb);
|
drop(jdb);
|
||||||
let jdb = new_db(temp.as_path().as_path());
|
let jdb = OverlayRecentDB::new(shared_db, None);
|
||||||
assert_eq!(jdb.earliest_era(), None);
|
assert_eq!(jdb.earliest_era(), None);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -75,13 +75,6 @@ impl RefCountedDB {
|
|||||||
column: col,
|
column: col,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create a new instance with an anonymous temporary database.
|
|
||||||
#[cfg(test)]
|
|
||||||
fn new_temp() -> RefCountedDB {
|
|
||||||
let backing = Arc::new(::kvdb::in_memory(0));
|
|
||||||
Self::new(backing, None)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl HashDB for RefCountedDB {
|
impl HashDB for RefCountedDB {
|
||||||
@ -217,13 +210,19 @@ mod tests {
|
|||||||
|
|
||||||
use keccak::keccak;
|
use keccak::keccak;
|
||||||
use hashdb::{HashDB, DBValue};
|
use hashdb::{HashDB, DBValue};
|
||||||
|
use kvdb_memorydb;
|
||||||
use super::*;
|
use super::*;
|
||||||
use super::super::traits::JournalDB;
|
use super::super::traits::JournalDB;
|
||||||
|
|
||||||
|
fn new_db() -> RefCountedDB {
|
||||||
|
let backing = Arc::new(kvdb_memorydb::create(0));
|
||||||
|
RefCountedDB::new(backing, None)
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn long_history() {
|
fn long_history() {
|
||||||
// history is 3
|
// history is 3
|
||||||
let mut jdb = RefCountedDB::new_temp();
|
let mut jdb = new_db();
|
||||||
let h = jdb.insert(b"foo");
|
let h = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.contains(&h));
|
assert!(jdb.contains(&h));
|
||||||
@ -241,7 +240,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn latest_era_should_work() {
|
fn latest_era_should_work() {
|
||||||
// history is 3
|
// history is 3
|
||||||
let mut jdb = RefCountedDB::new_temp();
|
let mut jdb = new_db();
|
||||||
assert_eq!(jdb.latest_era(), None);
|
assert_eq!(jdb.latest_era(), None);
|
||||||
let h = jdb.insert(b"foo");
|
let h = jdb.insert(b"foo");
|
||||||
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
|
||||||
@ -260,7 +259,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn complex() {
|
fn complex() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = RefCountedDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -298,7 +297,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn fork() {
|
fn fork() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = RefCountedDB::new_temp();
|
let mut jdb = new_db();
|
||||||
|
|
||||||
let foo = jdb.insert(b"foo");
|
let foo = jdb.insert(b"foo");
|
||||||
let bar = jdb.insert(b"bar");
|
let bar = jdb.insert(b"bar");
|
||||||
@ -325,7 +324,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn inject() {
|
fn inject() {
|
||||||
let mut jdb = RefCountedDB::new_temp();
|
let mut jdb = new_db();
|
||||||
let key = jdb.insert(b"dog");
|
let key = jdb.insert(b"dog");
|
||||||
jdb.inject_batch().unwrap();
|
jdb.inject_batch().unwrap();
|
||||||
|
|
||||||
|
@ -110,6 +110,9 @@ extern crate patricia_trie as trie;
|
|||||||
extern crate kvdb;
|
extern crate kvdb;
|
||||||
extern crate util_error as error;
|
extern crate util_error as error;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
extern crate kvdb_memorydb;
|
||||||
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate log as rlog;
|
extern crate log as rlog;
|
||||||
|
|
||||||
|
@ -50,7 +50,7 @@ impl OverlayDB {
|
|||||||
/// Create a new instance of OverlayDB with an anonymous temporary database.
|
/// Create a new instance of OverlayDB with an anonymous temporary database.
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub fn new_temp() -> OverlayDB {
|
pub fn new_temp() -> OverlayDB {
|
||||||
let backing = Arc::new(::kvdb::in_memory(0));
|
let backing = Arc::new(::kvdb_memorydb::create(0));
|
||||||
Self::new(backing, None)
|
Self::new(backing, None)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user