// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Key-Value store abstraction with `RocksDB` backend.

use std::{mem, fs};
use std::collections::{HashMap, BTreeMap};
use std::io::ErrorKind;
use std::marker::PhantomData;
use std::path::{PathBuf, Path};
use parking_lot::{Mutex, MutexGuard, RwLock};
use elastic_array::*;
use hashdb::DBValue;
use rlp::{UntrustedRlp, RlpType, Compressible};
use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
	Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column, ReadOptions};
use {UtilError, Bytes};

#[cfg(target_os = "linux")]
use regex::Regex;
#[cfg(target_os = "linux")]
use std::process::Command;
#[cfg(target_os = "linux")]
use std::fs::File;

const DB_BACKGROUND_FLUSHES: i32 = 2;
const DB_BACKGROUND_COMPACTIONS: i32 = 2;
const DB_WRITE_BUFFER_SIZE: usize = 2048 * 1000;

/// Required length of prefixes.
pub const PREFIX_LEN: usize = 12;

/// Write transaction. Batches a sequence of put/delete operations for efficiency.
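///
/// A minimal usage sketch (assuming an open database handle `db`, e.g. a
/// `Database` or any other `KeyValueDB` implementation):
///
/// ```ignore
/// let mut tr = db.transaction();
/// tr.put(None, b"key", b"value"); // `None` addresses the default column
/// tr.delete(None, b"old-key");
/// db.write(tr)?; // the whole batch is applied atomically
/// ```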
#[derive(Default, Clone, PartialEq)]
pub struct DBTransaction {
	ops: Vec<DBOp>,
}

#[derive(Clone, PartialEq)]
enum DBOp {
	Insert {
		col: Option<u32>,
		key: ElasticArray32<u8>,
		value: DBValue,
	},
	InsertCompressed {
		col: Option<u32>,
		key: ElasticArray32<u8>,
		value: DBValue,
	},
	Delete {
		col: Option<u32>,
		key: ElasticArray32<u8>,
	}
}

impl DBTransaction {
	/// Create new transaction.
	pub fn new() -> DBTransaction {
		DBTransaction::with_capacity(256)
	}

	/// Create new transaction with capacity.
	pub fn with_capacity(cap: usize) -> DBTransaction {
		DBTransaction {
			ops: Vec::with_capacity(cap)
		}
	}

	/// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write.
	pub fn put(&mut self, col: Option<u32>, key: &[u8], value: &[u8]) {
		let mut ekey = ElasticArray32::new();
		ekey.append_slice(key);
		self.ops.push(DBOp::Insert {
			col: col,
			key: ekey,
			value: DBValue::from_slice(value),
		});
	}

	/// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write.
	pub fn put_vec(&mut self, col: Option<u32>, key: &[u8], value: Bytes) {
		let mut ekey = ElasticArray32::new();
		ekey.append_slice(key);
		self.ops.push(DBOp::Insert {
			col: col,
			key: ekey,
			value: DBValue::from_vec(value),
		});
	}

	/// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write.
	/// The value will be RLP-compressed on flush.
	pub fn put_compressed(&mut self, col: Option<u32>, key: &[u8], value: Bytes) {
		let mut ekey = ElasticArray32::new();
		ekey.append_slice(key);
		self.ops.push(DBOp::InsertCompressed {
			col: col,
			key: ekey,
			value: DBValue::from_vec(value),
		});
	}

	/// Delete value by key.
	pub fn delete(&mut self, col: Option<u32>, key: &[u8]) {
		let mut ekey = ElasticArray32::new();
		ekey.append_slice(key);
		self.ops.push(DBOp::Delete {
			col: col,
			key: ekey,
		});
	}
}

enum KeyState {
	Insert(DBValue),
	InsertCompressed(DBValue),
	Delete,
}

/// Generic key-value database.
///
/// This makes a distinction between "buffered" and "flushed" values. Values which have been
/// written can always be read, but may be present in an in-memory buffer. Values which have
/// been flushed have been moved to backing storage, like a RocksDB instance. There are certain
/// operations which are only guaranteed to operate on flushed data and not buffered,
/// although implementations may differ in this regard.
///
/// The contents of an interior buffer may be explicitly flushed using the `flush` method.
///
/// The `KeyValueDB` also deals in "column families", which can be thought of as distinct
/// stores within a database. Keys written in one column family will not be accessible from
/// any other. The number of column families must be specified at initialization, with a
/// differing interface for each database. The `None` argument in place of a column index
/// is always supported.
///
/// The API laid out here, along with the `Sync` bound, implies interior synchronization for
/// implementations.
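///
/// A minimal sketch of the buffered-vs-flushed contract, using the `in_memory`
/// backend defined below (illustrative only; `in_memory` applies buffered
/// writes directly, so its `flush` is a no-op):
///
/// ```ignore
/// let db = in_memory(1);
/// let mut tr = db.transaction();
/// tr.put(Some(0), b"k", b"v");
/// db.write_buffered(tr); // readable via `get` right away
/// db.flush().unwrap();   // guarantees the data has reached backing storage
/// ```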
pub trait KeyValueDB: Sync + Send {
	/// Helper to create a new transaction.
	fn transaction(&self) -> DBTransaction { DBTransaction::new() }

	/// Get a value by key.
	fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String>;

	/// Get a value by partial key. Only works for flushed data.
	fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>>;

	/// Write a transaction of changes to the buffer.
	fn write_buffered(&self, transaction: DBTransaction);

	/// Write a transaction of changes to the backing store.
	fn write(&self, transaction: DBTransaction) -> Result<(), String> {
		self.write_buffered(transaction);
		self.flush()
	}

	/// Flush all buffered data.
	fn flush(&self) -> Result<(), String>;

	/// Iterate over flushed data for a given column.
	fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>;

	/// Iterate over flushed data for a given column, starting from a given prefix.
	fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
		-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>;

	/// Attempt to replace this database with a new one located at the given path.
	fn restore(&self, new_db: &str) -> Result<(), UtilError>;
}

/// A key-value database fulfilling the `KeyValueDB` trait, living in memory.
/// This is generally intended for tests and is not particularly optimized.
pub struct InMemory {
	columns: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, DBValue>>>,
}

/// Create an in-memory database with the given number of columns.
/// Columns will be indexable by 0..`num_cols`.
pub fn in_memory(num_cols: u32) -> InMemory {
	let mut cols = HashMap::new();
	cols.insert(None, BTreeMap::new());

	for idx in 0..num_cols {
		cols.insert(Some(idx), BTreeMap::new());
	}

	InMemory {
		columns: RwLock::new(cols)
	}
}

impl KeyValueDB for InMemory {
	fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
		let columns = self.columns.read();
		match columns.get(&col) {
			None => Err(format!("No such column family: {:?}", col)),
			Some(map) => Ok(map.get(key).cloned()),
		}
	}

	fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
		let columns = self.columns.read();
		match columns.get(&col) {
			None => None,
			Some(map) =>
				map.iter()
					.find(|&(ref k, _)| k.starts_with(prefix))
					.map(|(_, v)| v.to_vec().into_boxed_slice())
		}
	}

	fn write_buffered(&self, transaction: DBTransaction) {
		let mut columns = self.columns.write();
		let ops = transaction.ops;
		for op in ops {
			match op {
				DBOp::Insert { col, key, value } => {
					if let Some(mut col) = columns.get_mut(&col) {
						col.insert(key.into_vec(), value);
					}
				},
				DBOp::InsertCompressed { col, key, value } => {
					if let Some(mut col) = columns.get_mut(&col) {
						let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
						let mut value = DBValue::new();
						value.append_slice(&compressed);
						col.insert(key.into_vec(), value);
					}
				},
				DBOp::Delete { col, key } => {
					if let Some(mut col) = columns.get_mut(&col) {
						col.remove(&*key);
					}
				},
			}
		}
	}

	fn flush(&self) -> Result<(), String> { Ok(()) }

	fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
		match self.columns.read().get(&col) {
			Some(map) => Box::new( // TODO: worth optimizing at all?
				map.clone()
					.into_iter()
					.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
			),
			None => Box::new(None.into_iter()),
		}
	}

	fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
		-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
	{
		match self.columns.read().get(&col) {
			Some(map) => Box::new(
				map.clone()
					.into_iter()
					.skip_while(move |&(ref k, _)| !k.starts_with(prefix))
					.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
			),
			None => Box::new(None.into_iter()),
		}
	}

	fn restore(&self, _new_db: &str) -> Result<(), UtilError> {
		Err(UtilError::SimpleString("Attempted to restore in-memory database".into()))
	}
}

/// Compaction profile for the database settings.
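///
/// For example, a profile can also be chosen explicitly instead of probing the
/// disk with `auto` (a sketch; `DatabaseConfig` is defined below):
///
/// ```ignore
/// let mut config = DatabaseConfig::with_columns(Some(1));
/// config.compaction = CompactionProfile::hdd(); // spinning disk: rate-limited writes
/// ```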
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct CompactionProfile {
	/// L0-L1 target file size
	pub initial_file_size: u64,
	/// L2-LN target file size multiplier
	pub file_size_multiplier: i32,
	/// rate limiter for background flushes and compactions, bytes/sec, if any
	pub write_rate_limit: Option<u64>,
}

impl Default for CompactionProfile {
	/// Default profile suitable for most storage
	fn default() -> CompactionProfile {
		CompactionProfile::ssd()
	}
}

/// Given the output of the `df` command, return the path of the Linux rotational
/// flag file for the underlying drive.
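/// For `df` output naming `/dev/sda1`, for example, this yields
/// `/sys/block/sda/queue/rotational` (see the `df_to_rotational` test below).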
#[cfg(target_os = "linux")]
pub fn rotational_from_df_output(df_out: Vec<u8>) -> Option<PathBuf> {
	use std::str;
	str::from_utf8(df_out.as_slice())
		.ok()
		// Get the drive name.
		.and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})")
			.ok()
			.and_then(|re| re.captures(df_str))
			.and_then(|captures| captures.get(1)))
		// Generate path e.g. /sys/block/sda/queue/rotational
		.map(|drive_path| {
			let mut p = PathBuf::from("/sys/block");
			p.push(drive_path.as_str());
			p.push("queue/rotational");
			p
		})
}

impl CompactionProfile {
	/// Attempt to determine the best profile automatically, only Linux for now.
	#[cfg(target_os = "linux")]
	pub fn auto(db_path: &Path) -> CompactionProfile {
		use std::io::Read;
		let hdd_check_file = db_path
			.to_str()
			.and_then(|path_str| Command::new("df").arg(path_str).output().ok())
			.and_then(|df_res| match df_res.status.success() {
				true => Some(df_res.stdout),
				false => None,
			})
			.and_then(rotational_from_df_output);
		// Read out the file and match compaction profile.
		if let Some(hdd_check) = hdd_check_file {
			if let Ok(mut file) = File::open(hdd_check.as_path()) {
				let mut buffer = [0; 1];
				if file.read_exact(&mut buffer).is_ok() {
					// 0 means not rotational.
					if buffer == [48] { return Self::ssd(); }
					// 1 means rotational.
					if buffer == [49] { return Self::hdd(); }
				}
			}
		}
		// Fallback if drive type was not determined.
		Self::default()
	}

	/// Just default for other platforms.
	#[cfg(not(target_os = "linux"))]
	pub fn auto(_db_path: &Path) -> CompactionProfile {
		Self::default()
	}

	/// Default profile suitable for SSD storage
	pub fn ssd() -> CompactionProfile {
		CompactionProfile {
			initial_file_size: 32 * 1024 * 1024,
			file_size_multiplier: 2,
			write_rate_limit: None,
		}
	}

	/// Slow HDD compaction profile
	pub fn hdd() -> CompactionProfile {
		CompactionProfile {
			initial_file_size: 192 * 1024 * 1024,
			file_size_multiplier: 1,
			write_rate_limit: Some(8 * 1024 * 1024),
		}
	}
}

/// Database configuration
#[derive(Clone)]
pub struct DatabaseConfig {
	/// Max number of open files.
	pub max_open_files: i32,
	/// Cache sizes (in MiB) for specific columns.
	pub cache_sizes: HashMap<Option<u32>, usize>,
	/// Compaction profile
	pub compaction: CompactionProfile,
	/// Set number of columns
	pub columns: Option<u32>,
	/// Should we keep WAL enabled?
	pub wal: bool,
}

impl DatabaseConfig {
	/// Create new `DatabaseConfig` with default parameters and specified set of columns.
	/// Note that cache sizes must be explicitly set.
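	///
	/// For example (a sketch):
	///
	/// ```ignore
	/// let mut config = DatabaseConfig::with_columns(Some(2));
	/// config.set_cache(Some(0), 64); // 64 MiB block cache for column 0
	/// config.set_cache(Some(1), 8);
	/// ```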
	pub fn with_columns(columns: Option<u32>) -> Self {
		let mut config = Self::default();
		config.columns = columns;
		config
	}

	/// Set the column cache size in MiB.
	pub fn set_cache(&mut self, col: Option<u32>, size: usize) {
		self.cache_sizes.insert(col, size);
	}
}

impl Default for DatabaseConfig {
	fn default() -> DatabaseConfig {
		DatabaseConfig {
			cache_sizes: HashMap::new(),
			max_open_files: 512,
			compaction: CompactionProfile::default(),
			columns: None,
			wal: true,
		}
	}
}

/// Database iterator (for flushed data only)
// The compromise of holding only a virtual borrow vs. holding a lock on the
// inner DB (to prevent closing via restoration) may be re-evaluated in the future.
pub struct DatabaseIterator<'a> {
	iter: DBIterator,
	_marker: PhantomData<&'a Database>,
}

impl<'a> Iterator for DatabaseIterator<'a> {
	type Item = (Box<[u8]>, Box<[u8]>);

	fn next(&mut self) -> Option<Self::Item> {
		self.iter.next()
	}
}

struct DBAndColumns {
	db: DB,
	cfs: Vec<Column>,
}

// Get column family configuration from database config.
fn col_config(col: u32, config: &DatabaseConfig) -> Options {
	// Default cache size for columns not specified.
	const DEFAULT_CACHE: usize = 2;

	let mut opts = Options::new();
	opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
	opts.set_target_file_size_base(config.compaction.initial_file_size);
	opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);
	opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);

	let col_opt = config.columns.map(|_| col);

	{
		let cache_size = config.cache_sizes.get(&col_opt).cloned().unwrap_or(DEFAULT_CACHE);
		let mut block_opts = BlockBasedOptions::new();
		// all goes to read cache.
		block_opts.set_cache(Cache::new(cache_size * 1024 * 1024));
		opts.set_block_based_table_factory(&block_opts);
	}

	opts
}

/// Key-Value database.
pub struct Database {
	db: RwLock<Option<DBAndColumns>>,
	config: DatabaseConfig,
	write_opts: WriteOptions,
	read_opts: ReadOptions,
	path: String,
	// Dirty values added with `write_buffered`. Cleaned on `flush`.
	overlay: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
	// Values currently being flushed. Cleared when `flush` completes.
	flushing: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
	// Prevents concurrent flushes.
	// Value indicates if a flush is in progress.
	flushing_lock: Mutex<bool>,
}

impl Database {
	/// Open database with default settings.
	pub fn open_default(path: &str) -> Result<Database, String> {
		Database::open(&DatabaseConfig::default(), path)
	}

	/// Open database file. Creates if it does not exist.
	pub fn open(config: &DatabaseConfig, path: &str) -> Result<Database, String> {
		let mut opts = Options::new();
		if let Some(rate_limit) = config.compaction.write_rate_limit {
			opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit))?;
		}
		opts.set_parsed_options(&format!("max_total_wal_size={}", 64 * 1024 * 1024))?;
		opts.set_parsed_options("verify_checksums_in_compaction=0")?;
		opts.set_parsed_options("keep_log_file_num=1")?;
		opts.set_max_open_files(config.max_open_files);
		opts.create_if_missing(true);
		opts.set_use_fsync(false);
		opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);

		opts.set_max_background_flushes(DB_BACKGROUND_FLUSHES);
		opts.set_max_background_compactions(DB_BACKGROUND_COMPACTIONS);

		// compaction settings
		opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
		opts.set_target_file_size_base(config.compaction.initial_file_size);
		opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);

		let mut cf_options = Vec::with_capacity(config.columns.unwrap_or(0) as usize);
		let cfnames: Vec<_> = (0..config.columns.unwrap_or(0)).map(|c| format!("col{}", c)).collect();
		let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect();

		for col in 0 .. config.columns.unwrap_or(0) {
			cf_options.push(col_config(col, &config));
		}

		let mut write_opts = WriteOptions::new();
		if !config.wal {
			write_opts.disable_wal(true);
		}
		let mut read_opts = ReadOptions::new();
		read_opts.set_verify_checksums(false);

		let mut cfs: Vec<Column> = Vec::new();
		let db = match config.columns {
			Some(columns) => {
				match DB::open_cf(&opts, path, &cfnames, &cf_options) {
					Ok(db) => {
						cfs = cfnames.iter().map(|n| db.cf_handle(n)
							.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
						assert!(cfs.len() == columns as usize);
						Ok(db)
					}
					Err(_) => {
						// retry and create CFs
						match DB::open_cf(&opts, path, &[], &[]) {
							Ok(mut db) => {
								cfs = cfnames.iter().enumerate().map(|(i, n)| db.create_cf(n, &cf_options[i])).collect::<Result<_, _>>()?;
								Ok(db)
							},
							err @ Err(_) => err,
						}
					}
				}
			},
			None => DB::open(&opts, path)
		};

		let db = match db {
			Ok(db) => db,
			Err(ref s) if s.starts_with("Corruption:") => {
				info!("{}", s);
				info!("Attempting DB repair for {}", path);
				DB::repair(&opts, path)?;

				match cfnames.is_empty() {
					true => DB::open(&opts, path)?,
					false => DB::open_cf(&opts, path, &cfnames, &cf_options)?
				}
			},
			Err(s) => { return Err(s); }
		};
		let num_cols = cfs.len();
		Ok(Database {
			db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })),
			config: config.clone(),
			write_opts: write_opts,
			overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
			flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
			flushing_lock: Mutex::new(false),
			path: path.to_owned(),
			read_opts: read_opts,
		})
	}

	/// Helper to create new transaction for this database.
	pub fn transaction(&self) -> DBTransaction {
		DBTransaction::new()
	}
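
	// Map a column id to its overlay index: the default column (`None`) lives
	// at index 0 and column `c` lives at index `c + 1`.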
	fn to_overlay_column(col: Option<u32>) -> usize {
		col.map_or(0, |c| (c + 1) as usize)
	}

	/// Commit transaction to database.
	pub fn write_buffered(&self, tr: DBTransaction) {
		let mut overlay = self.overlay.write();
		let ops = tr.ops;
		for op in ops {
			match op {
				DBOp::Insert { col, key, value } => {
					let c = Self::to_overlay_column(col);
					overlay[c].insert(key, KeyState::Insert(value));
				},
				DBOp::InsertCompressed { col, key, value } => {
					let c = Self::to_overlay_column(col);
					overlay[c].insert(key, KeyState::InsertCompressed(value));
				},
				DBOp::Delete { col, key } => {
					let c = Self::to_overlay_column(col);
					overlay[c].insert(key, KeyState::Delete);
				},
			}
		}
	}

	/// Commit buffered changes to database. Must be called under `flushing_lock`.
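	///
	/// The overlay is first swapped into `flushing` and then written out as a
	/// single batch, so reads can keep consulting `flushing` while the batch
	/// is being committed.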
	fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<bool>) -> Result<(), String> {
		match *self.db.read() {
			Some(DBAndColumns { ref db, ref cfs }) => {
				let batch = WriteBatch::new();
				mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write());
				{
					for (c, column) in self.flushing.read().iter().enumerate() {
						for (ref key, ref state) in column.iter() {
							match **state {
								KeyState::Delete => {
									if c > 0 {
										batch.delete_cf(cfs[c - 1], &key)?;
									} else {
										batch.delete(&key)?;
									}
								},
								KeyState::Insert(ref value) => {
									if c > 0 {
										batch.put_cf(cfs[c - 1], &key, value)?;
									} else {
										batch.put(&key, &value)?;
									}
								},
								KeyState::InsertCompressed(ref value) => {
									let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
									if c > 0 {
										batch.put_cf(cfs[c - 1], &key, &compressed)?;
									} else {
										batch.put(&key, &compressed)?;
									}
								}
							}
						}
					}
				}
				db.write_opt(batch, &self.write_opts)?;
				for column in self.flushing.write().iter_mut() {
					column.clear();
					column.shrink_to_fit();
				}
				Ok(())
			},
			None => Err("Database is closed".to_owned())
		}
	}

	/// Commit buffered changes to database.
	pub fn flush(&self) -> Result<(), String> {
		let mut lock = self.flushing_lock.lock();
		// If RocksDB batch allocation fails the thread gets terminated and the lock is released.
		// The value inside the lock is used to detect that.
		if *lock {
			// This can only happen if another flushing thread is terminated unexpectedly.
			return Err("Database write failure. Running low on memory perhaps?".to_owned());
		}
		*lock = true;
		let result = self.write_flushing_with_lock(&mut lock);
		*lock = false;
		result
	}

	/// Commit transaction to database.
	pub fn write(&self, tr: DBTransaction) -> Result<(), String> {
		match *self.db.read() {
			Some(DBAndColumns { ref db, ref cfs }) => {
				let batch = WriteBatch::new();
				let ops = tr.ops;
				for op in ops {
					match op {
						DBOp::Insert { col, key, value } => {
							col.map_or_else(|| batch.put(&key, &value), |c| batch.put_cf(cfs[c as usize], &key, &value))?
						},
						DBOp::InsertCompressed { col, key, value } => {
							let compressed = UntrustedRlp::new(&value).compress(RlpType::Blocks);
							col.map_or_else(|| batch.put(&key, &compressed), |c| batch.put_cf(cfs[c as usize], &key, &compressed))?
						},
						DBOp::Delete { col, key } => {
							col.map_or_else(|| batch.delete(&key), |c| batch.delete_cf(cfs[c as usize], &key))?
						},
					}
				}
				db.write_opt(batch, &self.write_opts)
			},
			None => Err("Database is closed".to_owned())
		}
	}

	/// Get value by key.
	pub fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
		match *self.db.read() {
			Some(DBAndColumns { ref db, ref cfs }) => {
				let overlay = &self.overlay.read()[Self::to_overlay_column(col)];
				match overlay.get(key) {
					Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
					Some(&KeyState::Delete) => Ok(None),
					None => {
						let flushing = &self.flushing.read()[Self::to_overlay_column(col)];
						match flushing.get(key) {
							Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
							Some(&KeyState::Delete) => Ok(None),
							None => {
								col.map_or_else(
									|| db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))),
									|c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))))
							},
						}
					},
				}
			},
			None => Ok(None),
		}
	}

	/// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values.
	// TODO: support prefix seek for unflushed data
	pub fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
		self.iter_from_prefix(col, prefix).and_then(|mut iter| {
			match iter.next() {
				// TODO: use prefix_same_as_start read option (not available in C API currently)
				Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None },
				_ => None
			}
		})
	}

	/// Get database iterator for flushed data.
	pub fn iter(&self, col: Option<u32>) -> Option<DatabaseIterator> {
		//TODO: iterate over overlay
		match *self.db.read() {
			Some(DBAndColumns { ref db, ref cfs }) => {
				let iter = col.map_or_else(
					|| db.iterator_opt(IteratorMode::Start, &self.read_opts),
					|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts)
						.expect("iterator params are valid; qed")
				);

				Some(DatabaseIterator {
					iter: iter,
					_marker: PhantomData,
				})
			},
			None => None,
		}
	}

	fn iter_from_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<DatabaseIterator> {
		match *self.db.read() {
			Some(DBAndColumns { ref db, ref cfs }) => {
				let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts),
					|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts)
						.expect("iterator params are valid; qed"));

				Some(DatabaseIterator {
					iter: iter,
					_marker: PhantomData,
				})
			},
			None => None,
		}
	}

	/// Close the database
	fn close(&self) {
		*self.db.write() = None;
		self.overlay.write().clear();
		self.flushing.write().clear();
	}

	/// Restore the database from a copy at given path.
	pub fn restore(&self, new_db: &str) -> Result<(), UtilError> {
		self.close();

		let mut backup_db = PathBuf::from(&self.path);
		backup_db.pop();
		backup_db.push("backup_db");

		let existed = match fs::rename(&self.path, &backup_db) {
			Ok(_) => true,
			Err(e) => if let ErrorKind::NotFound = e.kind() {
				false
			} else {
				return Err(e.into());
			}
		};

		match fs::rename(&new_db, &self.path) {
			Ok(_) => {
				// clean up the backup.
				if existed {
					fs::remove_dir_all(&backup_db)?;
				}
			}
			Err(e) => {
				// restore the backup.
				if existed {
					fs::rename(&backup_db, &self.path)?;
				}
				return Err(e.into())
			}
		}

		// reopen the database and steal handles into self
		let db = Self::open(&self.config, &self.path)?;
		*self.db.write() = mem::replace(&mut *db.db.write(), None);
		*self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new());
		*self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new());
		Ok(())
	}

	/// The number of non-default column families.
	pub fn num_columns(&self) -> u32 {
		self.db.read().as_ref()
			.and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) })
			.map(|n| n as u32)
			.unwrap_or(0)
	}

	/// Drop a column family.
	pub fn drop_column(&self) -> Result<(), String> {
		match *self.db.write() {
			Some(DBAndColumns { ref mut db, ref mut cfs }) => {
				if let Some(col) = cfs.pop() {
					let name = format!("col{}", cfs.len());
					drop(col);
					db.drop_cf(&name)?;
				}
				Ok(())
			},
			None => Ok(()),
		}
	}

	/// Add a column family.
	pub fn add_column(&self) -> Result<(), String> {
		match *self.db.write() {
			Some(DBAndColumns { ref mut db, ref mut cfs }) => {
				let col = cfs.len() as u32;
				let name = format!("col{}", col);
				cfs.push(db.create_cf(&name, &col_config(col, &self.config))?);
				Ok(())
			},
			None => Ok(()),
		}
	}
}

// Duplicate declaration of methods here to avoid trait import in certain existing cases
// at time of addition.
impl KeyValueDB for Database {
	fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
		Database::get(self, col, key)
	}

	fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
		Database::get_by_prefix(self, col, prefix)
	}

	fn write_buffered(&self, transaction: DBTransaction) {
		Database::write_buffered(self, transaction)
	}

	fn write(&self, transaction: DBTransaction) -> Result<(), String> {
		Database::write(self, transaction)
	}

	fn flush(&self) -> Result<(), String> {
		Database::flush(self)
	}

	fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
		let unboxed = Database::iter(self, col);
		Box::new(unboxed.into_iter().flat_map(|inner| inner))
	}

	fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
		-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
	{
		let unboxed = Database::iter_from_prefix(self, col, prefix);
		Box::new(unboxed.into_iter().flat_map(|inner| inner))
	}

	fn restore(&self, new_db: &str) -> Result<(), UtilError> {
		Database::restore(self, new_db)
	}
}

impl Drop for Database {
	fn drop(&mut self) {
		// write all buffered changes if we can.
		let _ = self.flush();
	}
}

#[cfg(test)]
mod tests {
	use hash::H256;
	use super::*;
	use devtools::*;
	use std::str::FromStr;

	fn test_db(config: &DatabaseConfig) {
		let path = RandomTempPath::create_dir();
		let db = Database::open(config, path.as_path().to_str().unwrap()).unwrap();
		let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
		let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
		let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();

		let mut batch = db.transaction();
		batch.put(None, &key1, b"cat");
		batch.put(None, &key2, b"dog");
		db.write(batch).unwrap();

		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"cat");

		let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect();
		assert_eq!(contents.len(), 2);
		assert_eq!(&*contents[0].0, &*key1);
		assert_eq!(&*contents[0].1, b"cat");
		assert_eq!(&*contents[1].0, &*key2);
		assert_eq!(&*contents[1].1, b"dog");

		let mut batch = db.transaction();
		batch.delete(None, &key1);
		db.write(batch).unwrap();

		assert!(db.get(None, &key1).unwrap().is_none());

		let mut batch = db.transaction();
		batch.put(None, &key1, b"cat");
		db.write(batch).unwrap();

		let mut transaction = db.transaction();
		transaction.put(None, &key3, b"elephant");
		transaction.delete(None, &key1);
		db.write(transaction).unwrap();
		assert!(db.get(None, &key1).unwrap().is_none());
		assert_eq!(&*db.get(None, &key3).unwrap().unwrap(), b"elephant");

		assert_eq!(&*db.get_by_prefix(None, &key3).unwrap(), b"elephant");
		assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog");

		let mut transaction = db.transaction();
		transaction.put(None, &key1, b"horse");
		transaction.delete(None, &key3);
		db.write_buffered(transaction);
		assert!(db.get(None, &key3).unwrap().is_none());
		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");

		db.flush().unwrap();
		assert!(db.get(None, &key3).unwrap().is_none());
		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
	}

	#[test]
	fn kvdb() {
		let path = RandomTempPath::create_dir();
		let _ = Database::open_default(path.as_path().to_str().unwrap()).unwrap();
		test_db(&DatabaseConfig::default());
	}
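
	// An added sketch exercising the `InMemory` backend through the
	// `KeyValueDB` trait; mirrors `test_db` above on a single column.
	#[test]
	fn kvdb_in_memory() {
		let db = in_memory(1);
		let mut tr = db.transaction();
		tr.put(Some(0), b"cat", b"meow");
		db.write(tr).unwrap();
		assert_eq!(&*db.get(Some(0), b"cat").unwrap().unwrap(), b"meow");
		assert!(db.get(Some(0), b"dog").unwrap().is_none());
	}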

	#[test]
	#[cfg(target_os = "linux")]
	fn df_to_rotational() {
		use std::path::PathBuf;
		// Example df output.
		let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10];
		let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational"));
		assert_eq!(rotational_from_df_output(example_df), expected_output);
	}

	#[test]
	fn add_columns() {
		let config = DatabaseConfig::default();
		let config_5 = DatabaseConfig::with_columns(Some(5));

		let path = RandomTempPath::create_dir();

		// open empty, add 5.
		{
			let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 0);

			for i in 0..5 {
				db.add_column().unwrap();
				assert_eq!(db.num_columns(), i + 1);
			}
		}

		// reopen as 5.
		{
			let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 5);
		}
	}

	#[test]
	fn drop_columns() {
		let config = DatabaseConfig::default();
		let config_5 = DatabaseConfig::with_columns(Some(5));

		let path = RandomTempPath::create_dir();

		// open 5, remove all.
		{
			let db = Database::open(&config_5, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 5);

			for i in (0..5).rev() {
				db.drop_column().unwrap();
				assert_eq!(db.num_columns(), i);
			}
		}

		// reopen as 0.
		{
			let db = Database::open(&config, path.as_path().to_str().unwrap()).unwrap();
			assert_eq!(db.num_columns(), 0);
		}
	}
}