// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Key-Value store abstraction with `RocksDB` backend.

#[macro_use]
extern crate error_chain;
extern crate elastic_array;
extern crate ethcore_bytes as bytes;

use std::io;
use elastic_array::{ElasticArray128, ElasticArray32};
use bytes::Bytes;

/// Required length of prefixes.
pub const PREFIX_LEN: usize = 12;

pub type DBValue = ElasticArray128<u8>;

error_chain! {
	types {
		Error, ErrorKind, ResultExt;
	}

	foreign_links {
		Io(io::Error);
	}
}

/// Write transaction. Batches a sequence of put/delete operations for efficiency.
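///
/// A minimal usage sketch (illustrative only; `db` stands for any `KeyValueDB`
/// implementation and is not defined in this crate):
///
/// ```ignore
/// let mut tx = DBTransaction::new();
/// tx.put(None, b"dog", b"cat");   // insert into the default (`None`) column
/// tx.delete(None, b"stale-key");  // queue a deletion in the same batch
/// db.write(tx)?;                  // hand the whole batch to the database
/// ```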
#[derive(Default, Clone, PartialEq)]
pub struct DBTransaction {
	pub ops: Vec<DBOp>,
}

#[derive(Clone, PartialEq)]
pub enum DBOp {
	Insert {
		col: Option<u32>,
		key: ElasticArray32<u8>,
		value: DBValue,
	},
	InsertCompressed {
		col: Option<u32>,
		key: ElasticArray32<u8>,
		value: DBValue,
	},
	Delete {
		col: Option<u32>,
		key: ElasticArray32<u8>,
	}
}

impl DBTransaction {
	/// Create new transaction.
	pub fn new() -> DBTransaction {
		DBTransaction::with_capacity(256)
	}

	/// Create new transaction with capacity.
	pub fn with_capacity(cap: usize) -> DBTransaction {
		DBTransaction {
			ops: Vec::with_capacity(cap)
		}
	}

	/// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write.
	pub fn put(&mut self, col: Option<u32>, key: &[u8], value: &[u8]) {
		let mut ekey = ElasticArray32::new();
		ekey.append_slice(key);
		self.ops.push(DBOp::Insert {
			col: col,
			key: ekey,
			value: DBValue::from_slice(value),
		});
	}

	/// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write.
	pub fn put_vec(&mut self, col: Option<u32>, key: &[u8], value: Bytes) {
		let mut ekey = ElasticArray32::new();
		ekey.append_slice(key);
		self.ops.push(DBOp::Insert {
			col: col,
			key: ekey,
			value: DBValue::from_vec(value),
		});
	}

	/// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write.
	/// Value will be RLP-compressed on flush
	pub fn put_compressed(&mut self, col: Option<u32>, key: &[u8], value: Bytes) {
		let mut ekey = ElasticArray32::new();
		ekey.append_slice(key);
		self.ops.push(DBOp::InsertCompressed {
			col: col,
			key: ekey,
			value: DBValue::from_vec(value),
		});
	}

	/// Delete value by key.
	pub fn delete(&mut self, col: Option<u32>, key: &[u8]) {
		let mut ekey = ElasticArray32::new();
		ekey.append_slice(key);
		self.ops.push(DBOp::Delete {
			col: col,
			key: ekey,
		});
	}
}

/// Generic key-value database.
///
/// This makes a distinction between "buffered" and "flushed" values. Values which have been
/// written can always be read, but may be present in an in-memory buffer. Values which have
/// been flushed have been moved to backing storage, like a RocksDB instance. There are certain
/// operations which are only guaranteed to operate on flushed data and not buffered,
/// although implementations may differ in this regard.
///
/// The contents of an interior buffer may be explicitly flushed using the `flush` method.
///
/// The `KeyValueDB` also deals in "column families", which can be thought of as distinct
/// stores within a database. Keys written in one column family will not be accessible from
/// any other. The number of column families must be specified at initialization, with a
/// differing interface for each database. The `None` argument in place of a column index
/// is always supported.
///
/// The API laid out here, along with the `Sync` bound, implies interior synchronization for
/// implementations.
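///
/// A rough usage sketch (illustrative only; `open_db()` is a placeholder for constructing
/// some concrete implementation and is not part of this crate):
///
/// ```ignore
/// let db = open_db();
/// let mut tx = db.transaction();
/// tx.put(None, b"key", b"value");
/// db.write_buffered(tx);                      // written, but only buffered so far
/// assert!(db.get(None, b"key")?.is_some());   // buffered values can still be read
/// db.flush()?;                                // move buffered data to backing storage
/// for (k, v) in db.iter(None) {               // iteration only covers flushed data
///     println!("{:?} => {:?}", k, v);
/// }
/// ```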
pub trait KeyValueDB: Sync + Send {
	/// Helper to create a new transaction.
	fn transaction(&self) -> DBTransaction { DBTransaction::new() }

	/// Get a value by key.
	fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String>;

	/// Get a value by partial key. Only works for flushed data.
	fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>>;

	/// Write a transaction of changes to the buffer.
	fn write_buffered(&self, transaction: DBTransaction);

	/// Write a transaction of changes to the backing store.
	fn write(&self, transaction: DBTransaction) -> Result<(), String> {
		self.write_buffered(transaction);
		self.flush()
	}

	/// Flush all buffered data.
	fn flush(&self) -> Result<(), String>;

	/// Iterate over flushed data for a given column.
	fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>;

	/// Iterate over flushed data for a given column, starting from a given prefix.
	fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
		-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>;

	/// Attempt to replace this database with a new one located at the given path.
	fn restore(&self, new_db: &str) -> Result<(), Error>;
}
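
// The module below is an editorial illustration, not part of the original crate: a toy
// in-memory `KeyValueDB` showing one way the buffered/flushed contract described on the
// trait can be met. The names (`in_memory_sketch`, `InMemorySketch`) are invented here,
// compression is ignored (`InsertCompressed` is treated like a plain `Insert`), and
// `iter_from_prefix` is only approximated; real backends such as the RocksDB wrapper differ.
#[cfg(test)]
#[allow(dead_code)]
mod in_memory_sketch {
	use std::collections::{BTreeMap, HashMap};
	use std::sync::RwLock;
	use super::*;

	/// Per-column write buffer plus a map standing in for backing storage.
	/// A buffered `None` marks a pending deletion.
	#[derive(Default)]
	struct InMemorySketch {
		buffer: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, Option<DBValue>>>>,
		flushed: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, DBValue>>>,
	}

	impl KeyValueDB for InMemorySketch {
		fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBValue>, String> {
			// Buffered (unflushed) writes are visible to reads, per the trait docs.
			if let Some(pending) = self.buffer.read().unwrap().get(&col).and_then(|c| c.get(key)) {
				return Ok(pending.clone());
			}
			Ok(self.flushed.read().unwrap().get(&col).and_then(|c| c.get(key)).cloned())
		}

		fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
			// Only flushed data is consulted, as documented on the trait.
			self.flushed.read().unwrap().get(&col).and_then(|c| {
				c.iter()
					.find(|&(k, _)| k.starts_with(prefix))
					.map(|(_, v)| v.to_vec().into_boxed_slice())
			})
		}

		fn write_buffered(&self, transaction: DBTransaction) {
			let mut buffer = self.buffer.write().unwrap();
			for op in transaction.ops {
				match op {
					// Compression is ignored in this sketch.
					DBOp::Insert { col, key, value } | DBOp::InsertCompressed { col, key, value } => {
						buffer.entry(col).or_insert_with(BTreeMap::new).insert(key.to_vec(), Some(value));
					}
					DBOp::Delete { col, key } => {
						buffer.entry(col).or_insert_with(BTreeMap::new).insert(key.to_vec(), None);
					}
				}
			}
		}

		fn flush(&self) -> Result<(), String> {
			// Drain the buffer into the map standing in for backing storage.
			let mut buffer = self.buffer.write().unwrap();
			let mut flushed = self.flushed.write().unwrap();
			for (col, ops) in buffer.drain() {
				let column = flushed.entry(col).or_insert_with(BTreeMap::new);
				for (key, value) in ops {
					match value {
						Some(value) => { column.insert(key, value); }
						None => { column.remove(&key); }
					}
				}
			}
			Ok(())
		}

		fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
			// Snapshot the column so no lock is held while the caller iterates.
			let snapshot: Vec<(Box<[u8]>, Box<[u8]>)> = self.flushed.read().unwrap()
				.get(&col)
				.map(|c| c.iter()
					.map(|(k, v)| (k.clone().into_boxed_slice(), v.to_vec().into_boxed_slice()))
					.collect())
				.unwrap_or_else(Vec::new);
			Box::new(snapshot.into_iter())
		}

		fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
			-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
		{
			// Rough approximation: skip until the first key carrying the prefix.
			let prefix = prefix.to_vec();
			Box::new(self.iter(col).skip_while(move |&(ref k, _)| !k.starts_with(&prefix)))
		}

		fn restore(&self, _new_db: &str) -> Result<(), Error> {
			// Nothing to restore from in a purely in-memory sketch.
			Err("restore is not supported by this sketch".into())
		}
	}
}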