// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed `HashDB` implementation.
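//!
//! A rough sketch of the on-disk layout, as read and written by `commit` and `read_refs`
//! below (descriptive summary added here; the code is authoritative):
//!
//! - `VERSION_KEY` maps to the RLP-encoded `DB_VERSION`.
//! - `LATEST_ERA_KEY` maps to the RLP-encoded latest journalled era.
//! - Each journal record is keyed by the RLP list `[era, index, PADDING]` and holds
//!   `[ id, [insert_0, ...], [remove_0, ...] ]`.
//! - A key suffixed with an index byte (see `morph_key`) marks a hash that was already
//!   in the backing DB when it entered the journal.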

use common::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY};
use super::traits::JournalDB;
use kvdb::{Database, DBTransaction, DatabaseConfig};
#[cfg(test)]
use std::env;

#[derive(Clone, PartialEq, Eq)]
struct RefInfo {
	queue_refs: usize,
	in_archive: bool,
}

impl HeapSizeOf for RefInfo {
	fn heap_size_of_children(&self) -> usize { 0 }
}

impl fmt::Display for RefInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "{}+{}", self.queue_refs, if self.in_archive { 1 } else { 0 })
	}
}

impl fmt::Debug for RefInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "{}+{}", self.queue_refs, if self.in_archive { 1 } else { 0 })
	}
}

#[derive(Clone, PartialEq, Eq)]
enum RemoveFrom {
	Queue,
	Archive,
}

/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
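///
/// # Example
///
/// A minimal usage sketch (illustrative only; assumes a writable path and is not run as
/// a doc-test):
///
/// ```ignore
/// let mut jdb = EarlyMergeDB::new("/tmp/test-journaldb", DatabaseConfig::default());
/// let key = jdb.insert(b"dog");                                 // goes into the memory overlay
/// jdb.commit(0, &b"0".sha3(), None).unwrap();                   // journalled under era 0
/// jdb.remove(&key);                                             // latent: not deleted yet
/// jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); // era 0 becomes ancient
/// ```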
2016-03-12 11:19:42 +01:00
pub struct EarlyMergeDB {
2016-02-04 02:40:35 +01:00
overlay : MemoryDB ,
2016-03-06 22:39:04 +01:00
backing : Arc < Database > ,
2016-03-12 09:51:17 +01:00
refs : Option < Arc < RwLock < HashMap < H256 , RefInfo > > > > ,
2016-03-12 19:19:45 +01:00
latest_era : Option < u64 > ,
2016-01-18 12:41:31 +01:00
}
2016-04-12 03:42:50 +02:00
const DB_VERSION : u32 = 0x003 ;
2016-03-06 22:39:04 +01:00
const PADDING : [ u8 ; 10 ] = [ 0 u8 ; 10 ] ;
2016-02-04 21:33:30 +01:00

impl EarlyMergeDB {
	/// Create a new instance from file
	pub fn new(path: &str, config: DatabaseConfig) -> EarlyMergeDB {
		let opts = config.prefix(DB_PREFIX_LEN);
		let backing = Database::open(&opts, path).unwrap_or_else(|e| {
			panic!("Error opening state db: {}", e);
		});
		if !backing.is_empty() {
			match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
				Ok(Some(DB_VERSION)) => {},
				v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path)
			}
		} else {
			backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
		}

		let (latest_era, refs) = EarlyMergeDB::read_refs(&backing);
		let refs = Some(Arc::new(RwLock::new(refs)));
		EarlyMergeDB {
			overlay: MemoryDB::new(),
			backing: Arc::new(backing),
			refs: refs,
			latest_era: latest_era,
		}
	}

	/// Create a new instance with an anonymous temporary database.
	#[cfg(test)]
	fn new_temp() -> EarlyMergeDB {
		let mut dir = env::temp_dir();
		dir.push(H32::random().hex());
		Self::new(dir.to_str().unwrap(), DatabaseConfig::default())
	}

	fn morph_key(key: &H256, index: u8) -> Bytes {
		let mut ret = key.to_bytes();
		ret.push(index);
		ret
	}

	// The next three are valid only as long as there is an insert operation of `key` in the journal.
	fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); }
	fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); }
	fn is_already_in(backing: &Database, key: &H256) -> bool {
		backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
	}
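
	// Illustration (added comment, not from the original source): for a key `h`,
	// `morph_key(&h, 0)` is simply the 32 hash bytes followed by the index byte 0x00.
	// The three helpers above use that 33-byte key as a backing-DB marker meaning
	// "this key was already in the archive when it entered the journal".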

	fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, trace: bool) {
		for &(ref h, ref d) in inserts {
			if let Some(c) = refs.get_mut(h) {
				// already counting. increment.
				c.queue_refs += 1;
				if trace {
					trace!(target: "jdb.fine", "insert({}): In queue: Incrementing refs to {}", h, c.queue_refs);
				}
				continue;
			}

			// this is the first entry for this node in the journal.
			if backing.get(h).expect("Low-level database error. Some issue with your hard disk?").is_some() {
				// already in the backing DB. start counting, and remember it was already in.
				Self::set_already_in(batch, &h);
				refs.insert(h.clone(), RefInfo { queue_refs: 1, in_archive: true });
				if trace {
					trace!(target: "jdb.fine", "insert({}): New to queue, in DB: Recording and inserting into queue", h);
				}
				continue;
			}

			// Gets removed when a key leaves the journal, so should never be set when we're placing a new key.
			//Self::reset_already_in(&h);
			assert!(!Self::is_already_in(backing, &h));
			batch.put(h, d).expect("Low-level database error. Some issue with your hard disk?");
			refs.insert(h.clone(), RefInfo { queue_refs: 1, in_archive: false });
			if trace {
				trace!(target: "jdb.fine", "insert({}): New to queue, not in DB: Inserting into queue and DB", h);
			}
		}
	}

	fn replay_keys(inserts: &[H256], backing: &Database, refs: &mut HashMap<H256, RefInfo>) {
		trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs);
		for h in inserts {
			if let Some(c) = refs.get_mut(h) {
				// already counting. increment.
				c.queue_refs += 1;
				continue;
			}

			// this is the first entry for this node in the journal.
			// it is initialised to 1 if it was already in.
			refs.insert(h.clone(), RefInfo { queue_refs: 1, in_archive: Self::is_already_in(backing, h) });
		}
		trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs);
	}

	fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, from: RemoveFrom, trace: bool) {
		// with a remove on {queue_refs: 1, in_archive: true}, we have two options:
		// - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive)
		// - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue)
		// (the latter option would then mean removing the RefInfo, since it would no longer be counted in the queue.)
		// both are valid, but we switch between them depending on context.
		// All inserts in queue (i.e. those which may yet be reverted) have an entry in refs.
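		//
		// A worked illustration (added comment, not part of the original): take a key whose
		// state is {queue_refs: 1, in_archive: true}.
		// - RemoveFrom::Archive flips it to {queue_refs: 1, in_archive: false}: the refs entry
		//   survives (the queue still holds the key) and only the already-in marker is cleared.
		// - RemoveFrom::Queue drops the refs entry instead: the key leaves the queue but its
		//   data stays in the backing DB as the archived copy.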
		for h in deletes.iter() {
			let mut n: Option<RefInfo> = None;
			if let Some(c) = refs.get_mut(h) {
				if c.in_archive && from == RemoveFrom::Archive {
					c.in_archive = false;
					Self::reset_already_in(batch, h);
					if trace {
						trace!(target: "jdb.fine", "remove({}): In archive, 1 in queue: Reducing to queue only and recording", h);
					}
					continue;
				} else if c.queue_refs > 1 {
					c.queue_refs -= 1;
					if trace {
						trace!(target: "jdb.fine", "remove({}): In queue > 1 refs: Decrementing ref count to {}", h, c.queue_refs);
					}
					continue;
				} else {
					n = Some(c.clone());
				}
			}
			match n {
				Some(RefInfo { queue_refs: 1, in_archive: true }) => {
					refs.remove(h);
					Self::reset_already_in(batch, h);
					if trace {
						trace!(target: "jdb.fine", "remove({}): In archive, 1 in queue: Removing from queue and leaving in archive", h);
					}
				}
				Some(RefInfo { queue_refs: 1, in_archive: false }) => {
					refs.remove(h);
					batch.delete(h).expect("Low-level database error. Some issue with your hard disk?");
					if trace {
						trace!(target: "jdb.fine", "remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h);
					}
				}
				None => {
					// Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs.
					//assert!(!Self::is_already_in(db, &h));
					batch.delete(h).expect("Low-level database error. Some issue with your hard disk?");
					if trace {
						trace!(target: "jdb.fine", "remove({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h);
					}
				}
				_ => panic!("Invalid value in refs: {:?}", n),
			}
		}
	}

	#[cfg(test)]
	fn can_reconstruct_refs(&self) -> bool {
		let (latest_era, reconstructed) = Self::read_refs(&self.backing);
		let refs = self.refs.as_ref().unwrap().write();
		if *refs != reconstructed || latest_era != self.latest_era {
			let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) { None } else { Some((k.clone(), v.clone())) }).collect::<HashMap<_, _>>();
			let clean_recon = reconstructed.into_iter().filter_map(|(k, v)| if refs.get(&k) == Some(&v) { None } else { Some((k.clone(), v.clone())) }).collect::<HashMap<_, _>>();
			warn!(target: "jdb", "mem: {:?} != log: {:?}", clean_refs, clean_recon);
			false
		} else {
			true
		}
	}

	fn payload(&self, key: &H256) -> Option<Bytes> {
		self.backing.get(key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
	}

	fn read_refs(db: &Database) -> (Option<u64>, HashMap<H256, RefInfo>) {
		let mut refs = HashMap::new();
		let mut latest_era = None;
		if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
			let mut era = decode::<u64>(&val);
			latest_era = Some(era);
			loop {
				let mut index = 0usize;
				while let Some(rlp_data) = db.get({
					let mut r = RlpStream::new_list(3);
					r.append(&era);
					r.append(&index);
					r.append(&&PADDING[..]);
					&r.drain()
				}).expect("Low-level database error.") {
					let rlp = Rlp::new(&rlp_data);
					let inserts: Vec<H256> = rlp.val_at(1);
					Self::replay_keys(&inserts, db, &mut refs);
					index += 1;
				};
				if index == 0 || era == 0 {
					break;
				}
				era -= 1;
			}
		}
		(latest_era, refs)
	}
}

impl HashDB for EarlyMergeDB {
	fn keys(&self) -> HashMap<H256, i32> {
		let mut ret: HashMap<H256, i32> = HashMap::new();
		for (key, _) in self.backing.iter() {
			let h = H256::from_slice(key.deref());
			ret.insert(h, 1);
		}

		for (key, refs) in self.overlay.keys().into_iter() {
			let refs = *ret.get(&key).unwrap_or(&0) + refs;
			ret.insert(key, refs);
		}
		ret
	}

	fn get(&self, key: &H256) -> Option<&[u8]> {
		let k = self.overlay.raw(key);
		match k {
			Some(&(ref d, rc)) if rc > 0 => Some(d),
			_ => {
				if let Some(x) = self.payload(key) {
					Some(&self.overlay.denote(key, x).0)
				}
				else {
					None
				}
			}
		}
	}

	fn contains(&self, key: &H256) -> bool {
		self.get(key).is_some()
	}

	fn insert(&mut self, value: &[u8]) -> H256 {
		self.overlay.insert(value)
	}

	fn emplace(&mut self, key: H256, value: Bytes) {
		self.overlay.emplace(key, value);
	}

	fn remove(&mut self, key: &H256) {
		self.overlay.remove(key);
	}
}

impl JournalDB for EarlyMergeDB {
	fn boxed_clone(&self) -> Box<JournalDB> {
		Box::new(EarlyMergeDB {
			overlay: self.overlay.clone(),
			backing: self.backing.clone(),
			refs: self.refs.clone(),
			latest_era: self.latest_era.clone(),
		})
	}

	fn is_empty(&self) -> bool {
		self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none()
	}

	fn latest_era(&self) -> Option<u64> { self.latest_era }

	fn mem_used(&self) -> usize {
		self.overlay.mem_used() + match self.refs {
			Some(ref c) => c.read().heap_size_of_children(),
			None => 0
		}
	}

	fn state(&self, id: &H256) -> Option<Bytes> {
		self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
	}

	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
	fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
		// journal format:
		// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
		// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
		// [era, n] => [ ... ]

		// TODO: store reclaim_period.

		// When we make a new commit, we make a journal of all blocks in the recent history and record
		// all keys that were inserted and deleted. The journal is ordered by era; multiple commits can
		// share the same era. This forms a data structure similar to a queue but whose items are tuples.
		// By the time it comes to remove a tuple from the queue (i.e. when the era passes from recent
		// history into ancient history), only one commit from that tuple is considered canonical. This
		// commit is kept in the main backing database, whereas any others from the same era are reverted.
		//
		// It is possible that a key, properly available in the backing database, is deleted and re-inserted
		// in the recent history queue, yet has both operations in commits that are eventually non-canonical.
		// To prevent the original, and still required, key from being deleted, we maintain a reference count
		// which includes an original key, if any.
		//
		// The semantics of the `counter` are:
		// insert key k:
		//   counter already contains k: count += 1
		//   counter doesn't contain k:
		//     backing db contains k: count = 1
		//     backing db doesn't contain k: insert into backing db, count = 0
		// delete key k:
		//   counter contains k (count is asserted to be non-zero):
		//     count > 1: counter -= 1
		//     count == 1: remove counter
		//     count == 0: remove key from backing db
		//   counter doesn't contain k: remove key from backing db
		//
		// Practically, this means that for each commit block turning from recent to ancient we do the
		// following:
		// is_canonical:
		//   inserts: Ignored (left alone in the backing database).
		//   deletes: Enacted; however, recent history queue is checked for ongoing references. This is
		//            reduced as a preference to deletion from the backing database.
		// !is_canonical:
		//   inserts: Reverted; however, recent history queue is checked for ongoing references. This is
		//            reduced as a preference to deletion from the backing database.
		//   deletes: Ignored (they were never inserted).
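		//
		// A concrete illustration of the rules above (an added example, not part of the original
		// comment): if key k is already in the backing db, the first journalled insert records
		// {queue_refs: 1, in_archive: true}; a second insert of k in a sibling fork only bumps
		// queue_refs to 2. Reverting one fork brings it back to 1, and once the last journal
		// reference is gone the archived copy is what remains (or, had k never been archived,
		// the key is deleted from the backing db).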

		// record new commit's details.
		let mut refs = self.refs.as_ref().unwrap().write();
		let batch = DBTransaction::new();
		let trace = false;
		{
			let mut index = 0usize;
			let mut last;

			while try!(self.backing.get({
				let mut r = RlpStream::new_list(3);
				r.append(&now);
				r.append(&index);
				r.append(&&PADDING[..]);
				last = r.drain();
				&last
			})).is_some() {
				index += 1;
			}

			let drained = self.overlay.drain();

			if trace {
				trace!(target: "jdb", "commit: #{} ({}), end era: {:?}", now, id, end);
			}

			let removes: Vec<H256> = drained
				.iter()
				.filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None })
				.collect();
			let inserts: Vec<(H256, Bytes)> = drained
				.into_iter()
				.filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None })
				.collect();

			// TODO: check all removes are in the db.

			let mut r = RlpStream::new_list(3);
			r.append(id);

			// Process the new inserts.
			// We use the inserts for three things. For each:
			// - we place into the backing DB or increment the counter if already in;
			// - we note in the backing db that it was already in;
			// - we write the key into our journal for this block;
			r.begin_list(inserts.len());
			inserts.iter().foreach(|&(k, _)| { r.append(&k); });
			r.append(&removes);
			Self::insert_keys(&inserts, &self.backing, &mut refs, &batch, trace);
			if trace {
				let ins = inserts.iter().map(|&(k, _)| k).collect::<Vec<_>>();
				trace!(target: "jdb.ops", "Inserts: {:?}", ins);
				trace!(target: "jdb.ops", "Deletes: {:?}", removes);
			}
			try!(batch.put(&last, r.as_raw()));
			if self.latest_era.map_or(true, |e| now > e) {
				try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
				self.latest_era = Some(now);
			}
		}

		// apply old commits' details
		if let Some((end_era, canon_id)) = end {
			let mut index = 0usize;
			let mut last;
			while let Some(rlp_data) = try!(self.backing.get({
				let mut r = RlpStream::new_list(3);
				r.append(&end_era);
				r.append(&index);
				r.append(&&PADDING[..]);
				last = r.drain();
				&last
			})) {
				let rlp = Rlp::new(&rlp_data);
				let inserts: Vec<H256> = rlp.val_at(1);

				if canon_id == rlp.val_at(0) {
					// Collect keys to be removed. Canon block - remove the (enacted) deletes.
					let deletes: Vec<H256> = rlp.val_at(2);
					if trace {
						trace!(target: "jdb.ops", "Expunging: {:?}", deletes);
					}
					Self::remove_keys(&deletes, &mut refs, &batch, RemoveFrom::Archive, trace);

					if trace {
						trace!(target: "jdb.ops", "Finalising: {:?}", inserts);
					}
					for k in &inserts {
						match refs.get(k).cloned() {
							None => {
								// [in archive] -> SHIFT remove -> SHIFT insert None->Some{queue_refs: 1, in_archive: true} -> TAKE remove Some{queue_refs: 1, in_archive: true}->None -> TAKE insert
								// already expunged from the queue (which is allowed since the key is in the archive).
								// leave well alone.
							}
							Some(RefInfo { queue_refs: 1, in_archive: false }) => {
								// just delete the refs entry.
								refs.remove(k);
							}
							Some(RefInfo { queue_refs: x, in_archive: false }) => {
								// must set already in;
								Self::set_already_in(&batch, k);
								refs.insert(k.clone(), RefInfo { queue_refs: x - 1, in_archive: true });
							}
							Some(RefInfo { in_archive: true, .. }) => {
								// Invalid! Reinserted the same key twice.
								warn!("Key {} inserted twice into same fork.", k);
							}
						}
					}
				} else {
					// Collect keys to be removed. Non-canon block - remove the (reverted) inserts.
					if trace {
						trace!(target: "jdb.ops", "Reverting: {:?}", inserts);
					}
					Self::remove_keys(&inserts, &mut refs, &batch, RemoveFrom::Queue, trace);
				}

				try!(batch.delete(&last));
				index += 1;
			}
			if trace {
				trace!(target: "jdb", "EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id);
			}
		}

		try!(self.backing.write(batch));

		// Comment out for now. TODO: automatically enable in tests.
		if trace {
			trace!(target: "jdb", "OK: {:?}", refs.clone());
		}

		Ok(0)
	}
}

#[cfg(test)]
mod tests {
	#![cfg_attr(feature="dev", allow(blacklisted_name))]
	#![cfg_attr(feature="dev", allow(similar_names))]

	use common::*;
	use super::*;
	use super::super::traits::JournalDB;
	use hashdb::*;
	use log::init_log;
	use kvdb::DatabaseConfig;

	#[test]
	fn insert_same_in_fork() {
		// history is 1
		let mut jdb = EarlyMergeDB::new_temp();

		let x = jdb.insert(b"X");
		jdb.commit(1, &b"1".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit(2, &b"2".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&x);
		jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		let x = jdb.insert(b"X");
		jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		assert!(jdb.contains(&x));
	}

	#[test]
	fn insert_older_era() {
		let mut jdb = EarlyMergeDB::new_temp();
		let foo = jdb.insert(b"foo");
		jdb.commit(0, &b"0a".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		let bar = jdb.insert(b"bar");
		jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&bar);
		jdb.commit(0, &b"0b".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();

		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));
	}

	#[test]
	fn long_history() {
		// history is 3
		let mut jdb = EarlyMergeDB::new_temp();
		let h = jdb.insert(b"foo");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&h));
		jdb.remove(&h);
		jdb.commit(1, &b"1".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&h));
		jdb.commit(2, &b"2".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&h));
		jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&h));
		jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(!jdb.contains(&h));
	}

	#[test]
	fn complex() {
		// history is 1
		let mut jdb = EarlyMergeDB::new_temp();

		let foo = jdb.insert(b"foo");
		let bar = jdb.insert(b"bar");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));

		jdb.remove(&foo);
		jdb.remove(&bar);
		let baz = jdb.insert(b"baz");
		jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));
		assert!(jdb.contains(&baz));

		let foo = jdb.insert(b"foo");
		jdb.remove(&baz);
		jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(!jdb.contains(&bar));
		assert!(jdb.contains(&baz));

		jdb.remove(&foo);
		jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(!jdb.contains(&bar));
		assert!(!jdb.contains(&baz));

		jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(!jdb.contains(&foo));
		assert!(!jdb.contains(&bar));
		assert!(!jdb.contains(&baz));
	}

	#[test]
	fn fork() {
		// history is 1
		let mut jdb = EarlyMergeDB::new_temp();

		let foo = jdb.insert(b"foo");
		let bar = jdb.insert(b"bar");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));

		jdb.remove(&foo);
		let baz = jdb.insert(b"baz");
		jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&bar);
		jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));
		assert!(jdb.contains(&baz));

		jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(!jdb.contains(&baz));
		assert!(!jdb.contains(&bar));
	}

	#[test]
	fn overwrite() {
		// history is 1
		let mut jdb = EarlyMergeDB::new_temp();

		let foo = jdb.insert(b"foo");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));

		jdb.remove(&foo);
		jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		assert!(jdb.contains(&foo));
		jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
	}

	#[test]
	fn fork_same_key_one() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		let foo = jdb.insert(b"foo");
		jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		assert!(jdb.contains(&foo));

		jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
	}

	#[test]
	fn fork_same_key_other() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		let foo = jdb.insert(b"foo");
		jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		assert!(jdb.contains(&foo));

		jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
	}

	#[test]
	fn fork_ins_del_ins() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		let foo = jdb.insert(b"foo");
		jdb.commit(1, &b"1".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&foo);
		jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&foo);
		jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
	}

	#[test]
	fn reopen() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());
		let bar = H256::random();

		let foo = {
			let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
			// history is 1
			let foo = jdb.insert(b"foo");
			jdb.emplace(bar.clone(), b"bar".to_vec());
			jdb.commit(0, &b"0".sha3(), None).unwrap();
			assert!(jdb.can_reconstruct_refs());
			foo
		};

		{
			let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
			jdb.remove(&foo);
			jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());
		}

		{
			let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
			assert!(jdb.contains(&foo));
			assert!(jdb.contains(&bar));
			jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(!jdb.contains(&foo));
		}
	}

	#[test]
	fn insert_delete_insert_delete_insert_expunge() {
		init_log();
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());

		// history is 4
		let foo = jdb.insert(b"foo");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit(1, &b"1".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit(2, &b"2".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit(3, &b"3".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		// expunge foo
		jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
	}

	#[test]
	fn forked_insert_delete_insert_delete_insert_expunge() {
		init_log();
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());

		// history is 4
		let foo = jdb.insert(b"foo");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit(1, &b"1a".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit(1, &b"1b".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit(2, &b"2a".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit(2, &b"2b".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit(3, &b"3a".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit(3, &b"3b".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		// expunge foo
		jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
	}

	#[test]
	fn broken_assert() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
		// history is 1
		let foo = jdb.insert(b"foo");
		jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		// foo is ancient history.
		jdb.remove(&foo);
		jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();	// BROKEN
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));

		jdb.remove(&foo);
		jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(!jdb.contains(&foo));
	}

	#[test]
	fn reopen_test() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
		// history is 4
		let foo = jdb.insert(b"foo");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit(1, &b"1".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit(2, &b"2".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit(3, &b"3".sha3(), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		// foo is ancient history.
		jdb.insert(b"foo");
		let bar = jdb.insert(b"bar");
		jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.remove(&bar);
		jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.insert(b"bar");
		jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap();
		assert!(jdb.can_reconstruct_refs());
	}

	#[test]
	fn reopen_remove_three() {
		init_log();

		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let foo = b"foo".sha3();

		{
			let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
			// history is 1
			jdb.insert(b"foo");
			jdb.commit(0, &b"0".sha3(), None).unwrap();
			assert!(jdb.can_reconstruct_refs());
			jdb.commit(1, &b"1".sha3(), None).unwrap();
			assert!(jdb.can_reconstruct_refs());

			// foo is ancient history.
			jdb.remove(&foo);
			jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));

			jdb.insert(b"foo");
			jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));

		// incantation to reopen the db
		}; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());

			jdb.remove(&foo);
			jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));

		// incantation to reopen the db
		}; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());

			jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));

		// incantation to reopen the db
		}; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());

			jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(!jdb.contains(&foo));
		}
	}

	#[test]
	fn reopen_fork() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());

		let (foo, bar, baz) = {
			let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
			// history is 1
			let foo = jdb.insert(b"foo");
			let bar = jdb.insert(b"bar");
			jdb.commit(0, &b"0".sha3(), None).unwrap();
			assert!(jdb.can_reconstruct_refs());
			jdb.remove(&foo);
			let baz = jdb.insert(b"baz");
			jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());

			jdb.remove(&bar);
			jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			(foo, bar, baz)
		};

		{
			let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
			jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));
			assert!(!jdb.contains(&baz));
			assert!(!jdb.contains(&bar));
		}
	}
}