2016-02-05 13:40:41 +01:00
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
2016-01-18 12:41:31 +01:00
//! Disk-backed HashDB implementation.
use common ::* ;
use rlp ::* ;
use hashdb ::* ;
2016-02-04 02:40:35 +01:00
use memorydb ::* ;
2016-02-18 03:46:24 +01:00
use kvdb ::{ Database , DBTransaction , DatabaseConfig } ;
2016-01-31 17:01:36 +01:00
#[ cfg(test) ]
use std ::env ;
2016-01-18 12:41:31 +01:00
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
/// and, possibly, latent-removal semantics.
///
/// If `journal_overlay` is `None`, then it behaves exactly like OverlayDB. If not it behaves
/// differently:
///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
pub struct JournalDB {
	// Uncommitted writes of the current transaction; drained on `commit()`.
	transaction_overlay: MemoryDB,
	// Shared handle to the on-disk key-value store.
	backing: Arc<Database>,
	// Journalled-but-not-yet-canonical data, shared between clones of this DB.
	// `None` means journalling is disabled and commits bypass the journal entirely.
	journal_overlay: Option<Arc<RwLock<JournalOverlay>>>,
}
// In-memory view of data that has been journalled but not yet finalized to the
// backing database, plus the journal records describing each pending era.
struct JournalOverlay {
	// Values inserted by journalled commits, keyed by hash.
	backing_overlay: MemoryDB,
	// Pending journal records, oldest era at the front.
	journal: VecDeque<JournalEntry>
}
// One journalled commit: which block it belongs to and which keys it
// inserted/removed. Mirrors the RLP record written to the backing DB.
struct JournalEntry {
	// Block id this commit was made under.
	id: H256,
	// Position among commits sharing the same era.
	index: usize,
	// Era (block number metric) of the commit.
	era: u64,
	// Keys inserted by this commit.
	insertions: Vec<H256>,
	// Keys removed by this commit (applied only once the era is finalized).
	deletions: Vec<H256>,
}
impl HeapSizeOf for JournalEntry {
	// Only the two key vectors own heap memory; the remaining fields are inline.
	fn heap_size_of_children(&self) -> usize {
		let inserted = self.insertions.heap_size_of_children();
		let deleted = self.deletions.heap_size_of_children();
		inserted + deleted
	}
}
2016-02-04 21:33:30 +01:00
impl Clone for JournalDB {
fn clone ( & self ) -> JournalDB {
JournalDB {
2016-03-06 17:28:50 +01:00
transaction_overlay : MemoryDB ::new ( ) ,
2016-02-04 21:33:30 +01:00
backing : self . backing . clone ( ) ,
2016-03-06 17:28:50 +01:00
journal_overlay : self . journal_overlay . clone ( ) ,
2016-02-04 21:33:30 +01:00
}
}
}
2016-02-18 03:46:24 +01:00
// all keys must be at least 12 bytes
// Key under which the most recent era number is stored ("last" + zero padding).
const LATEST_ERA_KEY: [u8; 12] = [b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0];
// Key under which the database format version is stored ("jver" + zero padding).
const VERSION_KEY: [u8; 12] = [b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0];

// Current database format version with journalling enabled.
const DB_VERSION: u32 = 3;
// Same format, but flags that the DB was created without journalling.
const DB_VERSION_NO_JOURNAL: u32 = 3 + 256;

// Padding appended to (era, index) journal keys to reach the 12-byte minimum.
const PADDING: [u8; 10] = [0u8; 10];
2016-01-18 12:41:31 +01:00
impl JournalDB {
2016-02-18 03:46:24 +01:00
/// Create a new instance from file
pub fn new ( path : & str ) -> JournalDB {
2016-03-04 20:19:36 +01:00
Self ::from_prefs ( path , true )
}
/// Create a new instance from file
pub fn from_prefs ( path : & str , prefer_journal : bool ) -> JournalDB {
2016-02-18 03:46:24 +01:00
let opts = DatabaseConfig {
prefix_size : Some ( 12 ) //use 12 bytes as prefix, this must match account_db prefix
} ;
2016-02-18 21:15:56 +01:00
let backing = Database ::open ( & opts , path ) . unwrap_or_else ( | e | {
2016-02-18 03:46:24 +01:00
panic! ( " Error opening state db: {} " , e ) ;
} ) ;
2016-03-04 20:19:36 +01:00
let with_journal ;
2016-02-18 03:46:24 +01:00
if ! backing . is_empty ( ) {
2016-02-05 01:49:06 +01:00
match backing . get ( & VERSION_KEY ) . map ( | d | d . map ( | v | decode ::< u32 > ( & v ) ) ) {
2016-03-04 20:19:36 +01:00
Ok ( Some ( DB_VERSION ) ) = > { with_journal = true ; } ,
Ok ( Some ( DB_VERSION_NO_JOURNAL ) ) = > { with_journal = false ; } ,
2016-02-05 01:49:06 +01:00
v = > panic! ( " Incompatible DB version, expected {} , got {:?} " , DB_VERSION , v )
}
} else {
2016-03-04 20:19:36 +01:00
backing . put ( & VERSION_KEY , & encode ( & ( if prefer_journal { DB_VERSION } else { DB_VERSION_NO_JOURNAL } ) ) ) . expect ( " Error writing version to database " ) ;
with_journal = prefer_journal ;
2016-02-05 01:49:06 +01:00
}
2016-03-04 20:19:36 +01:00
2016-03-06 17:28:50 +01:00
let journal_overlay = if with_journal {
Some ( Arc ::new ( RwLock ::new ( JournalDB ::read_overlay ( & backing ) ) ) )
2016-03-04 20:19:36 +01:00
} else {
None
} ;
2016-01-21 23:33:52 +01:00
JournalDB {
2016-03-06 17:28:50 +01:00
transaction_overlay : MemoryDB ::new ( ) ,
2016-02-18 03:46:24 +01:00
backing : Arc ::new ( backing ) ,
2016-03-06 17:28:50 +01:00
journal_overlay : journal_overlay ,
2016-01-21 23:33:52 +01:00
}
}
2016-01-18 12:41:31 +01:00
/// Create a new instance with an anonymous temporary database.
2016-01-31 10:52:07 +01:00
#[ cfg(test) ]
2016-01-18 12:41:31 +01:00
pub fn new_temp ( ) -> JournalDB {
let mut dir = env ::temp_dir ( ) ;
dir . push ( H32 ::random ( ) . hex ( ) ) ;
2016-02-18 03:46:24 +01:00
Self ::new ( dir . to_str ( ) . unwrap ( ) )
2016-01-18 12:41:31 +01:00
}
2016-02-05 01:49:06 +01:00
/// Check if this database has any commits
pub fn is_empty ( & self ) -> bool {
2016-02-11 13:32:27 +01:00
self . backing . get ( & LATEST_ERA_KEY ) . expect ( " Low level database error " ) . is_none ( )
2016-02-05 01:49:06 +01:00
}
2016-01-18 12:41:31 +01:00
2016-03-04 20:19:36 +01:00
/// Commit all recent insert operations.
pub fn commit ( & mut self , now : u64 , id : & H256 , end : Option < ( u64 , H256 ) > ) -> Result < u32 , UtilError > {
2016-03-06 17:28:50 +01:00
let have_journal_overlay = self . journal_overlay . is_some ( ) ;
if have_journal_overlay {
self . commit_with_overlay ( now , id , end )
2016-03-04 20:19:36 +01:00
} else {
2016-03-06 17:28:50 +01:00
self . commit_without_overlay ( )
2016-03-04 20:19:36 +01:00
}
}
/// Drain the overlay and place it into a batch for the DB.
2016-03-04 23:53:57 +01:00
fn batch_overlay_insertions ( overlay : & mut MemoryDB , batch : & DBTransaction ) -> usize {
2016-03-06 17:28:50 +01:00
let mut insertions = 0 usize ;
let mut deletions = 0 usize ;
2016-03-04 20:19:36 +01:00
for i in overlay . drain ( ) . into_iter ( ) {
let ( key , ( value , rc ) ) = i ;
if rc > 0 {
assert! ( rc = = 1 ) ;
batch . put ( & key . bytes ( ) , & value ) . expect ( " Low-level database error. Some issue with your hard disk? " ) ;
2016-03-06 17:28:50 +01:00
insertions + = 1 ;
2016-03-04 20:19:36 +01:00
}
if rc < 0 {
assert! ( rc = = - 1 ) ;
2016-03-06 17:28:50 +01:00
deletions + = 1 ;
2016-03-04 20:19:36 +01:00
}
}
2016-03-06 17:28:50 +01:00
trace! ( " commit: Inserted {}, Deleted {} nodes " , insertions , deletions ) ;
insertions + deletions
2016-03-04 20:19:36 +01:00
}
2016-03-06 17:28:50 +01:00
/// Just commit the transaction overlay into the backing DB.
fn commit_without_overlay ( & mut self ) -> Result < u32 , UtilError > {
2016-03-04 20:19:36 +01:00
let batch = DBTransaction ::new ( ) ;
2016-03-06 17:28:50 +01:00
let ret = Self ::batch_overlay_insertions ( & mut self . transaction_overlay , & batch ) ;
2016-03-04 20:19:36 +01:00
try ! ( self . backing . write ( batch ) ) ;
Ok ( ret as u32 )
}
2016-01-18 12:41:31 +01:00
	/// Commit all recent insert operations and historical removals from the old era
	/// to the backing database.
	///
	/// First drains the transaction overlay into a new journal record keyed by
	/// `(now, index, PADDING)`, mirroring its insertions into the shared journal
	/// overlay. Then, if `end` names a finalized `(era, canonical id)`, replays
	/// every journal record up to and including that era: records matching the
	/// canonical id have their insertions written to disk and deletions applied;
	/// all replayed records are removed from the journal and overlay.
	fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
		// record new commit's details.
		trace!("commit: #{} ({}), end era: {:?}", now, id, end);
		// Write lock held for the whole commit; clones share this overlay.
		let mut journal_overlay = self.journal_overlay.as_mut().unwrap().write().unwrap();
		let batch = DBTransaction::new();
		{
			// Find the first free journal index for this era. Committing the
			// same id twice in one era is a bug (asserted below).
			let mut index = 0usize;
			let mut last;
			while {
				let record = try!(self.backing.get({
					// Journal key is the RLP of (era, index, padding).
					let mut r = RlpStream::new_list(3);
					r.append(&now);
					r.append(&index);
					r.append(&&PADDING[..]);
					last = r.drain();
					&last
				}));
				match record {
					Some(r) => {
						assert!(&Rlp::new(&r).val_at::<H256>(0) != id);
						true
					},
					None => false,
				}
			} {
				index += 1;
			}
			// Journal record payload: [id, [(key, value), ...], [deleted key, ...]].
			let mut r = RlpStream::new_list(3);
			let mut tx = self.transaction_overlay.drain();
			let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect();
			let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect();
			// Increase counter for each inserted key no matter if the block is canonical or not.
			let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None });
			r.append(id);
			r.begin_list(inserted_keys.len());
			for (k, v) in insertions {
				r.begin_list(2);
				r.append(&k);
				r.append(&v);
				// Inserted data becomes visible to lookups via the shared overlay.
				journal_overlay.backing_overlay.emplace(k, v);
			}
			r.append(&removed_keys);
			try!(batch.put(&last, r.as_raw()));
			try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
			journal_overlay.journal.push_back(JournalEntry { id: id.clone(), index: index, era: now, insertions: inserted_keys, deletions: removed_keys });
		}
		// apply old commits' details
		if let Some((end_era, canon_id)) = end {
			let mut canon_insertions: Vec<(H256, Bytes)> = Vec::new();
			let mut canon_deletions: Vec<H256> = Vec::new();
			let mut overlay_deletions: Vec<H256> = Vec::new();
			// Pop every journal record at or before the finalized era.
			while journal_overlay.journal.front().map_or(false, |e| e.era <= end_era) {
				let mut journal = journal_overlay.journal.pop_front().unwrap();
				//delete the record from the db
				let mut r = RlpStream::new_list(3);
				r.append(&journal.era);
				r.append(&journal.index);
				r.append(&&PADDING[..]);
				try!(batch.delete(&r.drain()));
				trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, journal.index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len());
				{
					// Only the canonical record's data survives to disk.
					if canon_id == journal.id {
						for h in &journal.insertions {
							match journal_overlay.backing_overlay.raw(&h) {
								// Still live in the overlay: schedule the value for the backing DB.
								Some(&(ref d, rc)) if rc > 0 => canon_insertions.push((h.clone(), d.clone())), //TODO: optimize this to avoid data copy
								_ => ()
							}
						}
						canon_deletions = journal.deletions;
					}
					// Every record's insertions (canonical or not) leave the overlay.
					overlay_deletions.append(&mut journal.insertions);
				}
				// NOTE(review): empty branch — appears to be leftover from a
				// refactor and has no effect; candidate for removal.
				if canon_id == journal.id {
				}
			}
			// apply canon inserts first
			for (k, v) in canon_insertions {
				try!(batch.put(&k, &v));
			}
			// clean the overlay
			for k in overlay_deletions {
				journal_overlay.backing_overlay.kill(&k);
			}
			// apply removes
			for k in canon_deletions {
				// Only delete from disk if the key is no longer present in the overlay.
				if !journal_overlay.backing_overlay.exists(&k) {
					try!(batch.delete(&k));
				}
			}
			journal_overlay.backing_overlay.purge();
		}
		try!(self.backing.write(batch));
		Ok(0 as u32)
	}
2016-02-04 02:40:35 +01:00
fn payload ( & self , key : & H256 ) -> Option < Bytes > {
self . backing . get ( & key . bytes ( ) ) . expect ( " Low-level database error. Some issue with your hard disk? " ) . map ( | v | v . to_vec ( ) )
2016-01-18 12:41:31 +01:00
}
2016-02-04 21:33:30 +01:00
2016-03-06 17:28:50 +01:00
	// Rebuild the in-memory journal overlay by replaying the journal records
	// stored in the backing database, walking eras backwards from the latest.
	fn read_overlay(db: &Database) -> JournalOverlay {
		let mut journal = VecDeque::new();
		let mut overlay = MemoryDB::new();
		let mut count = 0;
		if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
			let mut era = decode::<u64>(&val);
			loop {
				// Read every record index for this era until a key is missing.
				let mut index = 0usize;
				while let Some(rlp_data) = db.get({
					// Journal record key: RLP of (era, index, padding) — matches commit().
					let mut r = RlpStream::new_list(3);
					r.append(&era);
					r.append(&index);
					r.append(&&PADDING[..]);
					&r.drain()
				}).expect("Low-level database error.") {
					// Record payload: [id, [(key, value), ...], [deleted key, ...]].
					let rlp = Rlp::new(&rlp_data);
					let id: H256 = rlp.val_at(0);
					let insertions = rlp.at(1);
					let deletions: Vec<H256> = rlp.val_at(2);
					let mut inserted_keys = Vec::new();
					for r in insertions.iter() {
						let k: H256 = r.val_at(0);
						let v: Bytes = r.val_at(1);
						overlay.emplace(k.clone(), v);
						inserted_keys.push(k);
						count += 1;
					}
					// push_front while walking eras newest-to-oldest keeps the
					// journal ordered oldest-first, as commit() expects.
					journal.push_front(JournalEntry {
						id: id,
						index: index,
						era: era,
						insertions: inserted_keys,
						deletions: deletions,
					});
					index += 1;
				};
				// An era with no records marks the end of the journal; also stop at era 0.
				if index == 0 || era == 0 {
					break;
				}
				era -= 1;
			}
		}
		trace!("Recovered {} overlay entries, {} journal entries", count, journal.len());
		JournalOverlay { backing_overlay: overlay, journal: journal }
	}
/// Returns heap memory size used
pub fn mem_used ( & self ) -> usize {
let mut mem = self . transaction_overlay . mem_used ( ) ;
if let Some ( ref overlay ) = self . journal_overlay . as_ref ( ) {
let overlay = overlay . read ( ) . unwrap ( ) ;
mem + = overlay . backing_overlay . mem_used ( ) ;
mem + = overlay . journal . heap_size_of_children ( ) ;
}
mem
2016-02-04 21:33:30 +01:00
}
2016-01-18 12:41:31 +01:00
}
impl HashDB for JournalDB {
2016-02-04 02:40:35 +01:00
fn keys ( & self ) -> HashMap < H256 , i32 > {
let mut ret : HashMap < H256 , i32 > = HashMap ::new ( ) ;
2016-02-18 03:46:24 +01:00
for ( key , _ ) in self . backing . iter ( ) {
2016-02-04 02:40:35 +01:00
let h = H256 ::from_slice ( key . deref ( ) ) ;
ret . insert ( h , 1 ) ;
}
2016-03-06 17:28:50 +01:00
for ( key , refs ) in self . transaction_overlay . keys ( ) . into_iter ( ) {
2016-02-04 02:40:35 +01:00
let refs = * ret . get ( & key ) . unwrap_or ( & 0 ) + refs ;
ret . insert ( key , refs ) ;
}
ret
}
	// Resolve `key` through three layers: the transaction overlay, then the
	// shared journal overlay, then the backing database. Values found in the
	// lower layers are passed through `transaction_overlay.denote` —
	// presumably so the returned slice can borrow from `&self` rather than
	// the released read guard; confirm against MemoryDB::denote.
	fn lookup(&self, key: &H256) -> Option<&[u8]> {
		let k = self.transaction_overlay.raw(key);
		match k {
			// A live entry in the transaction overlay wins outright.
			Some(&(ref d, rc)) if rc > 0 => Some(d),
			_ => {
				// Copy out of the journal overlay so its lock is released here.
				let v = self.journal_overlay.as_ref().map_or(None, |ref j| j.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec()));
				match v {
					Some(x) => {
						Some(&self.transaction_overlay.denote(key, x).0)
					}
					_ => {
						// Final fallback: the backing database.
						if let Some(x) = self.payload(key) {
							Some(&self.transaction_overlay.denote(key, x).0)
						}
						else {
							None
						}
					}
				}
			}
		}
	}
	// True if `lookup` resolves `key` in any layer.
	fn exists(&self, key: &H256) -> bool {
		self.lookup(key).is_some()
	}
	// Insert `value` into the transaction overlay; returns its hash key.
	// Nothing reaches disk until `commit()`.
	fn insert(&mut self, value: &[u8]) -> H256 {
		self.transaction_overlay.insert(value)
	}
	// Insert `value` under an explicit `key` into the transaction overlay.
	fn emplace(&mut self, key: H256, value: Bytes) {
		self.transaction_overlay.emplace(key, value);
	}
	// Mark `key` as removed in the transaction overlay; with journalling the
	// removal only takes effect once its era is finalized by `commit()`.
	fn kill(&mut self, key: &H256) {
		self.transaction_overlay.kill(key);
	}
2016-01-18 12:41:31 +01:00
}
2016-01-18 13:30:01 +01:00
#[cfg(test)]
mod tests {
	use common::*;
	use super::*;
	use hashdb::*;

	// A removal survives lookups until its era falls out of the 3-block history.
	#[test]
	fn long_history() {
		// history is 3
		let mut jdb = JournalDB::new_temp();
		let h = jdb.insert(b"foo");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.exists(&h));
		jdb.remove(&h);
		jdb.commit(1, &b"1".sha3(), None).unwrap();
		assert!(jdb.exists(&h));
		jdb.commit(2, &b"2".sha3(), None).unwrap();
		assert!(jdb.exists(&h));
		jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.exists(&h));
		jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(!jdb.exists(&h));
	}

	// Interleaved inserts/removes of the same values across several eras.
	#[test]
	fn complex() {
		// history is 1
		let mut jdb = JournalDB::new_temp();
		let foo = jdb.insert(b"foo");
		let bar = jdb.insert(b"bar");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.exists(&foo));
		assert!(jdb.exists(&bar));
		jdb.remove(&foo);
		jdb.remove(&bar);
		let baz = jdb.insert(b"baz");
		jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.exists(&foo));
		assert!(jdb.exists(&bar));
		assert!(jdb.exists(&baz));
		let foo = jdb.insert(b"foo");
		jdb.remove(&baz);
		jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.exists(&foo));
		assert!(!jdb.exists(&bar));
		assert!(jdb.exists(&baz));
		jdb.remove(&foo);
		jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
		assert!(jdb.exists(&foo));
		assert!(!jdb.exists(&bar));
		assert!(!jdb.exists(&baz));
		jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
		assert!(!jdb.exists(&foo));
		assert!(!jdb.exists(&bar));
		assert!(!jdb.exists(&baz));
	}

	// Two competing blocks at era 1; only the canonicalized fork's changes stick.
	#[test]
	fn fork() {
		// history is 1
		let mut jdb = JournalDB::new_temp();
		let foo = jdb.insert(b"foo");
		let bar = jdb.insert(b"bar");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.exists(&foo));
		assert!(jdb.exists(&bar));
		jdb.remove(&foo);
		let baz = jdb.insert(b"baz");
		jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
		jdb.remove(&bar);
		jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.exists(&foo));
		assert!(jdb.exists(&bar));
		assert!(jdb.exists(&baz));
		jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
		assert!(jdb.exists(&foo));
		assert!(!jdb.exists(&baz));
		assert!(!jdb.exists(&bar));
	}

	// Re-inserting a key in the era after its removal must keep it alive.
	#[test]
	fn overwrite() {
		// history is 1
		let mut jdb = JournalDB::new_temp();
		let foo = jdb.insert(b"foo");
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		assert!(jdb.exists(&foo));
		jdb.remove(&foo);
		jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
		jdb.insert(b"foo");
		assert!(jdb.exists(&foo));
		jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
		assert!(jdb.exists(&foo));
		jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
		assert!(jdb.exists(&foo));
	}

	// The same key inserted on both forks must survive canonicalization of either.
	#[test]
	fn fork_same_key() {
		// history is 1
		let mut jdb = JournalDB::new_temp();
		jdb.commit(0, &b"0".sha3(), None).unwrap();
		let foo = jdb.insert(b"foo");
		jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
		jdb.insert(b"foo");
		jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
		assert!(jdb.exists(&foo));
		jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
		assert!(jdb.exists(&foo));
	}

	// Journal state must persist across closing and reopening the database.
	#[test]
	fn reopen() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());
		let bar = H256::random();

		let foo = {
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			// history is 1
			let foo = jdb.insert(b"foo");
			jdb.emplace(bar.clone(), b"bar".to_vec());
			jdb.commit(0, &b"0".sha3(), None).unwrap();
			foo
		};

		{
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			jdb.remove(&foo);
			jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
		}

		{
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			assert!(jdb.exists(&foo));
			assert!(jdb.exists(&bar));
			jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
			assert!(!jdb.exists(&foo));
		}
	}

	// A removal journalled before reopening still takes effect afterwards.
	#[test]
	fn reopen_remove() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());
		// NOTE(review): `bar` is unused in this test.
		let bar = H256::random();

		let foo = {
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			// history is 1
			let foo = jdb.insert(b"foo");
			jdb.commit(0, &b"0".sha3(), None).unwrap();
			jdb.insert(b"foo");
			jdb.commit(1, &b"1".sha3(), None).unwrap();
			foo
		};

		{
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			jdb.remove(&foo);
			jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
			assert!(jdb.exists(&foo));
			jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
			assert!(!jdb.exists(&foo));
		}
	}

	// Fork resolution must behave the same after the database is reopened.
	#[test]
	fn reopen_fork() {
		let mut dir = ::std::env::temp_dir();
		dir.push(H32::random().hex());
		let (foo, bar, baz) = {
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			// history is 1
			let foo = jdb.insert(b"foo");
			let bar = jdb.insert(b"bar");
			jdb.commit(0, &b"0".sha3(), None).unwrap();
			jdb.remove(&foo);
			let baz = jdb.insert(b"baz");
			jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
			jdb.remove(&bar);
			jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
			(foo, bar, baz)
		};

		{
			let mut jdb = JournalDB::new(dir.to_str().unwrap());
			jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
			assert!(jdb.exists(&foo));
			assert!(!jdb.exists(&baz));
			assert!(!jdb.exists(&bar));
		}
	}
}