// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! `JournalDB` over in-memory overlay

use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::io;
use std::sync::Arc;

use bytes::Bytes;
use ethereum_types::H256;
use hashdb::*;
use heapsize::HeapSizeOf;
use keccak_hasher::KeccakHasher;
use kvdb::{KeyValueDB, DBTransaction};
use memorydb::*;
use parking_lot::RwLock;
use plain_hasher::H256FastMap;
use rlp::{Rlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable};

use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, JournalDB, error_negatively_reference_hash};
use util::DatabaseKey;

/// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay
/// and, possibly, latent-removal semantics.
///
/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
///
/// There are two memory overlays:
/// - Transaction overlay contains current transaction data. It is merged with the history
/// overlay on each `commit()`
/// - History overlay contains all data inserted during the history period. When the node
/// in the overlay becomes ancient it is written to disk on `commit()`
///
/// There is also a journal maintained in memory and on the disk as well which lists insertions
/// and removals for each commit during the history period. This is used to track
/// data nodes that go out of history scope and must be written to disk.
///
/// Commit workflow:
/// 1. Create a new journal record from the transaction overlay.
/// 2. Insert each node from the transaction overlay into the History overlay, increasing reference
/// count if it is already there. Note that the reference counting is managed by `MemoryDB`
/// 3. Clear the transaction overlay.
/// 4. For a canonical journal record that becomes ancient insert its insertions into the disk DB
/// 5. For each journal record that goes out of the history scope (becomes ancient) remove its
/// insertions from the history overlay, decreasing the reference counter and removing the entry
/// if it reaches zero.
/// 6. For a canonical journal record that becomes ancient delete its removals from the disk only if
/// the removed key is not present in the history overlay.
/// 7. Delete ancient record from memory and disk.
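///
/// A minimal usage sketch (illustrative only; it mirrors the pattern used by the tests at the
/// bottom of this file and assumes the in-memory `kvdb_memorydb` backing and `keccak` helper
/// used there):
///
/// ```ignore
/// let backing = Arc::new(kvdb_memorydb::create(0));
/// let mut jdb = OverlayRecentDB::new(backing, None);
/// let key = jdb.insert(b"dog");                        // staged in the transaction overlay
/// jdb.commit_batch(0, &keccak(b"0"), None).unwrap();   // journalled under era 0
/// jdb.remove(&key);                                    // latent removal, applied once era 0 is ancient
/// jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
/// ```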
pub struct OverlayRecentDB {
	transaction_overlay: MemoryDB<KeccakHasher>,
	backing: Arc<KeyValueDB>,
	journal_overlay: Arc<RwLock<JournalOverlay>>,
	column: Option<u32>,
}

struct DatabaseValue {
	id: H256,
	inserts: Vec<(H256, DBValue)>,
	deletes: Vec<H256>,
}

impl Decodable for DatabaseValue {
	fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
		let id = rlp.val_at(0)?;
		let inserts = rlp.at(1)?.iter().map(|r| {
			let k = r.val_at(0)?;
			let v = DBValue::from_slice(r.at(1)?.data()?);
			Ok((k, v))
		}).collect::<Result<Vec<_>, _>>()?;
		let deletes = rlp.list_at(2)?;
		let value = DatabaseValue {
			id,
			inserts,
			deletes,
		};
		Ok(value)
	}
}

struct DatabaseValueRef<'a> {
	id: &'a H256,
	inserts: &'a [(H256, DBValue)],
	deletes: &'a [H256],
}

impl<'a> Encodable for DatabaseValueRef<'a> {
	fn rlp_append(&self, s: &mut RlpStream) {
		s.begin_list(3);
		s.append(self.id);
		s.begin_list(self.inserts.len());
		for kv in self.inserts {
			s.begin_list(2);
			s.append(&kv.0);
			s.append(&&*kv.1);
		}
		s.append_list(self.deletes);
	}
}

#[derive(PartialEq)]
struct JournalOverlay {
	backing_overlay: MemoryDB<KeccakHasher>, // Nodes added in the history period
	pending_overlay: H256FastMap<DBValue>, // Nodes being transferred from backing_overlay to backing db
	journal: HashMap<u64, Vec<JournalEntry>>,
	latest_era: Option<u64>,
	earliest_era: Option<u64>,
	cumulative_size: usize, // cumulative size of all entries.
}

#[derive(PartialEq)]
struct JournalEntry {
	id: H256,
	insertions: Vec<H256>,
	deletions: Vec<H256>,
}

impl HeapSizeOf for JournalEntry {
	fn heap_size_of_children(&self) -> usize {
		self.insertions.heap_size_of_children() + self.deletions.heap_size_of_children()
	}
}

impl Clone for OverlayRecentDB {
	fn clone(&self) -> OverlayRecentDB {
		OverlayRecentDB {
			transaction_overlay: self.transaction_overlay.clone(),
			backing: self.backing.clone(),
			journal_overlay: self.journal_overlay.clone(),
			column: self.column.clone(),
		}
	}
}

impl OverlayRecentDB {
	/// Create a new instance.
	pub fn new(backing: Arc<KeyValueDB>, col: Option<u32>) -> OverlayRecentDB {
		let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&*backing, col)));
		OverlayRecentDB {
			transaction_overlay: MemoryDB::new(),
			backing: backing,
			journal_overlay: journal_overlay,
			column: col,
		}
	}

	#[cfg(test)]
	fn can_reconstruct_refs(&self) -> bool {
		let reconstructed = Self::read_overlay(&*self.backing, self.column);
		let journal_overlay = self.journal_overlay.read();
		journal_overlay.backing_overlay == reconstructed.backing_overlay &&
			journal_overlay.pending_overlay == reconstructed.pending_overlay &&
			journal_overlay.journal == reconstructed.journal &&
			journal_overlay.latest_era == reconstructed.latest_era &&
			journal_overlay.cumulative_size == reconstructed.cumulative_size
	}

	fn payload(&self, key: &H256) -> Option<DBValue> {
		self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?")
	}

	fn read_overlay(db: &KeyValueDB, col: Option<u32>) -> JournalOverlay {
		let mut journal = HashMap::new();
		let mut overlay = MemoryDB::new();
		let mut count = 0;
		let mut latest_era = None;
		let mut earliest_era = None;
		let mut cumulative_size = 0;
		if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") {
			let mut era = decode::<u64>(&val).expect("decoding db value failed");
			latest_era = Some(era);
			loop {
				let mut db_key = DatabaseKey {
					era,
					index: 0usize,
				};
				while let Some(rlp_data) = db.get(col, &encode(&db_key)).expect("Low-level database error.") {
					trace!("read_overlay: era={}, index={}", era, db_key.index);
					let value = decode::<DatabaseValue>(&rlp_data).expect(&format!("read_overlay: Error decoding DatabaseValue era={}, index={}", era, db_key.index));
					count += value.inserts.len();
					let mut inserted_keys = Vec::new();
					for (k, v) in value.inserts {
						let short_key = to_short_key(&k);
						if !overlay.contains(&short_key) {
							cumulative_size += v.len();
						}
						overlay.emplace(short_key, v);
						inserted_keys.push(k);
					}
					journal.entry(era).or_insert_with(Vec::new).push(JournalEntry {
						id: value.id,
						insertions: inserted_keys,
						deletions: value.deletes,
					});
					db_key.index += 1;
					earliest_era = Some(era);
				}
				if db_key.index == 0 || era == 0 {
					break;
				}
				era -= 1;
			}
		}
		trace!("Recovered {} overlay entries, {} journal entries", count, journal.len());
		JournalOverlay {
			backing_overlay: overlay,
			pending_overlay: HashMap::default(),
			journal: journal,
			latest_era: latest_era,
			earliest_era: earliest_era,
			cumulative_size: cumulative_size,
		}
	}
}

#[inline]
fn to_short_key(key: &H256) -> H256 {
	let mut k = H256::new();
	k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]);
	k
}

impl JournalDB for OverlayRecentDB {
	fn boxed_clone(&self) -> Box<JournalDB> {
		Box::new(self.clone())
	}

	fn mem_used(&self) -> usize {
		let mut mem = self.transaction_overlay.mem_used();
		let overlay = self.journal_overlay.read();

		mem += overlay.backing_overlay.mem_used();
		mem += overlay.pending_overlay.heap_size_of_children();
		mem += overlay.journal.heap_size_of_children();

		mem
	}

	fn journal_size(&self) -> usize {
		self.journal_overlay.read().cumulative_size
	}

	fn is_empty(&self) -> bool {
		self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none()
	}

	fn backing(&self) -> &Arc<KeyValueDB> {
		&self.backing
	}

	fn latest_era(&self) -> Option<u64> { self.journal_overlay.read().latest_era }

	fn earliest_era(&self) -> Option<u64> { self.journal_overlay.read().earliest_era }

	fn state(&self, key: &H256) -> Option<Bytes> {
		let journal_overlay = self.journal_overlay.read();
		let key = to_short_key(key);
		journal_overlay.backing_overlay.get(&key).map(|v| v.into_vec())
			.or_else(|| journal_overlay.pending_overlay.get(&key).map(|d| d.clone().into_vec()))
			.or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.into_vec()))
	}

	fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
		trace!(target: "journaldb", "entry: #{} ({})", now, id);

		let mut journal_overlay = self.journal_overlay.write();

		// flush previous changes
		journal_overlay.pending_overlay.clear();

		let mut tx = self.transaction_overlay.drain();
		let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect();
		let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect();
		let ops = inserted_keys.len() + removed_keys.len();

		// Increase counter for each inserted key no matter if the block is canonical or not.
		let insertions: Vec<_> = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }).collect();

		let encoded_value = {
			let value_ref = DatabaseValueRef {
				id,
				inserts: &insertions,
				deletes: &removed_keys,
			};
			encode(&value_ref)
		};

		for (k, v) in insertions {
			let short_key = to_short_key(&k);
			if !journal_overlay.backing_overlay.contains(&short_key) {
				journal_overlay.cumulative_size += v.len();
			}
			journal_overlay.backing_overlay.emplace(short_key, v);
		}
		let index = journal_overlay.journal.get(&now).map_or(0, |j| j.len());
		let db_key = DatabaseKey {
			era: now,
			index,
		};
		batch.put_vec(self.column, &encode(&db_key), encoded_value.into_vec());
		if journal_overlay.latest_era.map_or(true, |e| now > e) {
			trace!(target: "journaldb", "Set latest era to {}", now);
			batch.put_vec(self.column, &LATEST_ERA_KEY, encode(&now).into_vec());
			journal_overlay.latest_era = Some(now);
		}
		if journal_overlay.earliest_era.map_or(true, |e| e > now) {
			trace!(target: "journaldb", "Set earliest era to {}", now);
			journal_overlay.earliest_era = Some(now);
		}
		journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys });
		Ok(ops as u32)
	}

	fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> {
		trace!(target: "journaldb", "canonical: #{} ({})", end_era, canon_id);
		let mut journal_overlay = self.journal_overlay.write();
		let journal_overlay = &mut *journal_overlay;

		let mut ops = 0;
		// apply old commits' details
		if let Some(ref mut records) = journal_overlay.journal.get_mut(&end_era) {
			let mut canon_insertions: Vec<(H256, DBValue)> = Vec::new();
			let mut canon_deletions: Vec<H256> = Vec::new();
			let mut overlay_deletions: Vec<H256> = Vec::new();
			let mut index = 0usize;
			for mut journal in records.drain(..) {
				// delete the record from the db
				let db_key = DatabaseKey {
					era: end_era,
					index,
				};
				batch.delete(self.column, &encode(&db_key));
				trace!(target: "journaldb", "Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len());
				{
					if *canon_id == journal.id {
						for h in &journal.insertions {
							if let Some((d, rc)) = journal_overlay.backing_overlay.raw(&to_short_key(h)) {
								if rc > 0 {
									canon_insertions.push((h.clone(), d)); //TODO: optimize this to avoid data copy
								}
							}
						}
						canon_deletions = journal.deletions;
					}
					overlay_deletions.append(&mut journal.insertions);
				}
				index += 1;
			}
			ops += canon_insertions.len();
			ops += canon_deletions.len();
			// apply canon inserts first
			for (k, v) in canon_insertions {
				batch.put(self.column, &k, &v);
				journal_overlay.pending_overlay.insert(to_short_key(&k), v);
			}
			// update the overlay
			for k in overlay_deletions {
				if let Some(val) = journal_overlay.backing_overlay.remove_and_purge(&to_short_key(&k)) {
					journal_overlay.cumulative_size -= val.len();
				}
			}
			// apply canon deletions
			for k in canon_deletions {
				if !journal_overlay.backing_overlay.contains(&to_short_key(&k)) {
					batch.delete(self.column, &k);
				}
			}
		}
		journal_overlay.journal.remove(&end_era);

		if !journal_overlay.journal.is_empty() {
			trace!(target: "journaldb", "Set earliest_era to {}", end_era + 1);
			journal_overlay.earliest_era = Some(end_era + 1);
		}

		Ok(ops as u32)
	}

	fn flush(&self) {
		self.journal_overlay.write().pending_overlay.clear();
	}

	fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
		let mut ops = 0;
		for (key, (value, rc)) in self.transaction_overlay.drain() {
			if rc != 0 { ops += 1 }
			match rc {
				0 => {}
				_ if rc > 0 => {
					batch.put(self.column, &key, &value)
				}
				-1 => {
					if cfg!(debug_assertions) && self.backing.get(self.column, &key)?.is_none() {
						return Err(error_negatively_reference_hash(&key));
					}
					batch.delete(self.column, &key)
				}
				_ => panic!("Attempted to inject invalid state ({})", rc),
			}
		}
		Ok(ops)
	}

	fn consolidate(&mut self, with: MemoryDB<KeccakHasher>) {
		self.transaction_overlay.consolidate(with);
	}
}

impl HashDB<KeccakHasher> for OverlayRecentDB {
	fn keys(&self) -> HashMap<H256, i32> {
		let mut ret: HashMap<H256, i32> = self.backing.iter(self.column)
			.map(|(key, _)| (H256::from_slice(&*key), 1))
			.collect();

		for (key, refs) in self.transaction_overlay.keys() {
			match ret.entry(key) {
				Entry::Occupied(mut entry) => {
					*entry.get_mut() += refs;
				},
				Entry::Vacant(entry) => {
					entry.insert(refs);
				}
			}
		}
		ret
	}

	fn get(&self, key: &H256) -> Option<DBValue> {
		if let Some((d, rc)) = self.transaction_overlay.raw(key) {
			if rc > 0 {
				return Some(d)
			}
		}
		let v = {
			let journal_overlay = self.journal_overlay.read();
			let key = to_short_key(key);
			journal_overlay.backing_overlay.get(&key)
				.or_else(|| journal_overlay.pending_overlay.get(&key).cloned())
		};
		v.or_else(|| self.payload(key))
	}

	fn contains(&self, key: &H256) -> bool {
		self.get(key).is_some()
	}

	fn insert(&mut self, value: &[u8]) -> H256 {
		self.transaction_overlay.insert(value)
	}

	fn emplace(&mut self, key: H256, value: DBValue) {
		self.transaction_overlay.emplace(key, value);
	}

	fn remove(&mut self, key: &H256) {
		self.transaction_overlay.remove(key);
	}
}

#[cfg(test)]
mod tests {
	use keccak::keccak;
	use super::*;
	use hashdb::{HashDB, DBValue};
	use ethcore_logger::init_log;
	use {kvdb_memorydb, JournalDB};

	fn new_db() -> OverlayRecentDB {
		let backing = Arc::new(kvdb_memorydb::create(0));
		OverlayRecentDB::new(backing, None)
	}

	#[test]
	fn insert_same_in_fork() {
		// history is 1
		let mut jdb = new_db();

		let x = jdb.insert(b"X");
		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit_batch(2, &keccak(b"2"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&x);
		jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		let x = jdb.insert(b"X");
		jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		assert!(jdb.contains(&x));
	}

	#[test]
	fn long_history() {
		// history is 3
		let mut jdb = new_db();
		let h = jdb.insert(b"foo");
		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&h));
		jdb.remove(&h);
		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&h));
		jdb.commit_batch(2, &keccak(b"2"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&h));
		jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&h));
		jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(!jdb.contains(&h));
	}

	#[test]
	fn complex() {
		// history is 1
		let mut jdb = new_db();

		let foo = jdb.insert(b"foo");
		let bar = jdb.insert(b"bar");
		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));

		jdb.remove(&foo);
		jdb.remove(&bar);
		let baz = jdb.insert(b"baz");
		jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));
		assert!(jdb.contains(&baz));

		let foo = jdb.insert(b"foo");
		jdb.remove(&baz);
		jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(!jdb.contains(&bar));
		assert!(jdb.contains(&baz));

		jdb.remove(&foo);
		jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(!jdb.contains(&bar));
		assert!(!jdb.contains(&baz));

		jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(!jdb.contains(&foo));
		assert!(!jdb.contains(&bar));
		assert!(!jdb.contains(&baz));
	}

	#[test]
	fn fork() {
		// history is 1
		let mut jdb = new_db();

		let foo = jdb.insert(b"foo");
		let bar = jdb.insert(b"bar");
		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));

		jdb.remove(&foo);
		let baz = jdb.insert(b"baz");
		jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&bar);
		jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));
		assert!(jdb.contains(&baz));

		jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		assert!(!jdb.contains(&baz));
		assert!(!jdb.contains(&bar));
	}

	#[test]
	fn overwrite() {
		// history is 1
		let mut jdb = new_db();

		let foo = jdb.insert(b"foo");
		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));

		jdb.remove(&foo);
		jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		assert!(jdb.contains(&foo));
		jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
		jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
	}

	#[test]
	fn fork_same_key_one() {
		let mut jdb = new_db();
		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		let foo = jdb.insert(b"foo");
		jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		assert!(jdb.contains(&foo));

		jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
	}

	#[test]
	fn fork_same_key_other() {
		let mut jdb = new_db();

		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		let foo = jdb.insert(b"foo");
		jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		assert!(jdb.contains(&foo));

		jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));
	}

	#[test]
	fn fork_ins_del_ins() {
		let mut jdb = new_db();

		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		let foo = jdb.insert(b"foo");
		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&foo);
		jdb.commit_batch(2, &keccak(b"2a"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&foo);
		jdb.commit_batch(2, &keccak(b"2b"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit_batch(3, &keccak(b"3a"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit_batch(3, &keccak(b"3b"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.commit_batch(4, &keccak(b"4a"), Some((2, keccak(b"2a")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.commit_batch(5, &keccak(b"5a"), Some((3, keccak(b"3a")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
	}

	#[test]
	fn reopen() {
		let shared_db = Arc::new(kvdb_memorydb::create(0));
		let bar = H256::random();

		let foo = {
			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
			// history is 1
			let foo = jdb.insert(b"foo");
			jdb.emplace(bar.clone(), DBValue::from_slice(b"bar"));
			jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
			assert!(jdb.can_reconstruct_refs());
			foo
		};

		{
			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
			jdb.remove(&foo);
			jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
			assert!(jdb.can_reconstruct_refs());
		}

		{
			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
			assert!(jdb.contains(&foo));
			assert!(jdb.contains(&bar));
			jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(!jdb.contains(&foo));
		}
	}

	#[test]
	fn insert_delete_insert_delete_insert_expunge() {
		init_log();
		let mut jdb = new_db();

		// history is 4
		let foo = jdb.insert(b"foo");
		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit_batch(2, &keccak(b"2"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit_batch(3, &keccak(b"3"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		// expunge foo
		jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
	}

	#[test]
	fn forked_insert_delete_insert_delete_insert_expunge() {
		init_log();
		let mut jdb = new_db();

		// history is 4
		let foo = jdb.insert(b"foo");
		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit_batch(1, &keccak(b"1a"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit_batch(1, &keccak(b"1b"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit_batch(2, &keccak(b"2a"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit_batch(2, &keccak(b"2b"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit_batch(3, &keccak(b"3a"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.commit_batch(3, &keccak(b"3b"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit_batch(4, &keccak(b"4a"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.commit_batch(4, &keccak(b"4b"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		// expunge foo
		jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1a")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
	}

	#[test]
	fn broken_assert() {
		let mut jdb = new_db();

		let foo = jdb.insert(b"foo");
		jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		// foo is ancient history.
		jdb.remove(&foo);
		jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.insert(b"foo");
		jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); // BROKEN
		assert!(jdb.can_reconstruct_refs());
		assert!(jdb.contains(&foo));

		jdb.remove(&foo);
		jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		assert!(!jdb.contains(&foo));
	}

	#[test]
	fn reopen_test() {
		let mut jdb = new_db();

		// history is 4
		let foo = jdb.insert(b"foo");
		jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit_batch(2, &keccak(b"2"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit_batch(3, &keccak(b"3"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		// foo is ancient history.
		jdb.insert(b"foo");
		let bar = jdb.insert(b"bar");
		jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.remove(&foo);
		jdb.remove(&bar);
		jdb.commit_batch(6, &keccak(b"6"), Some((2, keccak(b"2")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.insert(b"foo");
		jdb.insert(b"bar");
		jdb.commit_batch(7, &keccak(b"7"), Some((3, keccak(b"3")))).unwrap();
		assert!(jdb.can_reconstruct_refs());
	}

	#[test]
	fn reopen_remove_three() {
		init_log();

		let shared_db = Arc::new(kvdb_memorydb::create(0));
		let foo = keccak(b"foo");

		{
			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
			// history is 1
			jdb.insert(b"foo");
			jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
			assert!(jdb.can_reconstruct_refs());
			jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
			assert!(jdb.can_reconstruct_refs());

			// foo is ancient history.
			jdb.remove(&foo);
			jdb.commit_batch(2, &keccak(b"2"), Some((0, keccak(b"0")))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));

			jdb.insert(b"foo");
			jdb.commit_batch(3, &keccak(b"3"), Some((1, keccak(b"1")))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));

		// incantation to reopen the db
		}; {
			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
			jdb.remove(&foo);
			jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));

		// incantation to reopen the db
		}; {
			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
			jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));

		// incantation to reopen the db
		}; {
			let mut jdb = OverlayRecentDB::new(shared_db, None);
			jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(!jdb.contains(&foo));
		}
	}

	#[test]
	fn reopen_fork() {
		let shared_db = Arc::new(kvdb_memorydb::create(0));

		let (foo, bar, baz) = {
			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
			// history is 1
			let foo = jdb.insert(b"foo");
			let bar = jdb.insert(b"bar");
			jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
			assert!(jdb.can_reconstruct_refs());
			jdb.remove(&foo);
			let baz = jdb.insert(b"baz");
			jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap();
			assert!(jdb.can_reconstruct_refs());

			jdb.remove(&bar);
			jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			(foo, bar, baz)
		};

		{
			let mut jdb = OverlayRecentDB::new(shared_db, None);
			jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
			assert!(jdb.can_reconstruct_refs());
			assert!(jdb.contains(&foo));
			assert!(!jdb.contains(&baz));
			assert!(!jdb.contains(&bar));
		}
	}

	#[test]
	fn insert_older_era() {
		let mut jdb = new_db();
		let foo = jdb.insert(b"foo");
		jdb.commit_batch(0, &keccak(b"0a"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());

		let bar = jdb.insert(b"bar");
		jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0a")))).unwrap();
		assert!(jdb.can_reconstruct_refs());

		jdb.remove(&bar);
		jdb.commit_batch(0, &keccak(b"0b"), None).unwrap();
		assert!(jdb.can_reconstruct_refs());
		jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();

		assert!(jdb.contains(&foo));
		assert!(jdb.contains(&bar));
	}

	#[test]
	fn inject() {
		let mut jdb = new_db();
		let key = jdb.insert(b"dog");
		jdb.inject_batch().unwrap();

		assert_eq!(jdb.get(&key).unwrap(), DBValue::from_slice(b"dog"));
		jdb.remove(&key);
		jdb.inject_batch().unwrap();

		assert!(jdb.get(&key).is_none());
	}

	#[test]
	fn earliest_era() {
		let shared_db = Arc::new(kvdb_memorydb::create(0));

		// empty DB
		let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
		assert!(jdb.earliest_era().is_none());

		// single journalled era.
		let _key = jdb.insert(b"hello!");
		let mut batch = jdb.backing().transaction();
		jdb.journal_under(&mut batch, 0, &keccak(b"0")).unwrap();
		jdb.backing().write_buffered(batch);

		assert_eq!(jdb.earliest_era(), Some(0));

		// second journalled era.
		let mut batch = jdb.backing().transaction();
		jdb.journal_under(&mut batch, 1, &keccak(b"1")).unwrap();
		jdb.backing().write_buffered(batch);

		assert_eq!(jdb.earliest_era(), Some(0));

		// single journalled era.
		let mut batch = jdb.backing().transaction();
		jdb.mark_canonical(&mut batch, 0, &keccak(b"0")).unwrap();
		jdb.backing().write_buffered(batch);

		assert_eq!(jdb.earliest_era(), Some(1));

		// no journalled eras.
		let mut batch = jdb.backing().transaction();
		jdb.mark_canonical(&mut batch, 1, &keccak(b"1")).unwrap();
		jdb.backing().write_buffered(batch);

		assert_eq!(jdb.earliest_era(), Some(1));

		// reconstructed: no journal entries.
		drop(jdb);
		let jdb = OverlayRecentDB::new(shared_db, None);
		assert_eq!(jdb.earliest_era(), None);
	}
}