// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Blockchain database.

use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder};

use bloomchain as bc;

use util::*;
use header::*;
use super::extras::*;
use transaction::*;
use views::*;
use receipt::Receipt;
use blooms::{Bloom, BloomGroup};
use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData};
use blockchain::best_block::BestBlock;
use types::tree_route::TreeRoute;
use blockchain::update::ExtrasUpdate;
use blockchain::{CacheSize, ImportRoute, Config};
use db::{Writable, Readable, CacheUpdatePolicy};

const LOG_BLOOMS_LEVELS: usize = 3;
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
/// Interface for querying blocks by hash and by number.
pub trait BlockProvider {
	/// Returns true if the given block is known
	/// (though not necessarily a part of the canon chain).
	fn is_known(&self, hash: &H256) -> bool;

	/// Get raw block data
	fn block(&self, hash: &H256) -> Option<Bytes>;

	/// Get the familial details concerning a block.
	fn block_details(&self, hash: &H256) -> Option<BlockDetails>;

	/// Get the hash of given block's number.
	fn block_hash(&self, index: BlockNumber) -> Option<H256>;

	/// Get the address of transaction with given hash.
	fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress>;

	/// Get receipts of block with given hash.
	fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts>;

	/// Get the partial-header of a block.
	fn block_header(&self, hash: &H256) -> Option<Header> {
		self.block(hash).map(|bytes| BlockView::new(&bytes).header())
	}

	/// Get a list of uncles for a given block.
	/// Returns None if block does not exist.
	fn uncles(&self, hash: &H256) -> Option<Vec<Header>> {
		self.block(hash).map(|bytes| BlockView::new(&bytes).uncles())
	}

	/// Get a list of uncle hashes for a given block.
	/// Returns None if block does not exist.
	fn uncle_hashes(&self, hash: &H256) -> Option<Vec<H256>> {
		self.block(hash).map(|bytes| BlockView::new(&bytes).uncle_hashes())
	}

	/// Get the number of given block's hash.
	fn block_number(&self, hash: &H256) -> Option<BlockNumber> {
		self.block(hash).map(|bytes| BlockView::new(&bytes).header_view().number())
	}

	/// Get transaction with given transaction hash.
	fn transaction(&self, address: &TransactionAddress) -> Option<LocalizedTransaction> {
		self.block(&address.block_hash).and_then(|bytes| BlockView::new(&bytes).localized_transaction_at(address.index))
	}

	/// Get transaction receipt.
	fn transaction_receipt(&self, address: &TransactionAddress) -> Option<Receipt> {
		self.block_receipts(&address.block_hash).and_then(|br| br.receipts.into_iter().nth(address.index))
	}

	/// Get a list of transactions for a given block.
	/// Returns None if block does not exist.
	fn transactions(&self, hash: &H256) -> Option<Vec<LocalizedTransaction>> {
		self.block(hash).map(|bytes| BlockView::new(&bytes).localized_transactions())
	}

	/// Returns reference to genesis hash.
	fn genesis_hash(&self) -> H256 {
		self.block_hash(0).expect("Genesis hash should always exist")
	}

	/// Returns the header of the genesis block.
	fn genesis_header(&self) -> Header {
		self.block_header(&self.genesis_hash()).unwrap()
	}

	/// Returns numbers of blocks containing given bloom.
	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;
}
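
// Illustrative usage sketch (not part of the original code; `provider` stands for
// any `BlockProvider` implementor, such as the `BlockChain` type below):
//
//   let hash = provider.block_hash(1).expect("block #1 is part of the chain");
//   let txs = provider.transactions(&hash).expect("the block exists, so its transactions do too");
//   let uncles = provider.uncle_hashes(&hash).expect("the block exists, so its uncle list does too");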
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
enum CacheID {
	Block(H256),
	BlockDetails(H256),
	BlockHashes(BlockNumber),
	TransactionAddresses(H256),
	BlocksBlooms(LogGroupPosition),
	BlockReceipts(H256),
}

struct CacheManager {
	cache_usage: VecDeque<HashSet<CacheID>>,
	in_use: HashSet<CacheID>,
}

impl bc::group::BloomGroupDatabase for BlockChain {
	fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option<bc::group::BloomGroup> {
		let position = LogGroupPosition::from(position.clone());
		self.note_used(CacheID::BlocksBlooms(position.clone()));
		self.extras_db.read_with_cache(&self.blocks_blooms, &position).map(Into::into)
	}
}
/// Structure providing fast access to blockchain data.
///
/// **Does not do input data verification.**
pub struct BlockChain {
	// All locks must be captured in the order declared here.
	pref_cache_size: AtomicUsize,
	max_cache_size: AtomicUsize,

	blooms_config: bc::Config,

	best_block: RwLock<BestBlock>,

	// block cache
	blocks: RwLock<HashMap<H256, Bytes>>,

	// extra caches
	block_details: RwLock<HashMap<H256, BlockDetails>>,
	block_hashes: RwLock<HashMap<BlockNumber, H256>>,
	transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
	blocks_blooms: RwLock<HashMap<LogGroupPosition, BloomGroup>>,
	block_receipts: RwLock<HashMap<H256, BlockReceipts>>,

	extras_db: Database,
	blocks_db: Database,

	cache_man: RwLock<CacheManager>,

	insert_lock: Mutex<()>,
}
impl BlockProvider for BlockChain {
	/// Returns true if the given block is known
	/// (though not necessarily a part of the canon chain).
	fn is_known(&self, hash: &H256) -> bool {
		self.extras_db.exists_with_cache(&self.block_details, hash)
	}

	/// Get raw block data
	fn block(&self, hash: &H256) -> Option<Bytes> {
		{
			let read = self.blocks.read();
			if let Some(v) = read.get(hash) {
				return Some(v.clone());
			}
		}

		let opt = self.blocks_db.get(hash)
			.expect("Low level database error. Some issue with disk?");

		self.note_used(CacheID::Block(hash.clone()));

		match opt {
			Some(b) => {
				let bytes: Bytes = b.to_vec();
				let mut write = self.blocks.write();
				write.insert(hash.clone(), bytes.clone());
				Some(bytes)
			},
			None => None
		}
	}

	/// Get the familial details concerning a block.
	fn block_details(&self, hash: &H256) -> Option<BlockDetails> {
		self.note_used(CacheID::BlockDetails(hash.clone()));
		self.extras_db.read_with_cache(&self.block_details, hash)
	}

	/// Get the hash of given block's number.
	fn block_hash(&self, index: BlockNumber) -> Option<H256> {
		self.note_used(CacheID::BlockHashes(index));
		self.extras_db.read_with_cache(&self.block_hashes, &index)
	}

	/// Get the address of transaction with given hash.
	fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress> {
		self.note_used(CacheID::TransactionAddresses(hash.clone()));
		self.extras_db.read_with_cache(&self.transaction_addresses, hash)
	}

	/// Get receipts of block with given hash.
	fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
		self.note_used(CacheID::BlockReceipts(hash.clone()));
		self.extras_db.read_with_cache(&self.block_receipts, hash)
	}

	/// Returns numbers of blocks containing given bloom.
	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber> {
		let range = from_block as bc::Number..to_block as bc::Number;
		let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
		chain.with_bloom(&range, &Bloom::from(bloom.clone()).into())
			.into_iter()
			.map(|b| b as BlockNumber)
			.collect()
	}
}

const COLLECTION_QUEUE_SIZE: usize = 8;
pub struct AncestryIter<'a> {
	current: H256,
	chain: &'a BlockChain,
}

impl<'a> Iterator for AncestryIter<'a> {
	type Item = H256;
	fn next(&mut self) -> Option<H256> {
		if self.current.is_zero() {
			Option::None
		} else {
			let mut n = self.chain.block_details(&self.current).unwrap().parent;
			mem::swap(&mut self.current, &mut n);
			Some(n)
		}
	}
}
impl BlockChain {
	/// Create new instance of blockchain from given Genesis
	pub fn new(config: Config, genesis: &[u8], path: &Path) -> BlockChain {
		// open extras db
		let mut extras_path = path.to_path_buf();
		extras_path.push("extras");
		let extras_db = match config.db_cache_size {
			None => Database::open_default(extras_path.to_str().unwrap()).unwrap(),
			Some(cache_size) => Database::open(
				&DatabaseConfig::with_cache(cache_size / 2),
				extras_path.to_str().unwrap()).unwrap(),
		};

		// open blocks db
		let mut blocks_path = path.to_path_buf();
		blocks_path.push("blocks");
		let blocks_db = match config.db_cache_size {
			None => Database::open_default(blocks_path.to_str().unwrap()).unwrap(),
			Some(cache_size) => Database::open(
				&DatabaseConfig::with_cache(cache_size / 2),
				blocks_path.to_str().unwrap()).unwrap(),
		};

		let mut cache_man = CacheManager { cache_usage: VecDeque::new(), in_use: HashSet::new() };
		(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));

		let bc = BlockChain {
			pref_cache_size: AtomicUsize::new(config.pref_cache_size),
			max_cache_size: AtomicUsize::new(config.max_cache_size),
			blooms_config: bc::Config {
				levels: LOG_BLOOMS_LEVELS,
				elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX,
			},
			best_block: RwLock::new(BestBlock::default()),
			blocks: RwLock::new(HashMap::new()),
			block_details: RwLock::new(HashMap::new()),
			block_hashes: RwLock::new(HashMap::new()),
			transaction_addresses: RwLock::new(HashMap::new()),
			blocks_blooms: RwLock::new(HashMap::new()),
			block_receipts: RwLock::new(HashMap::new()),
			extras_db: extras_db,
			blocks_db: blocks_db,
			cache_man: RwLock::new(cache_man),
			insert_lock: Mutex::new(()),
		};

		// load best block
		let best_block_hash = match bc.extras_db.get(b"best").unwrap() {
			Some(best) => {
				let new_best = H256::from_slice(&best);
				if !bc.blocks_db.get(&new_best).unwrap().is_some() {
					warn!("Best block {} not found", new_best.hex());
				}
				/* TODO: enable this once the best block issue is resolved
				while !bc.blocks_db.get(&new_best).unwrap().is_some() {
					match bc.rewind() {
						Some(h) => {
							new_best = h;
						}
						None => {
							warn!("Can't rewind blockchain");
							break;
						}
					}
					info!("Restored mismatched best block. Was: {}, new: {}", H256::from_slice(&best).hex(), new_best.hex());
				}*/
				new_best
			}
			None => {
				// best block does not exist
				// we need to insert genesis into the cache
				let block = BlockView::new(genesis);
				let header = block.header_view();
				let hash = block.sha3();

				let details = BlockDetails {
					number: header.number(),
					total_difficulty: header.difficulty(),
					parent: header.parent_hash(),
					children: vec![]
				};

				let block_batch = DBTransaction::new();
				block_batch.put(&hash, genesis).unwrap();
				bc.blocks_db.write(block_batch).expect("Low level database error. Some issue with disk?");

				let batch = DBTransaction::new();
				batch.write(&hash, &details);
				batch.write(&header.number(), &hash);
				batch.put(b"best", &hash).unwrap();
				bc.extras_db.write(batch).unwrap();

				hash
			}
		};

		{
			let mut best_block = bc.best_block.write();
			best_block.number = bc.block_number(&best_block_hash).unwrap();
			best_block.total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty;
			best_block.hash = best_block_hash;
		}

		bc
}
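
	// Illustrative usage sketch (mirrors the tests at the bottom of this file;
	// `genesis_rlp`, `block_rlp`, `receipts` and `temp_dir` are assumed to exist):
	//
	//   let bc = BlockChain::new(Config::default(), &genesis_rlp, temp_dir.as_path());
	//   bc.insert_block(&block_rlp, receipts);
	//   assert_eq!(bc.best_block_number(), 1);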
	/// Returns true if the given parent block has given child
	/// (though not necessarily a part of the canon chain).
	fn is_known_child(&self, parent: &H256, hash: &H256) -> bool {
		self.extras_db.read_with_cache(&self.block_details, parent).map_or(false, |d| d.children.contains(hash))
	}

	/// Rewind to a previous block
	#[cfg(test)]
	fn rewind(&self) -> Option<H256> {
		use db::Key;
		let batch = DBTransaction::new();
		// track back to the best block we have in the blocks database
		if let Some(best_block_hash) = self.extras_db.get(b"best").unwrap() {
			let best_block_hash = H256::from_slice(&best_block_hash);
			if best_block_hash == self.genesis_hash() {
				return None;
			}
			if let Some(extras) = self.extras_db.read(&best_block_hash) as Option<BlockDetails> {
				type DetailsKey = Key<BlockDetails, Target = H264>;
				batch.delete(&(DetailsKey::key(&best_block_hash))).unwrap();
				let hash = extras.parent;
				let range = extras.number as bc::Number..extras.number as bc::Number;
				let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
				let changes = chain.replace(&range, vec![]);
				for (k, v) in changes.into_iter() {
					batch.write(&LogGroupPosition::from(k), &BloomGroup::from(v));
				}
				batch.put(b"best", &hash).unwrap();

				let mut best_block = self.best_block.write();
				best_block.number = extras.number - 1;
				best_block.total_difficulty = self.block_details(&hash).unwrap().total_difficulty;
				best_block.hash = hash;

				// update parent extras
				if let Some(mut details) = self.extras_db.read(&hash) as Option<BlockDetails> {
					details.children.clear();
					batch.write(&hash, &details);
				}
				self.extras_db.write(batch).unwrap();
				self.block_details.write().clear();
				self.block_hashes.write().clear();
				self.blocks.write().clear();
				self.block_receipts.write().clear();
				return Some(hash);
			}
		}

		None
	}
	/// Set the cache configuration.
	pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
		self.pref_cache_size.store(pref_cache_size, AtomicOrder::Relaxed);
		self.max_cache_size.store(max_cache_size, AtomicOrder::Relaxed);
	}

	/// Returns a tree route between `from` and `to`, which is a tuple of:
	///
	/// - a vector of hashes of all blocks, ordered from `from` to `to`.
	///
	/// - common ancestor of these blocks.
	///
	/// - an index where best common ancestor would be
	///
	/// 1.) from newer to older
	///
	/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
	/// - from: A5, to: A4
	/// - route:
	///
	/// ```json
	/// { blocks: [A5], ancestor: A4, index: 1 }
	/// ```
	///
	/// 2.) from older to newer
	///
	/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
	/// - from: A3, to: A4
	/// - route:
	///
	/// ```json
	/// { blocks: [A4], ancestor: A3, index: 0 }
	/// ```
	///
	/// 3.) fork:
	///
	/// - bc:
	///
	/// ```text
	/// A1 -> A2 -> A3 -> A4
	///          -> B3 -> B4
	/// ```
	/// - from: B4, to: A4
	/// - route:
	///
	/// ```json
	/// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 }
	/// ```
	pub fn tree_route(&self, from: H256, to: H256) -> TreeRoute {
		let mut from_branch = vec![];
		let mut to_branch = vec![];

		let mut from_details = self.block_details(&from).unwrap_or_else(|| panic!("0. Expected to find details for block {:?}", from));
		let mut to_details = self.block_details(&to).unwrap_or_else(|| panic!("1. Expected to find details for block {:?}", to));
		let mut current_from = from;
		let mut current_to = to;

		// reset from && to to the same level
		while from_details.number > to_details.number {
			from_branch.push(current_from);
			current_from = from_details.parent.clone();
			from_details = self.block_details(&from_details.parent).unwrap_or_else(|| panic!("2. Expected to find details for block {:?}", from_details.parent));
		}

		while to_details.number > from_details.number {
			to_branch.push(current_to);
			current_to = to_details.parent.clone();
			to_details = self.block_details(&to_details.parent).unwrap_or_else(|| panic!("3. Expected to find details for block {:?}", to_details.parent));
		}

		assert_eq!(from_details.number, to_details.number);

		// move to shared parent
		while current_from != current_to {
			from_branch.push(current_from);
			current_from = from_details.parent.clone();
			from_details = self.block_details(&from_details.parent).unwrap_or_else(|| panic!("4. Expected to find details for block {:?}", from_details.parent));

			to_branch.push(current_to);
			current_to = to_details.parent.clone();
			to_details = self.block_details(&to_details.parent).unwrap_or_else(|| panic!("5. Expected to find details for block {:?}", to_details.parent));
		}

		let index = from_branch.len();

		from_branch.extend(to_branch.into_iter().rev());

		TreeRoute {
			blocks: from_branch,
			ancestor: current_from,
			index: index
		}
}
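
	// Illustrative sketch (hypothetical `new_head_hash`): a caller handling a reorg
	// can split the route at `index` into the retracted and enacted halves:
	//
	//   let route = bc.tree_route(bc.best_block_hash(), new_head_hash);
	//   let retracted = &route.blocks[..route.index]; // old-chain blocks, newest first
	//   let enacted = &route.blocks[route.index..];   // new-chain blocks, oldest first
	//
	// `block_info` below performs essentially this split when a branch becomes the
	// canon chain.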
	#[cfg_attr(feature="dev", allow(similar_names))]
	/// Inserts the block into backing cache database.
	/// Expects the block to be valid and already verified.
	/// If the block is already known, does nothing.
	pub fn insert_block(&self, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
		// create views onto rlp
		let block = BlockView::new(bytes);
		let header = block.header_view();
		let hash = header.sha3();

		if self.is_known_child(&header.parent_hash(), &hash) {
			return ImportRoute::none();
		}

		let _lock = self.insert_lock.lock();
		// store block in db
		self.blocks_db.put(&hash, bytes).unwrap();

		let info = self.block_info(bytes);

		if let BlockLocation::BranchBecomingCanonChain(ref d) = info.location {
			info!(target: "reorg", "{} Using {} (#{})", Colour::Yellow.bold().paint("Switching fork to a new branch."), info.hash, info.number);
			info!(target: "reorg", "{}{}", Colour::Red.bold().paint("Retracting"), d.retracted.iter().fold(String::new(), |acc, h| format!("{} {}", acc, h)));
			info!(target: "reorg", "{} {} (#{})", Colour::Blue.bold().paint("Leaving"), d.ancestor, self.block_details(&d.ancestor).expect("`ancestor` is in the route; qed").number);
			info!(target: "reorg", "{}{}", Colour::Green.bold().paint("Enacting"), d.enacted.iter().fold(String::new(), |acc, h| format!("{} {}", acc, h)));
		}

		self.apply_update(ExtrasUpdate {
			block_hashes: self.prepare_block_hashes_update(bytes, &info),
			block_details: self.prepare_block_details_update(bytes, &info),
			block_receipts: self.prepare_block_receipts_update(receipts, &info),
			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
			info: info.clone(),
		});

		ImportRoute::from(info)
	}
2015-12-21 15:22:24 +01:00
2016-07-26 00:20:37 +02:00
/// Get inserted block info which is critical to prepare extras updates.
fn block_info ( & self , block_bytes : & [ u8 ] ) -> BlockInfo {
let block = BlockView ::new ( block_bytes ) ;
let header = block . header_view ( ) ;
let hash = block . sha3 ( ) ;
let number = header . number ( ) ;
let parent_hash = header . parent_hash ( ) ;
let parent_details = self . block_details ( & parent_hash ) . unwrap_or_else ( | | panic! ( " Invalid parent hash: {:?} " , parent_hash ) ) ;
let total_difficulty = parent_details . total_difficulty + header . difficulty ( ) ;
let is_new_best = total_difficulty > self . best_block_total_difficulty ( ) ;
BlockInfo {
hash : hash ,
number : number ,
total_difficulty : total_difficulty ,
location : if is_new_best {
// on new best block we need to make sure that all ancestors
// are moved to "canon chain"
// find the route between old best block and the new one
let best_hash = self . best_block_hash ( ) ;
let route = self . tree_route ( best_hash , parent_hash ) ;
assert_eq! ( number , parent_details . number + 1 ) ;
match route . blocks . len ( ) {
0 = > BlockLocation ::CanonChain ,
_ = > {
let retracted = route . blocks . iter ( ) . take ( route . index ) . cloned ( ) . collect ::< Vec < _ > > ( ) . into_iter ( ) . collect ::< Vec < _ > > ( ) ;
let enacted = route . blocks . into_iter ( ) . skip ( route . index ) . collect ::< Vec < _ > > ( ) ;
BlockLocation ::BranchBecomingCanonChain ( BranchBecomingCanonChainData {
ancestor : route . ancestor ,
enacted : enacted ,
retracted : retracted ,
} )
}
}
} else {
BlockLocation ::Branch
}
}
}
	/// Applies extras update.
	fn apply_update(&self, update: ExtrasUpdate) {
		let batch = DBTransaction::new();

		{
			for hash in update.block_details.keys().cloned() {
				self.note_used(CacheID::BlockDetails(hash));
			}

			let mut write_details = self.block_details.write();
			batch.extend_with_cache(&mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
		}

		{
			let mut write_receipts = self.block_receipts.write();
			batch.extend_with_cache(&mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
		}

		{
			let mut write_blocks_blooms = self.blocks_blooms.write();
			batch.extend_with_cache(&mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove);
		}

		// These cached values must be updated last with all three locks taken to avoid
		// cache decoherence
		{
			let mut best_block = self.best_block.write();

			// update best block
			match update.info.location {
				BlockLocation::Branch => (),
				_ => {
					batch.put(b"best", &update.info.hash).unwrap();
					*best_block = BestBlock {
						hash: update.info.hash,
						number: update.info.number,
						total_difficulty: update.info.total_difficulty
					};
				}
			}

			let mut write_hashes = self.block_hashes.write();
			let mut write_txs = self.transaction_addresses.write();

			batch.extend_with_cache(&mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Remove);
			batch.extend_with_cache(&mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Remove);

			// update extras database
			self.extras_db.write(batch).unwrap();
		}
	}
	/// Iterator that lists `first` and then all of `first`'s ancestors, by hash.
	pub fn ancestry_iter(&self, first: H256) -> Option<AncestryIter> {
		if self.is_known(&first) {
			Some(AncestryIter {
				current: first,
				chain: self,
			})
		} else {
			None
		}
}
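
	// Illustrative sketch (not part of the original code): walking the most recent
	// ancestors of the best block, newest first:
	//
	//   let recent: Vec<H256> = bc.ancestry_iter(bc.best_block_hash())
	//       .expect("the best block is always known")
	//       .take(6)
	//       .collect();
	//
	// The iterator yields `first` itself and then each parent in turn, terminating
	// after the genesis block (whose parent hash is zero).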
	/// Given a block's `parent`, find every block header which represents a valid possible uncle.
	pub fn find_uncle_headers(&self, parent: &H256, uncle_generations: usize) -> Option<Vec<Header>> {
		self.find_uncle_hashes(parent, uncle_generations).map(|v| v.into_iter().filter_map(|h| self.block_header(&h)).collect())
	}

	/// Given a block's `parent`, find every block hash which represents a valid possible uncle.
	pub fn find_uncle_hashes(&self, parent: &H256, uncle_generations: usize) -> Option<Vec<H256>> {
		if !self.is_known(parent) { return None; }

		let mut excluded = HashSet::new();
		for a in self.ancestry_iter(parent.clone()).unwrap().take(uncle_generations) {
			excluded.extend(self.uncle_hashes(&a).unwrap().into_iter());
			excluded.insert(a);
		}

		let mut ret = Vec::new();
		for a in self.ancestry_iter(parent.clone()).unwrap().skip(1).take(uncle_generations) {
			ret.extend(self.block_details(&a).unwrap().children.iter()
				.filter(|h| !excluded.contains(h))
			);
		}

		Some(ret)
}
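
	// Illustrative sketch (hypothetical parameters): when sealing on top of the
	// current best block, siblings of its last few ancestors are uncle candidates,
	// minus anything already on the ancestry or already used as an uncle there:
	//
	//   let candidates = bc.find_uncle_hashes(&bc.best_block_hash(), 6)
	//       .expect("the best block is always known");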
	/// This function returns modified block hashes.
	fn prepare_block_hashes_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<BlockNumber, H256> {
		let mut block_hashes = HashMap::new();
		let block = BlockView::new(block_bytes);
		let header = block.header_view();
		let number = header.number();

		match info.location {
			BlockLocation::Branch => (),
			BlockLocation::CanonChain => {
				block_hashes.insert(number, info.hash.clone());
			},
			BlockLocation::BranchBecomingCanonChain(ref data) => {
				let ancestor_number = self.block_number(&data.ancestor).unwrap();
				let start_number = ancestor_number + 1;

				for (index, hash) in data.enacted.iter().cloned().enumerate() {
					block_hashes.insert(start_number + index as BlockNumber, hash);
				}

				block_hashes.insert(number, info.hash.clone());
			}
		}

		block_hashes
	}
	/// This function returns modified block details.
	fn prepare_block_details_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, BlockDetails> {
		let block = BlockView::new(block_bytes);
		let header = block.header_view();
		let parent_hash = header.parent_hash();

		// update parent
		let mut parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
		parent_details.children.push(info.hash.clone());

		// create current block details
		let details = BlockDetails {
			number: header.number(),
			total_difficulty: info.total_difficulty,
			parent: parent_hash.clone(),
			children: vec![]
		};

		// write to batch
		let mut block_details = HashMap::new();
		block_details.insert(parent_hash, parent_details);
		block_details.insert(info.hash.clone(), details);
		block_details
	}

	/// This function returns modified block receipts.
	fn prepare_block_receipts_update(&self, receipts: Vec<Receipt>, info: &BlockInfo) -> HashMap<H256, BlockReceipts> {
		let mut block_receipts = HashMap::new();
		block_receipts.insert(info.hash.clone(), BlockReceipts::new(receipts));
		block_receipts
	}
	/// This function returns modified transaction addresses.
	fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, TransactionAddress> {
		let block = BlockView::new(block_bytes);
		let transaction_hashes = block.transaction_hashes();

		transaction_hashes.into_iter()
			.enumerate()
			.fold(HashMap::new(), |mut acc, (i, tx_hash)| {
				acc.insert(tx_hash, TransactionAddress {
					block_hash: info.hash.clone(),
					index: i
				});
				acc
			})
	}
	/// This function returns modified block blooms.
	///
	/// To accelerate blooms lookups, blooms are stored in multiple
	/// layers (BLOOM_LEVELS, currently 3).
	/// ChainFilter is responsible for building and rebuilding these layers.
	/// It returns them in HashMap, where values are Blooms and
	/// keys are BloomIndexes. BloomIndex represents bloom location on one
	/// of these layers.
	///
	/// To reduce number of queries to database, block blooms are stored
	/// in BlocksBlooms structure which contains info about several
	/// (BLOOM_INDEX_SIZE, currently 16) consecutive blocks blooms.
	///
	/// Later, BloomIndexer is used to map bloom location on filter layer (BloomIndex)
	/// to bloom location in database (BlocksBloomLocation).
	fn prepare_block_blooms_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<LogGroupPosition, BloomGroup> {
		let block = BlockView::new(block_bytes);
		let header = block.header_view();

		let log_blooms = match info.location {
			BlockLocation::Branch => HashMap::new(),
			BlockLocation::CanonChain => {
				let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
				chain.insert(info.number as bc::Number, Bloom::from(header.log_bloom()).into())
			},
			BlockLocation::BranchBecomingCanonChain(ref data) => {
				let ancestor_number = self.block_number(&data.ancestor).unwrap();
				let start_number = ancestor_number + 1;
				let range = start_number as bc::Number..self.best_block_number() as bc::Number;

				let mut blooms: Vec<bc::Bloom> = data.enacted.iter()
					.map(|hash| self.block(hash).unwrap())
					.map(|bytes| BlockView::new(&bytes).header_view().log_bloom())
					.map(Bloom::from)
					.map(Into::into)
					.collect();

				blooms.push(Bloom::from(header.log_bloom()).into());

				let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
				chain.replace(&range, blooms)
			}
		};

		log_blooms.into_iter()
			.map(|p| (From::from(p.0), From::from(p.1)))
			.collect()
}
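
	// Illustrative note on the layering (assumptions about the `bloomchain` crate,
	// not taken from this file): with `LOG_BLOOMS_LEVELS = 3` and
	// `LOG_BLOOMS_ELEMENTS_PER_INDEX = 16`, each level aggregates roughly 16 items
	// of the level below it, e.g.
	//
	//   level 0: one bloom per block
	//   level 1: one bloom covering ~16 blocks
	//   level 2: one bloom covering ~256 blocks
	//
	// so `blocks_with_bloom` over a wide range can skip whole groups whose
	// aggregated bloom does not match, instead of scanning every block header.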
	/// Get best block hash.
	pub fn best_block_hash(&self) -> H256 {
		self.best_block.read().hash.clone()
	}

	/// Get best block number.
	pub fn best_block_number(&self) -> BlockNumber {
		self.best_block.read().number
	}

	/// Get best block total difficulty.
	pub fn best_block_total_difficulty(&self) -> U256 {
		self.best_block.read().total_difficulty
	}

	/// Get current cache size.
	pub fn cache_size(&self) -> CacheSize {
		CacheSize {
			blocks: self.blocks.read().heap_size_of_children(),
			block_details: self.block_details.read().heap_size_of_children(),
			transaction_addresses: self.transaction_addresses.read().heap_size_of_children(),
			blocks_blooms: self.blocks_blooms.read().heap_size_of_children(),
			block_receipts: self.block_receipts.read().heap_size_of_children(),
		}
	}

	/// Let the cache system know that a cacheable item has been used.
	fn note_used(&self, id: CacheID) {
		let mut cache_man = self.cache_man.write();
		if !cache_man.cache_usage[0].contains(&id) {
			cache_man.cache_usage[0].insert(id.clone());
			if cache_man.in_use.contains(&id) {
				if let Some(c) = cache_man.cache_usage.iter_mut().skip(1).find(|e| e.contains(&id)) {
					c.remove(&id);
				}
			} else {
				cache_man.in_use.insert(id);
			}
		}
	}

	/// Ticks our cache system and throws out any old data.
	pub fn collect_garbage(&self) {
		if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) {
			// rotate cache
			let mut cache_man = self.cache_man.write();
			const AVERAGE_BYTES_PER_CACHE_ENTRY: usize = 400; // estimated
			if cache_man.cache_usage[0].len() > self.pref_cache_size.load(AtomicOrder::Relaxed) / COLLECTION_QUEUE_SIZE / AVERAGE_BYTES_PER_CACHE_ENTRY {
				trace!("Cache rotation, cache_size = {}", self.cache_size().total());
				let cache = cache_man.cache_usage.pop_back().unwrap();
				cache_man.cache_usage.push_front(cache);
			}
			return;
		}

		for i in 0..COLLECTION_QUEUE_SIZE {
			{
				trace!("Cache cleanup round started {}, cache_size = {}", i, self.cache_size().total());

				let mut blocks = self.blocks.write();
				let mut block_details = self.block_details.write();
				let mut block_hashes = self.block_hashes.write();
				let mut transaction_addresses = self.transaction_addresses.write();
				let mut blocks_blooms = self.blocks_blooms.write();
				let mut block_receipts = self.block_receipts.write();
				let mut cache_man = self.cache_man.write();

				for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
					cache_man.in_use.remove(&id);
					match id {
						CacheID::Block(h) => { blocks.remove(&h); },
						CacheID::BlockDetails(h) => { block_details.remove(&h); }
						CacheID::BlockHashes(h) => { block_hashes.remove(&h); }
						CacheID::TransactionAddresses(h) => { transaction_addresses.remove(&h); }
						CacheID::BlocksBlooms(h) => { blocks_blooms.remove(&h); }
						CacheID::BlockReceipts(h) => { block_receipts.remove(&h); }
					}
				}
				cache_man.cache_usage.push_front(HashSet::new());

				// TODO: handle block_hashes properly.
				block_hashes.clear();

				blocks.shrink_to_fit();
				block_details.shrink_to_fit();
				block_hashes.shrink_to_fit();
				transaction_addresses.shrink_to_fit();
				blocks_blooms.shrink_to_fit();
				block_receipts.shrink_to_fit();
			}

			trace!("Cache cleanup round complete {}, cache_size = {}", i, self.cache_size().total());
			if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; }
		}

		// TODO: m_lastCollection = chrono::system_clock::now();
}
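
	// Illustrative note (not part of the original code): the cache accounting is a
	// coarse LRU. `note_used` records an item in the newest of the
	// COLLECTION_QUEUE_SIZE usage buckets, and `collect_garbage` pops the oldest
	// bucket, evicting entries that have not been touched for a full rotation:
	//
	//   bc.block(&some_hash);  // marks CacheID::Block(some_hash) as recently used
	//   bc.collect_garbage();  // drops cache entries whose IDs aged out of the queue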
}

#[cfg(test)]
mod tests {
	#![cfg_attr(feature="dev", allow(similar_names))]

	use std::str::FromStr;
	use rustc_serialize::hex::FromHex;
	use util::hash::*;
	use util::sha3::Hashable;
	use blockchain::{BlockProvider, BlockChain, Config, ImportRoute};
	use tests::helpers::*;
	use devtools::*;
	use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
	use views::BlockView;

	#[test]
	fn basic_blockchain_insert() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let first = canon_chain.generate(&mut finalizer).unwrap();
		let genesis_hash = BlockView::new(&genesis).header_view().sha3();
		let first_hash = BlockView::new(&first).header_view().sha3();

		let temp = RandomTempPath::new();
		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());

		assert_eq!(bc.genesis_hash(), genesis_hash.clone());
		assert_eq!(bc.best_block_number(), 0);
		assert_eq!(bc.best_block_hash(), genesis_hash.clone());
		assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
		assert_eq!(bc.block_hash(1), None);
		assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]);

		bc.insert_block(&first, vec![]);

		assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
		assert_eq!(bc.best_block_number(), 1);
		assert_eq!(bc.best_block_hash(), first_hash.clone());
		assert_eq!(bc.block_hash(1), Some(first_hash.clone()));
		assert_eq!(bc.block_details(&first_hash).unwrap().parent, genesis_hash.clone());
		assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![first_hash.clone()]);
		assert_eq!(bc.block_hash(2), None);
	}
	#[test]
	fn check_ancestry_iter() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let genesis_hash = BlockView::new(&genesis).header_view().sha3();

		let temp = RandomTempPath::new();
		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());

		let mut block_hashes = vec![genesis_hash.clone()];
		for _ in 0..10 {
			let block = canon_chain.generate(&mut finalizer).unwrap();
			block_hashes.push(BlockView::new(&block).header_view().sha3());
			bc.insert_block(&block, vec![]);
		}

		block_hashes.reverse();

		assert_eq!(bc.ancestry_iter(block_hashes[0].clone()).unwrap().collect::<Vec<_>>(), block_hashes)
	}
	#[test]
	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
	fn test_find_uncles() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let b1b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
		let b1a = canon_chain.generate(&mut finalizer).unwrap();
		let b2b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
		let b2a = canon_chain.generate(&mut finalizer).unwrap();
		let b3b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
		let b3a = canon_chain.generate(&mut finalizer).unwrap();
		let b4b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
		let b4a = canon_chain.generate(&mut finalizer).unwrap();
		let b5b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
		let b5a = canon_chain.generate(&mut finalizer).unwrap();

		let temp = RandomTempPath::new();
		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
		bc.insert_block(&b1a, vec![]);
		bc.insert_block(&b1b, vec![]);
		bc.insert_block(&b2a, vec![]);
		bc.insert_block(&b2b, vec![]);
		bc.insert_block(&b3a, vec![]);
		bc.insert_block(&b3b, vec![]);
		bc.insert_block(&b4a, vec![]);
		bc.insert_block(&b4b, vec![]);
		bc.insert_block(&b5a, vec![]);
		bc.insert_block(&b5b, vec![]);

		assert_eq!(
			[&b4b, &b3b, &b2b].iter().map(|b| BlockView::new(b).header()).collect::<Vec<_>>(),
			bc.find_uncle_headers(&BlockView::new(&b4a).header_view().sha3(), 3).unwrap()
		);

		// TODO: insert block that already includes one of them as an uncle to check it's not allowed.
	}
	#[test]
	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
	fn test_small_fork() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let b1 = canon_chain.generate(&mut finalizer).unwrap();
		let b2 = canon_chain.generate(&mut finalizer).unwrap();
		let b3b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
		let b3a = canon_chain.generate(&mut finalizer).unwrap();

		let genesis_hash = BlockView::new(&genesis).header_view().sha3();
		let b1_hash = BlockView::new(&b1).header_view().sha3();
		let b2_hash = BlockView::new(&b2).header_view().sha3();
		let b3a_hash = BlockView::new(&b3a).header_view().sha3();
		let b3b_hash = BlockView::new(&b3b).header_view().sha3();

		// b3a is a part of canon chain, whereas b3b is part of sidechain
		let best_block_hash = b3a_hash.clone();

		let temp = RandomTempPath::new();
		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());

		let ir1 = bc.insert_block(&b1, vec![]);
		let ir2 = bc.insert_block(&b2, vec![]);
		let ir3b = bc.insert_block(&b3b, vec![]);
		let ir3a = bc.insert_block(&b3a, vec![]);

		assert_eq!(ir1, ImportRoute {
			enacted: vec![b1_hash],
			retracted: vec![],
			omitted: vec![],
		});

		assert_eq!(ir2, ImportRoute {
			enacted: vec![b2_hash],
			retracted: vec![],
			omitted: vec![],
		});

		assert_eq!(ir3b, ImportRoute {
			enacted: vec![b3b_hash],
			retracted: vec![],
			omitted: vec![],
		});

		assert_eq!(ir3a, ImportRoute {
			enacted: vec![b3a_hash],
			retracted: vec![b3b_hash],
			omitted: vec![],
		});

		assert_eq!(bc.best_block_hash(), best_block_hash);
		assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0);
		assert_eq!(bc.block_number(&b1_hash).unwrap(), 1);
		assert_eq!(bc.block_number(&b2_hash).unwrap(), 2);
		assert_eq!(bc.block_number(&b3a_hash).unwrap(), 3);
		assert_eq!(bc.block_number(&b3b_hash).unwrap(), 3);
		assert_eq!(bc.block_hash(0).unwrap(), genesis_hash);
		assert_eq!(bc.block_hash(1).unwrap(), b1_hash);
		assert_eq!(bc.block_hash(2).unwrap(), b2_hash);
		assert_eq!(bc.block_hash(3).unwrap(), b3a_hash);

		// test trie route
		let r0_1 = bc.tree_route(genesis_hash.clone(), b1_hash.clone());
		assert_eq!(r0_1.ancestor, genesis_hash);
		assert_eq!(r0_1.blocks, [b1_hash.clone()]);
		assert_eq!(r0_1.index, 0);

		let r0_2 = bc.tree_route(genesis_hash.clone(), b2_hash.clone());
		assert_eq!(r0_2.ancestor, genesis_hash);
		assert_eq!(r0_2.blocks, [b1_hash.clone(), b2_hash.clone()]);
		assert_eq!(r0_2.index, 0);

		let r1_3a = bc.tree_route(b1_hash.clone(), b3a_hash.clone());
		assert_eq!(r1_3a.ancestor, b1_hash);
		assert_eq!(r1_3a.blocks, [b2_hash.clone(), b3a_hash.clone()]);
		assert_eq!(r1_3a.index, 0);

		let r1_3b = bc.tree_route(b1_hash.clone(), b3b_hash.clone());
		assert_eq!(r1_3b.ancestor, b1_hash);
		assert_eq!(r1_3b.blocks, [b2_hash.clone(), b3b_hash.clone()]);
		assert_eq!(r1_3b.index, 0);

		let r3a_3b = bc.tree_route(b3a_hash.clone(), b3b_hash.clone());
		assert_eq!(r3a_3b.ancestor, b2_hash);
		assert_eq!(r3a_3b.blocks, [b3a_hash.clone(), b3b_hash.clone()]);
		assert_eq!(r3a_3b.index, 1);

		let r1_0 = bc.tree_route(b1_hash.clone(), genesis_hash.clone());
		assert_eq!(r1_0.ancestor, genesis_hash);
		assert_eq!(r1_0.blocks, [b1_hash.clone()]);
		assert_eq!(r1_0.index, 1);

		let r2_0 = bc.tree_route(b2_hash.clone(), genesis_hash.clone());
		assert_eq!(r2_0.ancestor, genesis_hash);
		assert_eq!(r2_0.blocks, [b2_hash.clone(), b1_hash.clone()]);
		assert_eq!(r2_0.index, 2);

		let r3a_1 = bc.tree_route(b3a_hash.clone(), b1_hash.clone());
		assert_eq!(r3a_1.ancestor, b1_hash);
		assert_eq!(r3a_1.blocks, [b3a_hash.clone(), b2_hash.clone()]);
		assert_eq!(r3a_1.index, 2);

		let r3b_1 = bc.tree_route(b3b_hash.clone(), b1_hash.clone());
		assert_eq!(r3b_1.ancestor, b1_hash);
		assert_eq!(r3b_1.blocks, [b3b_hash.clone(), b2_hash.clone()]);
		assert_eq!(r3b_1.index, 2);

		let r3b_3a = bc.tree_route(b3b_hash.clone(), b3a_hash.clone());
		assert_eq!(r3b_3a.ancestor, b2_hash);
		assert_eq!(r3b_3a.blocks, [b3b_hash.clone(), b3a_hash.clone()]);
		assert_eq!(r3b_3a.index, 1);
	}
	#[test]
	fn test_reopen_blockchain_db() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let first = canon_chain.generate(&mut finalizer).unwrap();
		let genesis_hash = BlockView::new(&genesis).header_view().sha3();
		let first_hash = BlockView::new(&first).header_view().sha3();

		let temp = RandomTempPath::new();
		{
			let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
			assert_eq!(bc.best_block_hash(), genesis_hash);
			bc.insert_block(&first, vec![]);
			assert_eq!(bc.best_block_hash(), first_hash);
		}

		{
			let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
			assert_eq!(bc.best_block_hash(), first_hash);
		}
	}
2016-01-27 17:32:53 +01:00
	#[test]
	fn can_contain_arbitrary_block_sequence() {
		let bc_result = generate_dummy_blockchain(50);
		let bc = bc_result.reference();
		assert_eq!(bc.best_block_number(), 49);
	}
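
	// Walking every header from the best block back to the genesis grows the block
	// cache past 1MB; collect_garbage should then shrink it back below that bound.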
	#[test]
	fn can_collect_garbage() {
		let bc_result = generate_dummy_blockchain(3000);
		let bc = bc_result.reference();

		assert_eq!(bc.best_block_number(), 2999);
		let best_hash = bc.best_block_hash();
		let mut block_header = bc.block_header(&best_hash);

		while !block_header.is_none() {
			block_header = bc.block_header(&block_header.unwrap().parent_hash);
		}
		assert!(bc.cache_size().blocks > 1024 * 1024);

		for _ in 0..2 {
			bc.collect_garbage();
		}
		assert!(bc.cache_size().blocks < 1024 * 1024);
	}
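
	// As above, but the chain is produced by the generator variant that also writes
	// block extras.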
	#[test]
	fn can_contain_arbitrary_block_sequence_with_extra() {
		let bc_result = generate_dummy_blockchain_with_extra(25);
		let bc = bc_result.reference();
		assert_eq!(bc.best_block_number(), 24);
	}

	#[test]
	fn can_contain_only_genesis_block() {
		let bc_result = generate_dummy_empty_blockchain();
		let bc = bc_result.reference();
		assert_eq!(bc.best_block_number(), 0);
	}
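
	// Block b1 below carries seven transactions; each one should be retrievable again
	// through the TransactionAddress stored for its hash.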
	#[test]
	fn find_transaction_by_hash() {
let genesis = " f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0af81e09f8c46ca322193edfda764fa7e88e81923f802f1d325ec0b0308ac2cd0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200008083023e38808454c98c8142a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421880102030405060708c0c0 " . from_hex ( ) . unwrap ( ) ;
let b1 = " f904a8f901faa0ce1f26f798dd03c8782d63b3e42e79a64eaea5694ea686ac5d7ce3df5171d1aea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0a65c2364cd0f1542d761823dc0109c6b072f14c20459598c5455c274601438f4a070616ebd7ad2ed6fb7860cf7e9df00163842351c38a87cac2c1cb193895035a2a05c5b4fc43c2d45787f54e1ae7d27afdb4ad16dfc567c5692070d5c4556e0b1d7b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000183023ec683021536845685109780a029f07836e4e59229b3a065913afc27702642c683bba689910b2b2fd45db310d3888957e6d004a31802f902a7f85f800a8255f094aaaf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca0575da4e21b66fa764be5f74da9389e67693d066fb0d1312e19e17e501da00ecda06baf5a5327595f6619dfc2fcb3f2e6fb410b5810af3cb52d0e7508038e91a188f85f010a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba04fa966bf34b93abc1bcd665554b7f316b50f928477b50be0f3285ead29d18c5ba017bba0eeec1625ab433746955e125d46d80b7fdc97386c51266f842d8e02192ef85f020a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca004377418ae981cc32b1312b4a427a1d69a821b28db8584f5f2bd8c6d42458adaa053a1dba1af177fac92f3b6af0a9fa46a22adf56e686c93794b6a012bf254abf5f85f030a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca04fe13febd28a05f4fcb2f451d7ddc2dda56486d9f8c79a62b0ba4da775122615a0651b2382dd402df9ebc27f8cb4b2e0f3cea68dda2dca0ee9603608f0b6f51668f85f040a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba078e6a0ba086a08f8450e208a399bb2f2d2a0d984acd2517c7c7df66ccfab567da013254002cd45a97fac049ae00afbc43ed0d9961d0c56a3b2382c80ce41c198ddf85f050a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba0a7174d8f43ea71c8e3ca9477691add8d80ac8e0ed89d8d8b572041eef81f4a54a0534ea2e28ec4da3b5b944b18c51ec84a5cf35f5b3343c5fb86521fd2d388f506f85f060a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba034bd04065833536a10c77ee2a43a5371bc6d34837088b861dd9d4b7f44074b59a078807715786a13876d3455716a6b9cb2186b7a4887a5c31160fc877454958616c0 " . from_hex ( ) . unwrap ( ) ;
		let b1_hash = H256::from_str("f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3").unwrap();

		let temp = RandomTempPath::new();
		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
		bc.insert_block(&b1, vec![]);

		let transactions = bc.transactions(&b1_hash).unwrap();
		assert_eq!(transactions.len(), 7);
		for t in transactions {
			assert_eq!(bc.transaction(&bc.transaction_address(&t.hash()).unwrap()).unwrap(), t);
		}
	}
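
	// blocks_with_bloom should follow chain reorganisations: bloom matches from a
	// retracted branch disappear from the results, and matches from the newly
	// enacted branch take their place.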
	#[test]
	fn test_bloom_filter_simple() {
		// TODO: From here
let bloom_b1 = H2048 ::from_str ( " 00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000 " ) . unwrap ( ) ;
let bloom_b2 = H2048 ::from_str ( " 00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 " ) . unwrap ( ) ;
let bloom_ba = H2048 ::from_str ( " 00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 " ) . unwrap ( ) ;
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let mut fork = canon_chain.fork(1);
		let mut fork_finalizer = finalizer.fork();
		let b1 = fork.with_bloom(bloom_b1.clone()).generate(&mut fork_finalizer).unwrap();
		let b2 = fork.with_bloom(bloom_b2.clone()).generate(&mut fork_finalizer).unwrap();
		let b3 = fork.with_bloom(bloom_ba.clone()).generate(&mut fork_finalizer).unwrap();
		let b1a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap();
		let b2a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap();

		let temp = RandomTempPath::new();
		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());

		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
		assert_eq!(blocks_b1, vec![]);
		assert_eq!(blocks_b2, vec![]);

		bc.insert_block(&b1, vec![]);
		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
		assert_eq!(blocks_b1, vec![1]);
		assert_eq!(blocks_b2, vec![]);

		bc.insert_block(&b2, vec![]);
		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
		assert_eq!(blocks_b1, vec![1]);
		assert_eq!(blocks_b2, vec![2]);
		// the fork hasn't happened yet
		bc.insert_block(&b1a, vec![]);
		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
		let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
		assert_eq!(blocks_b1, vec![1]);
		assert_eq!(blocks_b2, vec![2]);
		assert_eq!(blocks_ba, vec![]);

		// the fork has happened
		bc.insert_block(&b2a, vec![]);
		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
		let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
		assert_eq!(blocks_b1, vec![]);
		assert_eq!(blocks_b2, vec![]);
		assert_eq!(blocks_ba, vec![1, 2]);

		// fork back
		bc.insert_block(&b3, vec![]);
		let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
		let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
		let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
		assert_eq!(blocks_b1, vec![1]);
		assert_eq!(blocks_b2, vec![2]);
		assert_eq!(blocks_ba, vec![3]);
	}
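
	// Inserting a shorter-fork uncle after a longer canonical chain must not change
	// the best block, and the best block must survive reopening the database.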
	#[test]
	fn test_best_block_update() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();

		let temp = RandomTempPath::new();
		{
			let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
			let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();

			// create a longer fork
			for _ in 0..5 {
				let canon_block = canon_chain.generate(&mut finalizer).unwrap();
				bc.insert_block(&canon_block, vec![]);
			}

			assert_eq!(bc.best_block_number(), 5);
			bc.insert_block(&uncle, vec![]);
		}

		// re-loading the blockchain should load the correct best block.
		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
		assert_eq!(bc.best_block_number(), 5);
	}
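
	// rewind() steps the best block back one block at a time, returning the new best
	// hash, and yields None once the genesis has been reached.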
	#[test]
	fn test_rewind() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let first = canon_chain.generate(&mut finalizer).unwrap();
		let second = canon_chain.generate(&mut finalizer).unwrap();
		let genesis_hash = BlockView::new(&genesis).header_view().sha3();
		let first_hash = BlockView::new(&first).header_view().sha3();
		let second_hash = BlockView::new(&second).header_view().sha3();

		let temp = RandomTempPath::new();
		let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());

		bc.insert_block(&first, vec![]);
		bc.insert_block(&second, vec![]);

		assert_eq!(bc.rewind(), Some(first_hash.clone()));
		assert!(!bc.is_known(&second_hash));
		assert_eq!(bc.best_block_number(), 1);
		assert_eq!(bc.best_block_hash(), first_hash.clone());

		assert_eq!(bc.rewind(), Some(genesis_hash.clone()));
		assert_eq!(bc.rewind(), None);
	}
}