2017-01-25 18:51:41 +01:00
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
2016-02-05 13:40:41 +01:00
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
2016-02-02 15:29:53 +01:00
//! Blockchain database.
2015-12-17 17:20:10 +01:00
2018-03-12 21:15:55 +01:00
use std ::collections ::{ HashMap , HashSet , hash_map } ;
2017-07-29 21:56:42 +02:00
use std ::sync ::Arc ;
2017-07-29 17:12:07 +02:00
use std ::mem ;
2017-08-17 16:05:26 +02:00
use itertools ::Itertools ;
2018-03-12 21:15:55 +01:00
use bloomchain as bc ;
2017-08-30 16:04:47 +02:00
use heapsize ::HeapSizeOf ;
2018-01-14 22:43:28 +01:00
use ethereum_types ::{ H256 , Bloom , U256 } ;
2017-09-02 20:09:13 +02:00
use parking_lot ::{ Mutex , RwLock } ;
2017-09-06 20:47:45 +02:00
use bytes ::Bytes ;
2016-09-01 14:29:59 +02:00
use rlp ::* ;
2018-02-23 10:12:52 +01:00
use rlp_compress ::{ compress , decompress , blocks_swapper } ;
2015-12-21 02:34:41 +01:00
use header ::* ;
2015-12-14 17:12:47 +01:00
use transaction ::* ;
2015-12-17 02:13:14 +01:00
use views ::* ;
2016-09-14 12:02:30 +02:00
use log_entry ::{ LogEntry , LocalizedLogEntry } ;
2016-02-11 14:35:03 +01:00
use receipt ::Receipt ;
2018-03-12 21:15:55 +01:00
use blooms ::{ BloomGroup , GroupPosition } ;
2016-10-18 18:16:00 +02:00
use blockchain ::best_block ::{ BestBlock , BestAncientBlock } ;
2018-02-19 10:52:33 +01:00
use blockchain ::block_info ::{ BlockInfo , BlockLocation , BranchBecomingCanonChainData } ;
use blockchain ::extras ::{ BlockReceipts , BlockDetails , TransactionAddress , EPOCH_KEY_PREFIX , EpochTransitions } ;
2016-10-18 18:16:00 +02:00
use types ::blockchain_info ::BlockChainInfo ;
2016-05-16 18:33:32 +02:00
use types ::tree_route ::TreeRoute ;
2016-02-27 02:16:39 +01:00
use blockchain ::update ::ExtrasUpdate ;
2016-05-26 18:24:51 +02:00
use blockchain ::{ CacheSize , ImportRoute , Config } ;
2016-08-18 18:24:49 +02:00
use db ::{ self , Writable , Readable , CacheUpdatePolicy } ;
2016-07-31 00:19:27 +02:00
use cache_manager ::CacheManager ;
2016-12-28 13:44:51 +01:00
use encoded ;
2017-06-28 13:17:36 +02:00
use engines ::epoch ::{ Transition as EpochTransition , PendingTransition as PendingEpochTransition } ;
2017-09-10 18:03:35 +02:00
use rayon ::prelude ::* ;
2017-09-01 16:57:57 +02:00
use ansi_term ::Colour ;
2017-10-10 20:01:27 +02:00
use kvdb ::{ DBTransaction , KeyValueDB } ;
2016-02-16 14:46:21 +01:00
2018-03-12 21:15:55 +01:00
const LOG_BLOOMS_LEVELS : usize = 3 ;
const LOG_BLOOMS_ELEMENTS_PER_INDEX : usize = 16 ;
2016-01-12 13:14:01 +01:00
/// Interface for querying blocks by hash and by number.
pub trait BlockProvider {
	/// Returns true if the given block is known
	/// (though not necessarily a part of the canon chain).
	fn is_known(&self, hash: &H256) -> bool;

	/// Get the first block of the best part of the chain.
	/// Return `None` if there is no gap and the first block is the genesis.
	/// Any queries of blocks which precede this one are not guaranteed to
	/// succeed.
	fn first_block(&self) -> Option<H256>;

	/// Get the number of the first block.
	fn first_block_number(&self) -> Option<BlockNumber> {
		self.first_block().map(|b| self.block_number(&b).expect("First block is always set to an existing block or `None`. Existing block always has a number; qed"))
	}

	/// Get the best block of the first block sequence if there is a gap.
	fn best_ancient_block(&self) -> Option<H256>;

	/// Get the number of the best ancient block, if any.
	fn best_ancient_number(&self) -> Option<BlockNumber> {
		self.best_ancient_block().map(|h| self.block_number(&h).expect("Ancient block is always set to an existing block or `None`. Existing block always has a number; qed"))
	}

	/// Get raw block data (full RLP: header, transactions, uncles).
	fn block(&self, hash: &H256) -> Option<encoded::Block>;

	/// Get the familial details concerning a block.
	fn block_details(&self, hash: &H256) -> Option<BlockDetails>;

	/// Get the hash of given block's number.
	fn block_hash(&self, index: BlockNumber) -> Option<H256>;

	/// Get the address of transaction with given hash.
	fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress>;

	/// Get receipts of block with given hash.
	fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts>;

	/// Get the partial-header of a block.
	fn block_header(&self, hash: &H256) -> Option<Header> {
		self.block_header_data(hash).map(|header| header.decode())
	}

	/// Get the header RLP of a block.
	fn block_header_data(&self, hash: &H256) -> Option<encoded::Header>;

	/// Get the block body (uncles and transactions).
	fn block_body(&self, hash: &H256) -> Option<encoded::Body>;

	/// Get a list of uncles for a given block.
	/// Returns None if block does not exist.
	fn uncles(&self, hash: &H256) -> Option<Vec<Header>> {
		self.block_body(hash).map(|body| body.uncles())
	}

	/// Get a list of uncle hashes for a given block.
	/// Returns None if block does not exist.
	fn uncle_hashes(&self, hash: &H256) -> Option<Vec<H256>> {
		self.block_body(hash).map(|body| body.uncle_hashes())
	}

	/// Get the number of given block's hash.
	fn block_number(&self, hash: &H256) -> Option<BlockNumber> {
		self.block_details(hash).map(|details| details.number)
	}

	/// Get transaction with given transaction hash.
	fn transaction(&self, address: &TransactionAddress) -> Option<LocalizedTransaction> {
		// Needs both the body (for the tx data) and the block number
		// (for localization) to succeed.
		self.block_body(&address.block_hash)
			.and_then(|body| self.block_number(&address.block_hash)
			.and_then(|n| body.view().localized_transaction_at(&address.block_hash, n, address.index)))
	}

	/// Get transaction receipt.
	fn transaction_receipt(&self, address: &TransactionAddress) -> Option<Receipt> {
		self.block_receipts(&address.block_hash).and_then(|br| br.receipts.into_iter().nth(address.index))
	}

	/// Get a list of transactions for a given block.
	/// Returns None if block does not exist.
	fn transactions(&self, hash: &H256) -> Option<Vec<LocalizedTransaction>> {
		self.block_body(hash)
			.and_then(|body| self.block_number(hash)
			.map(|n| body.view().localized_transactions(hash, n)))
	}

	/// Returns reference to genesis hash.
	fn genesis_hash(&self) -> H256 {
		self.block_hash(0).expect("Genesis hash should always exist")
	}

	/// Returns the header of the genesis block.
	fn genesis_header(&self) -> Header {
		self.block_header(&self.genesis_hash())
			.expect("Genesis header always stored; qed")
	}

	/// Returns numbers of blocks containing given bloom.
	fn blocks_with_bloom(&self, bloom: &Bloom, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;

	/// Returns logs matching given filter, newest-block-first input is
	/// handled by the implementation. `limit` caps the number of entries.
	fn logs<F>(&self, blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
		where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized;
}
2016-01-18 15:48:38 +01:00
/// Key identifying an entry in one of the in-memory extras caches.
/// Used by the cache manager to track usage for LRU-style eviction.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
enum CacheId {
	BlockHeader(H256),
	BlockBody(H256),
	BlockDetails(H256),
	BlockHashes(BlockNumber),
	TransactionAddresses(H256),
	BlocksBlooms(GroupPosition),
	BlockReceipts(H256),
}
2018-03-12 21:15:55 +01:00
impl bc::group::BloomGroupDatabase for BlockChain {
	/// Fetch the bloom group stored at `position`, reading through the
	/// `blocks_blooms` cache and falling back to the extras column.
	fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option<bc::group::BloomGroup> {
		let group_position = GroupPosition::from(position.clone());
		let group = self.db.read_with_cache(db::COL_EXTRA, &self.blocks_blooms, &group_position);
		// Record the access so the cache manager keeps hot entries alive.
		self.cache_man.lock().note_used(CacheId::BlocksBlooms(group_position));
		group.map(Into::into)
	}
}
2015-12-17 17:20:10 +01:00
/// Structure providing fast access to blockchain data.
///
/// **Does not do input data verification.**
pub struct BlockChain {
	// All locks must be captured in the order declared here.

	// Configuration for the hierarchical log-bloom index.
	blooms_config: bc::Config,

	// Cached copy of the best (canonical head) block.
	best_block: RwLock<BestBlock>,

	// Stores best block of the first uninterrupted sequence of blocks. `None` if there are no gaps.
	// Only updated with `insert_unordered_block`.
	best_ancient_block: RwLock<Option<BestAncientBlock>>,
	// Stores the last block of the last sequence of blocks. `None` if there are no gaps.
	// This is calculated on start and does not get updated.
	first_block: Option<H256>,

	// block cache (raw header / body RLP keyed by block hash)
	block_headers: RwLock<HashMap<H256, Bytes>>,
	block_bodies: RwLock<HashMap<H256, Bytes>>,

	// extra caches
	block_details: RwLock<HashMap<H256, BlockDetails>>,
	block_hashes: RwLock<HashMap<BlockNumber, H256>>,
	transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
	blocks_blooms: RwLock<HashMap<GroupPosition, BloomGroup>>,
	block_receipts: RwLock<HashMap<H256, BlockReceipts>>,

	// Backing key-value database, shared with the rest of the client.
	db: Arc<KeyValueDB>,

	// Tracks cache usage so entries can be evicted under memory pressure.
	cache_man: Mutex<CacheManager<CacheId>>,

	// Pending state for an in-flight insertion batch; presumably promoted
	// into the main caches when the batch commits — the commit path is not
	// in this chunk, so confirm against the rest of the file.
	pending_best_block: RwLock<Option<BestBlock>>,
	pending_block_hashes: RwLock<HashMap<BlockNumber, H256>>,
	pending_block_details: RwLock<HashMap<H256, BlockDetails>>,
	pending_transaction_addresses: RwLock<HashMap<H256, Option<TransactionAddress>>>,
}
2016-01-12 13:14:01 +01:00
impl BlockProvider for BlockChain {
	/// Returns true if the given block is known
	/// (though not necessarily a part of the canon chain).
	fn is_known(&self, hash: &H256) -> bool {
		self.db.exists_with_cache(db::COL_EXTRA, &self.block_details, hash)
	}

	fn first_block(&self) -> Option<H256> {
		self.first_block.clone()
	}

	fn best_ancient_block(&self) -> Option<H256> {
		self.best_ancient_block.read().as_ref().map(|b| b.hash)
	}

	fn best_ancient_number(&self) -> Option<BlockNumber> {
		self.best_ancient_block.read().as_ref().map(|b| b.number)
	}

	/// Get raw block data.
	fn block(&self, hash: &H256) -> Option<encoded::Block> {
		// Header and body are stored in separate columns; reassemble the
		// canonical 3-item block RLP: [header, transactions, uncles].
		let header = self.block_header_data(hash)?;
		let body = self.block_body(hash)?;

		let mut block = RlpStream::new_list(3);
		let body_rlp = body.rlp();
		block.append_raw(header.rlp().as_raw(), 1);
		block.append_raw(body_rlp.at(0).as_raw(), 1);
		block.append_raw(body_rlp.at(1).as_raw(), 1);
		Some(encoded::Block::new(block.out()))
	}

	/// Get block header data.
	fn block_header_data(&self, hash: &H256) -> Option<encoded::Header> {
		// Check cache first
		{
			let read = self.block_headers.read();
			if let Some(v) = read.get(hash) {
				return Some(encoded::Header::new(v.clone()));
			}
		}

		// Check if it's the best block; its full RLP is cached separately,
		// so slice the header (item 0) out of it.
		{
			let best_block = self.best_block.read();
			if &best_block.hash == hash {
				return Some(encoded::Header::new(
					Rlp::new(&best_block.block).at(0).as_raw().to_vec()
				))
			}
		}

		// Read from DB and populate cache. A DB error here is a disk-level
		// failure and panics; a missing key returns None via `?`.
		let b = self.db.get(db::COL_HEADERS, hash)
			.expect("Low level database error. Some issue with disk?")?;

		let bytes = decompress(&b, blocks_swapper()).into_vec();
		let mut write = self.block_headers.write();
		write.insert(*hash, bytes.clone());

		self.cache_man.lock().note_used(CacheId::BlockHeader(*hash));
		Some(encoded::Header::new(bytes))
	}

	/// Get block body data.
	fn block_body(&self, hash: &H256) -> Option<encoded::Body> {
		// Check cache first
		{
			let read = self.block_bodies.read();
			if let Some(v) = read.get(hash) {
				return Some(encoded::Body::new(v.clone()));
			}
		}

		// Check if it's the best block
		{
			let best_block = self.best_block.read();
			if &best_block.hash == hash {
				return Some(encoded::Body::new(Self::block_to_body(&best_block.block)));
			}
		}

		// Read from DB and populate cache
		let b = self.db.get(db::COL_BODIES, hash)
			.expect("Low level database error. Some issue with disk?")?;

		let bytes = decompress(&b, blocks_swapper()).into_vec();
		let mut write = self.block_bodies.write();
		write.insert(*hash, bytes.clone());

		self.cache_man.lock().note_used(CacheId::BlockBody(*hash));
		Some(encoded::Body::new(bytes))
	}

	/// Get the familial details concerning a block.
	fn block_details(&self, hash: &H256) -> Option<BlockDetails> {
		let result = self.db.read_with_cache(db::COL_EXTRA, &self.block_details, hash)?;
		self.cache_man.lock().note_used(CacheId::BlockDetails(*hash));
		Some(result)
	}

	/// Get the hash of given block's number.
	fn block_hash(&self, index: BlockNumber) -> Option<H256> {
		let result = self.db.read_with_cache(db::COL_EXTRA, &self.block_hashes, &index)?;
		self.cache_man.lock().note_used(CacheId::BlockHashes(index));
		Some(result)
	}

	/// Get the address of transaction with given hash.
	fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress> {
		let result = self.db.read_with_cache(db::COL_EXTRA, &self.transaction_addresses, hash)?;
		self.cache_man.lock().note_used(CacheId::TransactionAddresses(*hash));
		Some(result)
	}

	/// Get receipts of block with given hash.
	fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
		let result = self.db.read_with_cache(db::COL_EXTRA, &self.block_receipts, hash)?;
		self.cache_man.lock().note_used(CacheId::BlockReceipts(*hash));
		Some(result)
	}

	/// Returns numbers of blocks containing given bloom.
	fn blocks_with_bloom(&self, bloom: &Bloom, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber> {
		let range = from_block as bc::Number..to_block as bc::Number;
		let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
		chain.with_bloom(&range, bloom)
			.into_iter()
			.map(|b| b as BlockNumber)
			.collect()
	}

	fn logs<F>(&self, mut blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
		where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized {
		// sort in reverse order (newest block first); the final list is
		// reversed again at the end so callers get ascending order.
		blocks.sort_by(|a, b| b.cmp(a));

		let mut logs = blocks
			.chunks(128)
			// Process each chunk of block numbers in parallel via rayon.
			.flat_map(move |blocks_chunk| {
				blocks_chunk.into_par_iter()
					.filter_map(|number| self.block_hash(*number).map(|hash| (*number, hash)))
					.filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
					.filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, b.transaction_hashes())))
					.flat_map(|(number, hash, mut receipts, mut hashes)| {
						if receipts.len() != hashes.len() {
							warn!("Block {} ({}) has different number of receipts ({}) to transactions ({}). Database corrupt?", number, hash, receipts.len(), hashes.len());
							// NOTE(review): `assert!(false)` unconditionally panics here
							// (in release builds too); consider `panic!` with the message
							// for clarity — confirm intent before changing.
							assert!(false);
						}
						// Total number of logs in the block; counted down as we
						// iterate receipts in reverse to derive per-log indices.
						let mut log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len());

						let receipts_len = receipts.len();
						hashes.reverse();
						receipts.reverse();
						receipts.into_iter()
							.map(|receipt| receipt.logs)
							.zip(hashes)
							.enumerate()
							.flat_map(move |(index, (mut logs, tx_hash))| {
								let current_log_index = log_index;
								let no_of_logs = logs.len();
								log_index -= no_of_logs;

								logs.reverse();
								logs.into_iter()
									.enumerate()
									.map(move |(i, log)| LocalizedLogEntry {
										entry: log,
										block_hash: hash,
										block_number: number,
										transaction_hash: tx_hash,
										// iterating in reverse order
										transaction_index: receipts_len - index - 1,
										transaction_log_index: no_of_logs - i - 1,
										log_index: current_log_index - i - 1,
									})
							})
							.filter(|log_entry| matches(&log_entry.entry))
							.take(limit.unwrap_or(::std::usize::MAX))
							.collect::<Vec<_>>()
					})
					.collect::<Vec<_>>()
			})
			.take(limit.unwrap_or(::std::usize::MAX))
			.collect::<Vec<LocalizedLogEntry>>();
		// restore ascending (oldest-first) order
		logs.reverse();
		logs
	}
}
2016-10-24 18:27:23 +02:00
/// An iterator which walks the blockchain towards the genesis,
/// following `BlockDetails::parent` links starting from `current`.
#[derive(Clone)]
pub struct AncestryIter<'a> {
	// Hash yielded next; replaced by its parent on each step.
	current: H256,
	chain: &'a BlockChain,
}
2016-03-02 17:31:42 +01:00
2016-03-02 17:04:44 +01:00
impl < ' a > Iterator for AncestryIter < ' a > {
type Item = H256 ;
fn next ( & mut self ) -> Option < H256 > {
if self . current . is_zero ( ) {
2016-10-24 18:27:23 +02:00
None
2016-03-02 17:04:44 +01:00
} else {
2016-10-24 18:27:23 +02:00
self . chain . block_details ( & self . current )
. map ( | details | mem ::replace ( & mut self . current , details . parent ) )
2016-03-02 17:04:44 +01:00
}
}
}
2017-04-19 16:27:45 +02:00
/// An iterator which walks all epoch transitions.
/// Returns epoch transitions.
pub struct EpochTransitionIter<'a> {
	chain: &'a BlockChain,
	// Raw (key, value) iterator over DB entries under `EPOCH_KEY_PREFIX`;
	// may run past the prefix, which the consumer must detect.
	prefix_iter: Box<Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a>,
}
impl<'a> Iterator for EpochTransitionIter<'a> {
	type Item = (u64, EpochTransition);

	/// Advances to the next epoch transition that is either on the canon
	/// chain or within the ancient block gap; skips all other candidates.
	fn next(&mut self) -> Option<Self::Item> {
		loop {
			// some epochs never occurred on the main chain.
			let (key, val) = self.prefix_iter.next()?;

			// iterator may continue beyond values beginning with this
			// prefix.
			if !key.starts_with(&EPOCH_KEY_PREFIX[..]) {
				return None
			}

			let transitions: EpochTransitions = ::rlp::decode(&val[..]);

			// if there are multiple candidates, at most one will be on the
			// canon chain.
			for transition in transitions.candidates.into_iter() {
				let is_in_canon_chain = self.chain.block_hash(transition.block_number)
					.map_or(false, |hash| hash == transition.block_hash);

				// if the transition is within the block gap, there will only be
				// one candidate, and it will be from a snapshot restored from.
				let is_ancient = self.chain.first_block_number()
					.map_or(false, |first| first > transition.block_number);

				if is_ancient || is_in_canon_chain {
					return Some((transitions.number, transition))
				}
			}
			// no viable candidate in this entry — keep scanning.
		}
	}
}
2015-12-09 19:03:25 +01:00
impl BlockChain {
2017-01-23 15:27:11 +01:00
	/// Create new instance of blockchain from given Genesis.
	///
	/// Loads the best block from the database, inserting the genesis block
	/// first if the database is empty. Also reconstructs the "first block"
	/// and "best ancient block" markers used for gapped (snapshot-restored)
	/// chains.
	///
	/// # Panics
	/// Panics on low-level database errors.
	pub fn new(config: Config, genesis: &[u8], db: Arc<KeyValueDB>) -> BlockChain {
		// 400 is the average size of the key
		let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400);

		let mut bc = BlockChain {
			blooms_config: bc::Config {
				levels: LOG_BLOOMS_LEVELS,
				elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX,
			},
			first_block: None,
			best_block: RwLock::new(BestBlock::default()),
			best_ancient_block: RwLock::new(None),
			block_headers: RwLock::new(HashMap::new()),
			block_bodies: RwLock::new(HashMap::new()),
			block_details: RwLock::new(HashMap::new()),
			block_hashes: RwLock::new(HashMap::new()),
			transaction_addresses: RwLock::new(HashMap::new()),
			blocks_blooms: RwLock::new(HashMap::new()),
			block_receipts: RwLock::new(HashMap::new()),
			db: db.clone(),
			cache_man: Mutex::new(cache_man),
			pending_best_block: RwLock::new(None),
			pending_block_hashes: RwLock::new(HashMap::new()),
			pending_block_details: RwLock::new(HashMap::new()),
			pending_transaction_addresses: RwLock::new(HashMap::new()),
		};

		// load best block
		let best_block_hash = match bc.db.get(db::COL_EXTRA, b"best").unwrap() {
			Some(best) => {
				H256::from_slice(&best)
			}
			None => {
				// best block does not exist
				// we need to insert genesis into the cache
				let block = BlockView::new(genesis);
				let header = block.header_view();
				let hash = block.hash();
				let details = BlockDetails {
					number: header.number(),
					total_difficulty: header.difficulty(),
					parent: header.parent_hash(),
					children: vec![],
				};

				let mut batch = DBTransaction::new();
				batch.put(db::COL_HEADERS, &hash, block.header_rlp().as_raw());
				batch.put(db::COL_BODIES, &hash, &Self::block_to_body(genesis));

				batch.write(db::COL_EXTRA, &hash, &details);
				batch.write(db::COL_EXTRA, &header.number(), &hash);

				batch.put(db::COL_EXTRA, b"best", &hash);
				bc.db.write(batch).expect("Low level database error. Some issue with disk?");
				hash
			}
		};

		{
			// Fetch best block details
			let best_block_number = bc.block_number(&best_block_hash).unwrap();
			let best_block_total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty;
			let best_block_rlp = bc.block(&best_block_hash).unwrap().into_inner();
			let best_block_timestamp = BlockView::new(&best_block_rlp).header().timestamp();

			let raw_first = bc.db.get(db::COL_EXTRA, b"first").unwrap().map(|v| v.into_vec());
			let mut best_ancient = bc.db.get(db::COL_EXTRA, b"ancient").unwrap().map(|h| H256::from_slice(&h));
			let best_ancient_number;
			// No explicit ancient marker but block #1 is missing while the
			// best block is beyond it: there is a gap right after genesis.
			if best_ancient.is_none() && best_block_number > 1 && bc.block_hash(1).is_none() {
				best_ancient = Some(bc.genesis_hash());
				best_ancient_number = Some(0);
			} else {
				best_ancient_number = best_ancient.as_ref().and_then(|h| bc.block_number(h));
			}

			// binary search for the first block.
			match raw_first {
				None => {
					// Invariant: blocks in [f, best] exist; blocks below l may not.
					let (mut f, mut hash) = (best_block_number, best_block_hash);
					let mut l = best_ancient_number.unwrap_or(0);

					loop {
						if l >= f { break; }

						let step = (f - l) >> 1;
						let m = l + step;

						match bc.block_hash(m) {
							Some(h) => { f = m; hash = h },
							None => { l = m + 1 },
						}
					}

					if hash != bc.genesis_hash() {
						trace!("First block calculated: {:?}", hash);
						// Persist the computed marker so the search is not redone.
						let mut batch = db.transaction();
						batch.put(db::COL_EXTRA, b"first", &hash);
						db.write(batch).expect("Low level database error.");
						bc.first_block = Some(hash);
					}
				},
				Some(raw_first) => {
					bc.first_block = Some(H256::from_slice(&raw_first));
				},
			}

			// and write them
			let mut best_block = bc.best_block.write();
			*best_block = BestBlock {
				number: best_block_number,
				total_difficulty: best_block_total_difficulty,
				hash: best_block_hash,
				timestamp: best_block_timestamp,
				block: best_block_rlp,
			};

			if let (Some(hash), Some(number)) = (best_ancient, best_ancient_number) {
				let mut best_ancient_block = bc.best_ancient_block.write();
				*best_ancient_block = Some(BestAncientBlock {
					hash: hash,
					number: number,
				});
			}
		}

		bc
	}
2016-07-17 09:18:15 +02:00
/// Returns true if the given parent block has given child
/// (though not necessarily a part of the canon chain).
fn is_known_child ( & self , parent : & H256 , hash : & H256 ) -> bool {
2016-08-18 18:24:49 +02:00
self . db . read_with_cache ( db ::COL_EXTRA , & self . block_details , parent ) . map_or ( false , | d | d . children . contains ( hash ) )
2016-07-17 09:18:15 +02:00
}
2015-12-17 15:11:42 +01:00
	/// Returns a tree route between `from` and `to`, which is a tuple of:
	///
	/// - a vector of hashes of all blocks, ordered from `from` to `to`.
	///
	/// - common ancestor of these blocks.
	///
	/// - an index where best common ancestor would be
	///
	/// 1.) from newer to older
	///
	/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
	/// - from: A5, to: A4
	/// - route:
	///
	/// ```json
	/// { blocks: [A5], ancestor: A4, index: 1 }
	/// ```
	///
	/// 2.) from older to newer
	///
	/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
	/// - from: A3, to: A4
	/// - route:
	///
	/// ```json
	/// { blocks: [A4], ancestor: A3, index: 0 }
	/// ```
	///
	/// 3.) fork:
	///
	/// - bc:
	///
	/// ```text
	/// A1 -> A2 -> A3 -> A4
	///       -> B3 -> B4
	/// ```
	/// - from: B4, to: A4
	/// - route:
	///
	/// ```json
	/// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 }
	/// ```
	///
	/// If the tree route verges into pruned or unknown blocks,
	/// `None` is returned.
	pub fn tree_route(&self, from: H256, to: H256) -> Option<TreeRoute> {
		let mut from_branch = vec![];
		let mut to_branch = vec![];

		// `?` aborts with None when details are unknown/pruned.
		let mut from_details = self.block_details(&from)?;
		let mut to_details = self.block_details(&to)?;
		let mut current_from = from;
		let mut current_to = to;

		// reset from && to to the same level: walk the deeper branch up
		// until both cursors sit at the same block number.
		while from_details.number > to_details.number {
			from_branch.push(current_from);
			current_from = from_details.parent.clone();
			from_details = self.block_details(&from_details.parent)?;
		}

		while to_details.number > from_details.number {
			to_branch.push(current_to);
			current_to = to_details.parent.clone();
			to_details = self.block_details(&to_details.parent)?;
		}

		assert_eq!(from_details.number, to_details.number);

		// move to shared parent: step both branches up in lockstep until
		// they converge on the common ancestor.
		while current_from != current_to {
			from_branch.push(current_from);
			current_from = from_details.parent.clone();
			from_details = self.block_details(&from_details.parent)?;

			to_branch.push(current_to);
			current_to = to_details.parent.clone();
			to_details = self.block_details(&to_details.parent)?;
		}

		// index of the ancestor within the combined route.
		let index = from_branch.len();

		// append the `to` side in forward (ancestor -> to) order.
		from_branch.extend(to_branch.into_iter().rev());

		Some(TreeRoute {
			blocks: from_branch,
			ancestor: current_from,
			index: index
		})
	}
2016-08-05 17:00:46 +02:00
/// Inserts a verified, known block from the canonical chain.
///
/// Can be performed out-of-order, but care must be taken that the final chain is in a correct state.
/// This is used by snapshot restoration and when downloading missing blocks for the chain gap.
/// `is_best` forces the best block to be updated to this block.
/// `is_ancient` forces the best block of the first block sequence to be updated to this block.
/// `parent_td` is a parent total difficulty.
/// Supply a dummy parent total difficulty when the parent block may not be in the chain.
/// Returns true if the block is disconnected.
pub fn insert_unordered_block(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec<Receipt>, parent_td: Option<U256>, is_best: bool, is_ancient: bool) -> bool {
	let block = BlockView::new(bytes);
	let header = block.header_view();
	let hash = header.hash();

	// already imported: nothing to do, and the block is not disconnected.
	if self.is_known(&hash) {
		return false;
	}

	// out-of-order insertion must not race with an uncommitted ordered insert.
	assert!(self.pending_best_block.read().is_none());

	let compressed_header = compress(block.header_rlp().as_raw(), blocks_swapper());
	let compressed_body = compress(&Self::block_to_body(bytes), blocks_swapper());

	// store block in db
	batch.put(db::COL_HEADERS, &hash, &compressed_header);
	batch.put(db::COL_BODIES, &hash, &compressed_body);

	let maybe_parent = self.block_details(&header.parent_hash());

	if let Some(parent_details) = maybe_parent {
		// parent known to be in chain.
		let info = BlockInfo {
			hash: hash,
			number: header.number(),
			total_difficulty: parent_details.total_difficulty + header.difficulty(),
			location: BlockLocation::CanonChain,
		};

		self.prepare_update(batch, ExtrasUpdate {
			block_hashes: self.prepare_block_hashes_update(bytes, &info),
			block_details: self.prepare_block_details_update(bytes, &info),
			block_receipts: self.prepare_block_receipts_update(receipts, &info),
			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
			info: info,
			timestamp: header.timestamp(),
			block: bytes
		}, is_best);

		if is_ancient {
			let mut best_ancient_block = self.best_ancient_block.write();
			let ancient_number = best_ancient_block.as_ref().map_or(0, |b| b.number);
			if self.block_hash(header.number() + 1).is_some() {
				// the gap above this block is closed: the ancient marker is obsolete.
				batch.delete(db::COL_EXTRA, b"ancient");
				*best_ancient_block = None;
			} else if header.number() > ancient_number {
				// advance the ancient-block marker to this higher block.
				batch.put(db::COL_EXTRA, b"ancient", &hash);
				*best_ancient_block = Some(BestAncientBlock {
					hash: hash,
					number: header.number(),
				});
			}
		}

		// parent was present, so the block is connected.
		false
	} else {
		// parent not in the chain yet. we need the parent difficulty to proceed.
		let d = parent_td
			.expect("parent total difficulty always supplied for first block in chunk. only first block can have missing parent; qed");

		let info = BlockInfo {
			hash: hash,
			number: header.number(),
			total_difficulty: d + header.difficulty(),
			location: BlockLocation::CanonChain,
		};

		// parent details are unavailable, so only this block's details are written.
		let block_details = BlockDetails {
			number: header.number(),
			total_difficulty: info.total_difficulty,
			parent: header.parent_hash(),
			children: Vec::new(),
		};

		let mut update = HashMap::new();
		update.insert(hash, block_details);

		self.prepare_update(batch, ExtrasUpdate {
			block_hashes: self.prepare_block_hashes_update(bytes, &info),
			block_details: update,
			block_receipts: self.prepare_block_receipts_update(receipts, &info),
			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
			info: info,
			timestamp: header.timestamp(),
			block: bytes,
		}, is_best);

		// no parent in chain: the block is disconnected.
		true
	}
}
2017-04-19 14:58:19 +02:00
/// Insert an epoch transition. Provide an epoch number being transitioned to
/// and epoch transition object.
///
/// The block the transition occurred at should have already been inserted into the chain.
pub fn insert_epoch_transition ( & self , batch : & mut DBTransaction , epoch_num : u64 , transition : EpochTransition ) {
let mut transitions = match self . db . read ( db ::COL_EXTRA , & epoch_num ) {
Some ( existing ) = > existing ,
None = > EpochTransitions {
number : epoch_num ,
candidates : Vec ::with_capacity ( 1 ) ,
}
} ;
2017-04-19 15:35:12 +02:00
// ensure we don't write any duplicates.
if transitions . candidates . iter ( ) . find ( | c | c . block_hash = = transition . block_hash ) . is_none ( ) {
transitions . candidates . push ( transition ) ;
batch . write ( db ::COL_EXTRA , & epoch_num , & transitions ) ;
}
2017-04-19 14:58:19 +02:00
}
2017-04-19 16:27:45 +02:00
/// Iterate over all epoch transitions.
2017-05-17 12:41:33 +02:00
/// This will only return transitions within the canonical chain.
2017-04-19 16:27:45 +02:00
pub fn epoch_transitions ( & self ) -> EpochTransitionIter {
let iter = self . db . iter_from_prefix ( db ::COL_EXTRA , & EPOCH_KEY_PREFIX [ .. ] ) ;
EpochTransitionIter {
chain : self ,
prefix_iter : iter ,
}
}
2017-06-28 13:17:36 +02:00
/// Get a specific epoch transition by block number and provided block hash.
pub fn epoch_transition ( & self , block_num : u64 , block_hash : H256 ) -> Option < EpochTransition > {
trace! ( target : " blockchain " , " Loading epoch transition at block {}, {} " ,
block_num , block_hash ) ;
2017-05-17 12:41:33 +02:00
2017-06-28 13:17:36 +02:00
self . db . read ( db ::COL_EXTRA , & block_num ) . and_then ( | transitions : EpochTransitions | {
2017-05-17 12:41:33 +02:00
transitions . candidates . into_iter ( ) . find ( | c | c . block_hash = = block_hash )
} )
}
2017-06-28 13:17:36 +02:00
/// Get the transition to the epoch the given parent hash is part of
/// or transitions to.
/// This will give the epoch that any children of this parent belong to.
///
/// The block corresponding to the parent hash must be stored already.
pub fn epoch_transition_for(&self, parent_hash: H256) -> Option<EpochTransition> {
	// slow path: loop back block by block
	for hash in self.ancestry_iter(parent_hash)? {
		let details = self.block_details(&hash)?;

		// look for transition in database.
		if let Some(transition) = self.epoch_transition(details.number, hash) {
			return Some(transition)
		}

		// canonical hash -> fast breakout:
		// get the last epoch transition up to this block.
		//
		// if `block_hash` is canonical it will only return transitions up to
		// the parent.
		if self.block_hash(details.number)? == hash {
			return self.epoch_transitions()
				.map(|(_, t)| t)
				.take_while(|t| t.block_number <= details.number)
				.last()
		}
	}

	// should never happen as the loop will encounter genesis before concluding.
	None
}
/// Write a pending epoch transition by block hash.
///
/// The entry is keyed by the block hash in the extras column; it becomes a
/// real transition only once finality is decided elsewhere.
pub fn insert_pending_transition(&self, batch: &mut DBTransaction, hash: H256, t: PendingEpochTransition) {
	batch.write(db::COL_EXTRA, &hash, &t);
}
/// Get a pending epoch transition by block hash.
///
/// Returns `None` when no pending transition was stored for `hash`.
// TODO: implement removal safely: this can only be done upon finality of a block
// that _uses_ the pending transition.
pub fn get_pending_transition(&self, hash: H256) -> Option<PendingEpochTransition> {
	self.db.read(db::COL_EXTRA, &hash)
}
2016-08-05 17:00:46 +02:00
/// Add a child to a given block. Assumes that the block hash is in
/// the chain and the child's parent is this block.
///
/// Used in snapshots to glue the chunks together at the end.
2016-10-28 16:10:30 +02:00
pub fn add_child ( & self , batch : & mut DBTransaction , block_hash : H256 , child_hash : H256 ) {
2016-08-05 17:00:46 +02:00
let mut parent_details = self . block_details ( & block_hash )
. unwrap_or_else ( | | panic! ( " Invalid block hash: {:?} " , block_hash ) ) ;
parent_details . children . push ( child_hash ) ;
let mut update = HashMap ::new ( ) ;
update . insert ( block_hash , parent_details ) ;
let mut write_details = self . block_details . write ( ) ;
2016-08-18 18:24:49 +02:00
batch . extend_with_cache ( db ::COL_EXTRA , & mut * write_details , update , CacheUpdatePolicy ::Overwrite ) ;
2016-08-05 17:00:46 +02:00
2016-12-09 23:01:43 +01:00
self . cache_man . lock ( ) . note_used ( CacheId ::BlockDetails ( block_hash ) ) ;
2016-08-05 17:00:46 +02:00
}
2015-12-17 01:54:24 +01:00
/// Inserts the block into backing cache database.
/// Expects the block to be valid and already verified.
/// If the block is already known, does nothing.
pub fn insert_block(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
	// create views onto rlp
	let block = BlockView::new(bytes);
	let header = block.header_view();
	let hash = header.hash();

	// already linked to its parent: a repeat insert, nothing to import.
	if self.is_known_child(&header.parent_hash(), &hash) {
		return ImportRoute::none();
	}

	// a previous insert must have been committed before inserting again.
	assert!(self.pending_best_block.read().is_none());

	let compressed_header = compress(block.header_rlp().as_raw(), blocks_swapper());
	let compressed_body = compress(&Self::block_to_body(bytes), blocks_swapper());

	// store block in db
	batch.put(db::COL_HEADERS, &hash, &compressed_header);
	batch.put(db::COL_BODIES, &hash, &compressed_body);

	// classify the block: canon extension, side branch, or reorg.
	let info = self.block_info(&header);

	if let BlockLocation::BranchBecomingCanonChain(ref d) = info.location {
		// log the reorg: retracted hashes in red, common ancestor in white,
		// enacted hashes in green.
		info!(target: "reorg", "Reorg to {} ({} {} {})",
			Colour::Yellow.bold().paint(format!("#{} {}", info.number, info.hash)),
			Colour::Red.paint(d.retracted.iter().join(" ")),
			Colour::White.paint(format!("#{} {}", self.block_details(&d.ancestor).expect("`ancestor` is in the route; qed").number, d.ancestor)),
			Colour::Green.paint(d.enacted.iter().join(" "))
		);
	}

	// stage all extras (hashes, details, receipts, blooms, tx addresses)
	// as pending; they become visible on `commit`.
	self.prepare_update(batch, ExtrasUpdate {
		block_hashes: self.prepare_block_hashes_update(bytes, &info),
		block_details: self.prepare_block_details_update(bytes, &info),
		block_receipts: self.prepare_block_receipts_update(receipts, &info),
		blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
		transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
		info: info.clone(),
		timestamp: header.timestamp(),
		block: bytes,
	}, true);

	ImportRoute::from(info)
}
2015-12-21 15:22:24 +01:00
2016-07-26 00:20:37 +02:00
/// Get inserted block info which is critical to prepare extras updates.
2016-08-03 22:03:40 +02:00
fn block_info ( & self , header : & HeaderView ) -> BlockInfo {
2017-08-30 19:18:28 +02:00
let hash = header . hash ( ) ;
2016-07-26 00:20:37 +02:00
let number = header . number ( ) ;
let parent_hash = header . parent_hash ( ) ;
let parent_details = self . block_details ( & parent_hash ) . unwrap_or_else ( | | panic! ( " Invalid parent hash: {:?} " , parent_hash ) ) ;
2017-01-23 15:27:11 +01:00
let is_new_best = parent_details . total_difficulty + header . difficulty ( ) > self . best_block_total_difficulty ( ) ;
2016-07-26 00:20:37 +02:00
BlockInfo {
hash : hash ,
number : number ,
2016-12-05 16:20:32 +01:00
total_difficulty : parent_details . total_difficulty + header . difficulty ( ) ,
2016-07-26 00:20:37 +02:00
location : if is_new_best {
// on new best block we need to make sure that all ancestors
// are moved to "canon chain"
// find the route between old best block and the new one
let best_hash = self . best_block_hash ( ) ;
2017-04-20 16:21:53 +02:00
let route = self . tree_route ( best_hash , parent_hash )
. expect ( " blocks being imported always within recent history; qed " ) ;
2016-07-26 00:20:37 +02:00
assert_eq! ( number , parent_details . number + 1 ) ;
match route . blocks . len ( ) {
0 = > BlockLocation ::CanonChain ,
_ = > {
let retracted = route . blocks . iter ( ) . take ( route . index ) . cloned ( ) . collect ::< Vec < _ > > ( ) . into_iter ( ) . collect ::< Vec < _ > > ( ) ;
let enacted = route . blocks . into_iter ( ) . skip ( route . index ) . collect ::< Vec < _ > > ( ) ;
BlockLocation ::BranchBecomingCanonChain ( BranchBecomingCanonChainData {
ancestor : route . ancestor ,
enacted : enacted ,
retracted : retracted ,
} )
}
}
} else {
BlockLocation ::Branch
}
}
}
2016-08-01 19:10:13 +02:00
/// Prepares extras update.
///
/// Receipts and blooms are written to their live caches immediately; block
/// hashes, block details, transaction addresses and the best block are staged
/// in `pending_*` structures and only become visible after `commit`.
fn prepare_update(&self, batch: &mut DBTransaction, update: ExtrasUpdate, is_best: bool) {
	{
		// receipts go straight to the live cache (Remove policy: drop
		// cached entries that the batch overwrites).
		let mut write_receipts = self.block_receipts.write();
		batch.extend_with_cache(db::COL_EXTRA, &mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
	}

	{
		let mut write_blocks_blooms = self.blocks_blooms.write();
		// update best block
		match update.info.location {
			BlockLocation::Branch => (),
			BlockLocation::BranchBecomingCanonChain(_) => {
				// clear all existing blooms, cause they may be created for block
				// number higher than current best block
				*write_blocks_blooms = update.blocks_blooms;
				for (key, value) in write_blocks_blooms.iter() {
					batch.write(db::COL_EXTRA, key, value);
				}
			},
			BlockLocation::CanonChain => {
				// update all existing blooms groups
				for (key, value) in update.blocks_blooms {
					match write_blocks_blooms.entry(key) {
						hash_map::Entry::Occupied(mut entry) => {
							// merge the new group into the cached one.
							entry.get_mut().accrue_bloom_group(&value);
							batch.write(db::COL_EXTRA, entry.key(), entry.get());
						},
						hash_map::Entry::Vacant(entry) => {
							batch.write(db::COL_EXTRA, entry.key(), &value);
							entry.insert(value);
						},
					}
				}
			},
		}
	}

	// These cached values must be updated last with all four locks taken to avoid
	// cache decoherence
	{
		let mut best_block = self.pending_best_block.write();
		// the best block only moves when this block lands on (or creates)
		// the canonical chain.
		if is_best && update.info.location != BlockLocation::Branch {
			batch.put(db::COL_EXTRA, b"best", &update.info.hash);
			*best_block = Some(BestBlock {
				hash: update.info.hash,
				number: update.info.number,
				total_difficulty: update.info.total_difficulty,
				timestamp: update.timestamp,
				block: update.block.to_vec(),
			});
		}

		let mut write_hashes = self.pending_block_hashes.write();
		let mut write_details = self.pending_block_details.write();
		let mut write_txs = self.pending_transaction_addresses.write();

		batch.extend_with_cache(db::COL_EXTRA, &mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
		batch.extend_with_cache(db::COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Overwrite);
		// transaction addresses are `Option`s: `None` marks a retracted
		// transaction that must be removed on commit.
		batch.extend_with_option_cache(db::COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
	}
}
2016-08-31 16:55:43 +02:00
/// Apply pending insertion updates
///
/// Moves everything staged by `prepare_update` from the `pending_*` caches
/// into the live caches. All pending and live locks are taken before any
/// data moves, so readers never observe a half-applied update.
pub fn commit(&self) {
	let mut pending_best_block = self.pending_best_block.write();
	let mut pending_write_hashes = self.pending_block_hashes.write();
	let mut pending_block_details = self.pending_block_details.write();
	let mut pending_write_txs = self.pending_transaction_addresses.write();

	let mut best_block = self.best_block.write();
	let mut write_block_details = self.block_details.write();
	let mut write_hashes = self.block_hashes.write();
	let mut write_txs = self.transaction_addresses.write();

	// update best block
	if let Some(block) = pending_best_block.take() {
		*best_block = block;
	}

	// split staged tx addresses: `None` values are retractions to delete,
	// `Some` values are enacted addresses to insert.
	let pending_txs = mem::replace(&mut *pending_write_txs, HashMap::new());
	let (retracted_txs, enacted_txs) = pending_txs.into_iter().partition::<HashMap<_, _>, _>(|&(_, ref value)| value.is_none());

	// snapshot the keys before the maps are drained; they feed the cache
	// manager's usage tracking below.
	let pending_hashes_keys: Vec<_> = pending_write_hashes.keys().cloned().collect();
	let enacted_txs_keys: Vec<_> = enacted_txs.keys().cloned().collect();
	let pending_block_hashes: Vec<_> = pending_block_details.keys().cloned().collect();

	// drain pending into live, leaving the pending maps empty for the next insert.
	write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new()));
	write_txs.extend(enacted_txs.into_iter().map(|(k, v)| (k, v.expect("Transactions were partitioned; qed"))));
	write_block_details.extend(mem::replace(&mut *pending_block_details, HashMap::new()));

	// drop retracted transactions from the live cache.
	for hash in retracted_txs.keys() {
		write_txs.remove(hash);
	}

	// mark every touched entry as recently used so garbage collection
	// keeps it around.
	let mut cache_man = self.cache_man.lock();
	for n in pending_hashes_keys {
		cache_man.note_used(CacheId::BlockHashes(n));
	}

	for hash in enacted_txs_keys {
		cache_man.note_used(CacheId::TransactionAddresses(hash));
	}

	for hash in pending_block_hashes {
		cache_man.note_used(CacheId::BlockDetails(hash));
	}
}
2016-03-02 18:32:54 +01:00
/// Iterator that lists `first` and then all of `first`'s ancestors, by hash.
2016-03-02 18:05:47 +01:00
pub fn ancestry_iter ( & self , first : H256 ) -> Option < AncestryIter > {
2016-03-02 18:32:54 +01:00
if self . is_known ( & first ) {
Some ( AncestryIter {
current : first ,
2016-07-26 20:31:25 +02:00
chain : self ,
2016-03-02 18:32:54 +01:00
} )
} else {
None
2016-03-02 17:04:44 +01:00
}
}
2016-03-02 19:38:00 +01:00
/// Given a block's `parent`, find every block header which represents a valid possible uncle.
pub fn find_uncle_headers(&self, parent: &H256, uncle_generations: usize) -> Option<Vec<Header>> {
	// resolve candidate hashes first, then load whichever headers are available.
	let hashes = self.find_uncle_hashes(parent, uncle_generations)?;
	Some(hashes.into_iter().filter_map(|h| self.block_header(&h)).collect())
}
/// Given a block's `parent`, find every block hash which represents a valid possible uncle.
pub fn find_uncle_hashes ( & self , parent : & H256 , uncle_generations : usize ) -> Option < Vec < H256 > > {
2018-03-06 19:44:05 +01:00
if ! self . is_known ( parent ) {
return None ;
}
2016-03-02 19:38:00 +01:00
let mut excluded = HashSet ::new ( ) ;
2018-03-06 19:44:05 +01:00
let ancestry = self . ancestry_iter ( parent . clone ( ) ) ? ;
2016-10-24 18:27:23 +02:00
for a in ancestry . clone ( ) . take ( uncle_generations ) {
if let Some ( uncles ) = self . uncle_hashes ( & a ) {
excluded . extend ( uncles ) ;
excluded . insert ( a ) ;
} else {
break
}
2016-03-02 19:38:00 +01:00
}
let mut ret = Vec ::new ( ) ;
2016-10-24 18:27:23 +02:00
for a in ancestry . skip ( 1 ) . take ( uncle_generations ) {
if let Some ( details ) = self . block_details ( & a ) {
ret . extend ( details . children . iter ( ) . filter ( | h | ! excluded . contains ( h ) ) )
} else {
break
}
2016-03-02 18:32:54 +01:00
}
2016-10-24 18:27:23 +02:00
2016-03-02 19:38:00 +01:00
Some ( ret )
2016-03-01 19:59:12 +01:00
}
2016-02-27 10:19:33 +01:00
/// This function returns modified block hashes.
fn prepare_block_hashes_update ( & self , block_bytes : & [ u8 ] , info : & BlockInfo ) -> HashMap < BlockNumber , H256 > {
let mut block_hashes = HashMap ::new ( ) ;
2016-02-27 01:37:12 +01:00
let block = BlockView ::new ( block_bytes ) ;
let header = block . header_view ( ) ;
let number = header . number ( ) ;
match info . location {
2016-02-27 10:19:33 +01:00
BlockLocation ::Branch = > ( ) ,
2016-02-27 01:37:12 +01:00
BlockLocation ::CanonChain = > {
2018-02-16 10:11:29 +01:00
block_hashes . insert ( number , info . hash ) ;
2016-02-27 01:37:12 +01:00
} ,
2016-04-17 17:18:25 +02:00
BlockLocation ::BranchBecomingCanonChain ( ref data ) = > {
2016-07-28 23:46:24 +02:00
let ancestor_number = self . block_number ( & data . ancestor ) . expect ( " Block number of ancestor is always in DB " ) ;
2016-02-27 01:37:12 +01:00
let start_number = ancestor_number + 1 ;
2016-04-17 17:18:25 +02:00
for ( index , hash ) in data . enacted . iter ( ) . cloned ( ) . enumerate ( ) {
2016-02-27 10:19:33 +01:00
block_hashes . insert ( start_number + index as BlockNumber , hash ) ;
2016-02-27 01:37:12 +01:00
}
2018-02-16 10:11:29 +01:00
block_hashes . insert ( number , info . hash ) ;
2016-02-27 01:37:12 +01:00
}
2016-02-27 10:19:33 +01:00
}
block_hashes
2016-02-27 01:37:12 +01:00
}
2016-02-27 10:19:33 +01:00
/// This function returns modified block details.
2016-08-05 17:00:46 +02:00
/// Uses the given parent details or attempts to load them from the database.
2016-02-27 10:19:33 +01:00
fn prepare_block_details_update ( & self , block_bytes : & [ u8 ] , info : & BlockInfo ) -> HashMap < H256 , BlockDetails > {
2016-02-27 01:37:12 +01:00
let block = BlockView ::new ( block_bytes ) ;
let header = block . header_view ( ) ;
2015-12-17 17:20:10 +01:00
let parent_hash = header . parent_hash ( ) ;
2015-12-17 15:11:42 +01:00
2016-02-27 01:37:12 +01:00
// update parent
2016-07-19 09:23:53 +02:00
let mut parent_details = self . block_details ( & parent_hash ) . unwrap_or_else ( | | panic! ( " Invalid parent hash: {:?} " , parent_hash ) ) ;
2018-02-16 10:11:29 +01:00
parent_details . children . push ( info . hash ) ;
2016-02-27 01:37:12 +01:00
2017-06-28 13:17:36 +02:00
// create current block details.
2015-12-17 15:11:42 +01:00
let details = BlockDetails {
number : header . number ( ) ,
2016-02-27 01:37:12 +01:00
total_difficulty : info . total_difficulty ,
2018-02-16 10:11:29 +01:00
parent : parent_hash ,
2017-06-28 13:17:36 +02:00
children : vec ! [ ] ,
2015-12-17 15:11:42 +01:00
} ;
2015-12-26 15:47:07 +01:00
2016-02-27 01:37:12 +01:00
// write to batch
2016-02-27 10:19:33 +01:00
let mut block_details = HashMap ::new ( ) ;
block_details . insert ( parent_hash , parent_details ) ;
2018-02-16 10:11:29 +01:00
block_details . insert ( info . hash , details ) ;
2016-02-27 10:19:33 +01:00
block_details
2016-02-27 01:37:12 +01:00
}
2015-12-17 15:11:42 +01:00
2016-02-27 10:19:33 +01:00
/// This function returns modified block receipts.
fn prepare_block_receipts_update ( & self , receipts : Vec < Receipt > , info : & BlockInfo ) -> HashMap < H256 , BlockReceipts > {
let mut block_receipts = HashMap ::new ( ) ;
2018-02-16 10:11:29 +01:00
block_receipts . insert ( info . hash , BlockReceipts ::new ( receipts ) ) ;
2016-02-27 10:19:33 +01:00
block_receipts
2016-02-27 01:37:12 +01:00
}
2015-12-21 15:22:24 +01:00
2016-02-27 10:19:33 +01:00
/// This function returns modified transaction addresses.
///
/// Values are `Option`s: `Some(addr)` inserts/overwrites an address,
/// `None` marks a transaction for removal (retracted by a reorg).
fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, Option<TransactionAddress>> {
	let block = BlockView::new(block_bytes);
	let transaction_hashes = block.transaction_hashes();

	match info.location {
		BlockLocation::CanonChain => {
			// simple case: every transaction of this block gets an address
			// pointing into this block.
			transaction_hashes.into_iter()
				.enumerate()
				.map(|(i, tx_hash)| {
					(tx_hash, Some(TransactionAddress {
						block_hash: info.hash,
						index: i
					}))
				})
				.collect()
		},
		BlockLocation::BranchBecomingCanonChain(ref data) => {
			// addresses for transactions in every newly-enacted block.
			let addresses = data.enacted.iter()
				.flat_map(|hash| {
					let body = self.block_body(hash).expect("Enacted block must be in database.");
					let hashes = body.transaction_hashes();
					hashes.into_iter()
						.enumerate()
						.map(|(i, tx_hash)| (tx_hash, Some(TransactionAddress {
							block_hash: *hash,
							index: i,
						})))
						.collect::<HashMap<H256, Option<TransactionAddress>>>()
				});

			// addresses for the transactions of the block being inserted.
			let current_addresses = transaction_hashes.into_iter()
				.enumerate()
				.map(|(i, tx_hash)| {
					(tx_hash, Some(TransactionAddress {
						block_hash: info.hash,
						index: i
					}))
				});

			// retracted blocks' transactions map to `None` (removal).
			let retracted = data.retracted.iter().flat_map(|hash| {
				let body = self.block_body(hash).expect("Retracted block must be in database.");
				let hashes = body.transaction_hashes();
				hashes.into_iter().map(|hash| (hash, None)).collect::<HashMap<H256, Option<TransactionAddress>>>()
			});

			// The order here is important! Don't remove transaction if it was part of enacted blocks as well.
			// (later entries in the chain overwrite earlier ones on collect)
			retracted.chain(addresses).chain(current_addresses).collect()
		},
		BlockLocation::Branch => HashMap::new(),
	}
}
2016-02-12 14:03:23 +01:00
2018-03-12 21:15:55 +01:00
/// This functions returns modified blocks blooms.
///
/// To accelerate blooms lookups, blomms are stored in multiple
/// layers (BLOOM_LEVELS, currently 3).
/// ChainFilter is responsible for building and rebuilding these layers.
/// It returns them in HashMap, where values are Blooms and
/// keys are BloomIndexes. BloomIndex represents bloom location on one
/// of these layers.
///
/// To reduce number of queries to databse, block blooms are stored
/// in BlocksBlooms structure which contains info about several
/// (BLOOM_INDEX_SIZE, currently 16) consecutive blocks blooms.
///
/// Later, BloomIndexer is used to map bloom location on filter layer (BloomIndex)
/// to bloom location in database (BlocksBloomLocation).
///
fn prepare_block_blooms_update ( & self , block_bytes : & [ u8 ] , info : & BlockInfo ) -> HashMap < GroupPosition , BloomGroup > {
let block = BlockView ::new ( block_bytes ) ;
let header = block . header_view ( ) ;
let log_blooms = match info . location {
BlockLocation ::Branch = > HashMap ::new ( ) ,
BlockLocation ::CanonChain = > {
let log_bloom = header . log_bloom ( ) ;
if log_bloom . is_zero ( ) {
HashMap ::new ( )
} else {
let chain = bc ::group ::BloomGroupChain ::new ( self . blooms_config , self ) ;
chain . insert ( info . number as bc ::Number , log_bloom )
}
} ,
BlockLocation ::BranchBecomingCanonChain ( ref data ) = > {
let ancestor_number = self . block_number ( & data . ancestor ) . unwrap ( ) ;
let start_number = ancestor_number + 1 ;
let range = start_number as bc ::Number .. self . best_block_number ( ) as bc ::Number ;
let mut blooms : Vec < Bloom > = data . enacted . iter ( )
. map ( | hash | self . block_header_data ( hash ) . unwrap ( ) )
. map ( | h | h . log_bloom ( ) )
. collect ( ) ;
blooms . push ( header . log_bloom ( ) ) ;
let chain = bc ::group ::BloomGroupChain ::new ( self . blooms_config , self ) ;
chain . replace ( & range , blooms )
}
} ;
log_blooms . into_iter ( )
. map ( | p | ( From ::from ( p . 0 ) , From ::from ( p . 1 ) ) )
. collect ( )
}
2015-12-17 17:20:10 +01:00
/// Get best block hash.
2015-12-17 15:11:42 +01:00
pub fn best_block_hash ( & self ) -> H256 {
2018-02-16 10:11:29 +01:00
self . best_block . read ( ) . hash
2015-12-17 15:11:42 +01:00
}
2015-12-17 17:20:10 +01:00
/// Get best block number.
2016-01-11 01:07:58 +01:00
pub fn best_block_number ( & self ) -> BlockNumber {
2016-07-13 19:59:59 +02:00
self . best_block . read ( ) . number
2015-12-17 15:11:42 +01:00
}
2017-02-03 19:32:10 +01:00
/// Get best block timestamp.
pub fn best_block_timestamp(&self) -> u64 {
	let best = self.best_block.read();
	best.timestamp
}
2015-12-17 17:20:10 +01:00
/// Get best block total difficulty.
2015-12-17 15:11:42 +01:00
pub fn best_block_total_difficulty ( & self ) -> U256 {
2016-07-13 19:59:59 +02:00
self . best_block . read ( ) . total_difficulty
2015-12-16 17:39:15 +01:00
}
2016-07-28 23:46:24 +02:00
/// Get best block header
2016-12-28 13:44:51 +01:00
pub fn best_block_header ( & self ) -> encoded ::Header {
2016-07-28 23:46:24 +02:00
let block = self . best_block . read ( ) ;
2016-12-28 13:44:51 +01:00
let raw = BlockView ::new ( & block . block ) . header_view ( ) . rlp ( ) . as_raw ( ) . to_vec ( ) ;
encoded ::Header ::new ( raw )
2016-07-28 23:46:24 +02:00
}
2015-12-17 17:20:10 +01:00
/// Get current cache size.
2015-12-16 17:39:15 +01:00
pub fn cache_size ( & self ) -> CacheSize {
CacheSize {
2016-07-28 23:46:24 +02:00
blocks : self . block_headers . read ( ) . heap_size_of_children ( ) + self . block_bodies . read ( ) . heap_size_of_children ( ) ,
2016-07-13 19:59:59 +02:00
block_details : self . block_details . read ( ) . heap_size_of_children ( ) ,
transaction_addresses : self . transaction_addresses . read ( ) . heap_size_of_children ( ) ,
2018-03-12 21:15:55 +01:00
blocks_blooms : self . blocks_blooms . read ( ) . heap_size_of_children ( ) ,
2016-07-13 19:59:59 +02:00
block_receipts : self . block_receipts . read ( ) . heap_size_of_children ( ) ,
2015-12-16 17:39:15 +01:00
}
}
	/// Ticks our cache system and throws out any old data.
	pub fn collect_garbage(&self) {
		let current_size = self.cache_size().total();

		// Take write locks on every cache up-front so eviction and the size
		// recalculation below operate on a consistent snapshot.
		let mut block_headers = self.block_headers.write();
		let mut block_bodies = self.block_bodies.write();
		let mut block_details = self.block_details.write();
		let mut block_hashes = self.block_hashes.write();
		let mut transaction_addresses = self.transaction_addresses.write();
		let mut blocks_blooms = self.blocks_blooms.write();
		let mut block_receipts = self.block_receipts.write();

		let mut cache_man = self.cache_man.lock();
		// The cache manager decides which entries (`ids`) are stale; we evict
		// them from the matching cache and report the new total heap usage back.
		cache_man.collect_garbage(current_size, |ids| {
			for id in &ids {
				match *id {
					CacheId::BlockHeader(ref h) => { block_headers.remove(h); },
					CacheId::BlockBody(ref h) => { block_bodies.remove(h); },
					CacheId::BlockDetails(ref h) => { block_details.remove(h); }
					CacheId::BlockHashes(ref h) => { block_hashes.remove(h); }
					CacheId::TransactionAddresses(ref h) => { transaction_addresses.remove(h); }
					CacheId::BlocksBlooms(ref h) => { blocks_blooms.remove(h); }
					CacheId::BlockReceipts(ref h) => { block_receipts.remove(h); }
				}
			}

			// Return capacity to the allocator after the removals above.
			block_headers.shrink_to_fit();
			block_bodies.shrink_to_fit();
			block_details.shrink_to_fit();
			block_hashes.shrink_to_fit();
			transaction_addresses.shrink_to_fit();
			blocks_blooms.shrink_to_fit();
			block_receipts.shrink_to_fit();

			// Closure result: the recomputed total heap size of all caches.
			block_headers.heap_size_of_children() +
			block_bodies.heap_size_of_children() +
			block_details.heap_size_of_children() +
			block_hashes.heap_size_of_children() +
			transaction_addresses.heap_size_of_children() +
			blocks_blooms.heap_size_of_children() +
			block_receipts.heap_size_of_children()
		});
	}
2016-07-28 23:46:24 +02:00
/// Create a block body from a block.
pub fn block_to_body ( block : & [ u8 ] ) -> Bytes {
let mut body = RlpStream ::new_list ( 2 ) ;
let block_rlp = Rlp ::new ( block ) ;
body . append_raw ( block_rlp . at ( 1 ) . as_raw ( ) , 1 ) ;
body . append_raw ( block_rlp . at ( 2 ) . as_raw ( ) , 1 ) ;
body . out ( )
}
2016-10-18 18:16:00 +02:00
/// Returns general blockchain information
pub fn chain_info ( & self ) -> BlockChainInfo {
// ensure data consistencly by locking everything first
let best_block = self . best_block . read ( ) ;
let best_ancient_block = self . best_ancient_block . read ( ) ;
BlockChainInfo {
total_difficulty : best_block . total_difficulty . clone ( ) ,
pending_total_difficulty : best_block . total_difficulty . clone ( ) ,
genesis_hash : self . genesis_hash ( ) ,
2018-02-16 10:11:29 +01:00
best_block_hash : best_block . hash ,
2016-10-18 18:16:00 +02:00
best_block_number : best_block . number ,
2017-02-03 19:32:10 +01:00
best_block_timestamp : best_block . timestamp ,
2016-10-18 18:16:00 +02:00
first_block_hash : self . first_block ( ) ,
first_block_number : From ::from ( self . first_block_number ( ) ) ,
2018-02-16 10:11:29 +01:00
ancient_block_hash : best_ancient_block . as_ref ( ) . map ( | b | b . hash ) ,
2016-10-18 18:16:00 +02:00
ancient_block_number : best_ancient_block . as_ref ( ) . map ( | b | b . number ) ,
}
}
2015-12-09 19:03:25 +01:00
}
2015-12-13 22:39:01 +01:00
2015-12-17 17:20:10 +01:00
#[ cfg(test) ]
mod tests {
2018-02-16 10:11:29 +01:00
use std ::iter ;
2016-07-28 23:46:24 +02:00
use std ::sync ::Arc ;
2017-07-06 11:26:14 +02:00
use rustc_hex ::FromHex ;
2017-08-30 19:18:28 +02:00
use hash ::keccak ;
2017-10-12 15:36:27 +02:00
use kvdb ::KeyValueDB ;
2017-10-15 16:17:15 +02:00
use kvdb_memorydb ;
2018-01-10 13:35:18 +01:00
use ethereum_types ::* ;
2017-09-21 10:11:53 +02:00
use receipt ::{ Receipt , TransactionOutcome } ;
2016-05-26 18:24:51 +02:00
use blockchain ::{ BlockProvider , BlockChain , Config , ImportRoute } ;
2018-03-12 18:05:52 +01:00
use tests ::helpers ::{
generate_dummy_blockchain , generate_dummy_blockchain_with_extra ,
generate_dummy_empty_blockchain
} ;
2018-02-16 10:11:29 +01:00
use blockchain ::generator ::{ BlockGenerator , BlockBuilder , BlockOptions } ;
2016-08-31 16:55:43 +02:00
use blockchain ::extras ::TransactionAddress ;
use transaction ::{ Transaction , Action } ;
2016-09-14 12:02:30 +02:00
use log_entry ::{ LogEntry , LocalizedLogEntry } ;
2017-01-11 12:16:47 +01:00
use ethkey ::Secret ;
2016-07-28 23:46:24 +02:00
2017-02-20 17:21:55 +01:00
fn new_db ( ) -> Arc < KeyValueDB > {
2017-10-15 16:17:15 +02:00
Arc ::new ( kvdb_memorydb ::create ( ::db ::NUM_COLUMNS . unwrap_or ( 0 ) ) )
2016-07-28 23:46:24 +02:00
}
2017-02-20 17:21:55 +01:00
fn new_chain ( genesis : & [ u8 ] , db : Arc < KeyValueDB > ) -> BlockChain {
2017-01-23 15:27:11 +01:00
BlockChain ::new ( Config ::default ( ) , genesis , db )
2016-12-05 16:20:32 +01:00
}
2016-07-28 23:46:24 +02:00
	#[test]
	fn should_cache_best_block() {
		// given
		let genesis = BlockBuilder::genesis();
		let first = genesis.add_block();

		let db = new_db();
		let bc = new_chain(&genesis.last().encoded(), db.clone());
		assert_eq!(bc.best_block_number(), 0);

		// when
		let mut batch = db.transaction();
		bc.insert_block(&mut batch, &first.last().encoded(), vec![]);
		// Not yet committed — the best block must still be the genesis.
		assert_eq!(bc.best_block_number(), 0);
		bc.commit();
		// NOTE no db.write here (we want to check if best block is cached)

		// then
		assert_eq!(bc.best_block_number(), 1);
		assert!(bc.block(&bc.best_block_hash()).is_some(), "Best block should be queryable even without DB write.");
	}
2015-12-17 17:20:10 +01:00
	#[test]
	fn basic_blockchain_insert() {
		let genesis = BlockBuilder::genesis();
		let first = genesis.add_block();
		let genesis = genesis.last();
		let first = first.last();
		let genesis_hash = genesis.hash();
		let first_hash = first.hash();

		let db = new_db();
		let bc = new_chain(&genesis.encoded(), db.clone());

		// Chain with only the genesis block.
		assert_eq!(bc.genesis_hash(), genesis_hash);
		assert_eq!(bc.best_block_hash(), genesis_hash);
		assert_eq!(bc.block_hash(0), Some(genesis_hash));
		assert_eq!(bc.block_hash(1), None);
		assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]);

		// Insert and commit the first block.
		let mut batch = db.transaction();
		bc.insert_block(&mut batch, &first.encoded(), vec![]);
		db.write(batch).unwrap();
		bc.commit();

		// The first block is now the best block and a child of the genesis.
		assert_eq!(bc.block_hash(0), Some(genesis_hash));
		assert_eq!(bc.best_block_number(), 1);
		assert_eq!(bc.best_block_hash(), first_hash);
		assert_eq!(bc.block_hash(1), Some(first_hash));
		assert_eq!(bc.block_details(&first_hash).unwrap().parent, genesis_hash);
		assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![first_hash]);
		assert_eq!(bc.block_hash(2), None);
	}
2015-12-21 16:31:51 +01:00
2016-03-02 17:31:42 +01:00
	#[test]
	fn check_ancestry_iter() {
		let genesis = BlockBuilder::genesis();
		let first_10 = genesis.add_blocks(10);
		let generator = BlockGenerator::new(vec![first_10]);

		let db = new_db();
		let bc = new_chain(&genesis.last().encoded(), db.clone());

		// Record hashes in insertion order (genesis first).
		let mut block_hashes = vec![genesis.last().hash()];
		let mut batch = db.transaction();
		for block in generator {
			block_hashes.push(block.hash());
			bc.insert_block(&mut batch, &block.encoded(), vec![]);
			bc.commit();
		}
		db.write(batch).unwrap();

		// The ancestry iterator walks from tip to genesis, i.e. reversed order.
		block_hashes.reverse();

		assert_eq!(bc.ancestry_iter(block_hashes[0].clone()).unwrap().collect::<Vec<_>>(), block_hashes);
		assert_eq!(block_hashes.len(), 11);
	}
2015-12-21 16:31:51 +01:00
	#[test]
	fn test_find_uncles() {
		// Canonical chain b1a..b5a plus a lower-difficulty sibling at each height.
		let genesis = BlockBuilder::genesis();
		let b1a = genesis.add_block();
		let b2a = b1a.add_block();
		let b3a = b2a.add_block();
		let b4a = b3a.add_block();
		let b5a = b4a.add_block();
		let b1b = genesis.add_block_with_difficulty(9);
		let b2b = b1a.add_block_with_difficulty(9);
		let b3b = b2a.add_block_with_difficulty(9);
		let b4b = b3a.add_block_with_difficulty(9);
		let b5b = b4a.add_block_with_difficulty(9);

		// Expected uncles of b4a: the three most recent side-chain siblings.
		let uncle_headers = vec![b4b.last().header(), b3b.last().header(), b2b.last().header()];
		let b4a_hash = b4a.last().hash();

		let generator = BlockGenerator::new(
			vec![b1a, b1b, b2a, b2b, b3a, b3b, b4a, b4b, b5a, b5b]
		);

		let db = new_db();
		let bc = new_chain(&genesis.last().encoded(), db.clone());

		let mut batch = db.transaction();
		for b in generator {
			bc.insert_block(&mut batch, &b.encoded(), vec![]);
			bc.commit();
		}

		db.write(batch).unwrap();

		assert_eq!(uncle_headers, bc.find_uncle_headers(&b4a_hash, 3).unwrap());

		// TODO: insert block that already includes one of them as an uncle to check it's not allowed.
	}
2017-01-11 12:16:47 +01:00
fn secret ( ) -> Secret {
2017-08-30 19:18:28 +02:00
keccak ( " " ) . into ( )
2017-01-11 12:16:47 +01:00
}
2016-09-28 15:49:42 +02:00
	#[test]
	fn test_fork_transaction_addresses() {
		let t1 = Transaction {
			nonce: 0.into(),
			gas_price: 0.into(),
			gas: 100_000.into(),
			action: Action::Create,
			value: 100.into(),
			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
		}.sign(&secret(), None);

		let t1_hash = t1.hash();

		// b1a carries the transaction; b1b/b2 form a competing fork without it.
		let genesis = BlockBuilder::genesis();
		let b1a = genesis.add_block_with_transactions(iter::once(t1));
		let b1b = genesis.add_block_with_difficulty(9);
		let b2 = b1b.add_block();

		let b1a_hash = b1a.last().hash();
		let b2_hash = b2.last().hash();

		let db = new_db();
		let bc = new_chain(&genesis.last().encoded(), db.clone());

		let mut batch = db.transaction();
		let _ = bc.insert_block(&mut batch, &b1a.last().encoded(), vec![]);
		bc.commit();
		let _ = bc.insert_block(&mut batch, &b1b.last().encoded(), vec![]);
		bc.commit();
		db.write(batch).unwrap();

		// b1a wins initially, so the transaction is addressable there.
		assert_eq!(bc.best_block_hash(), b1a_hash);
		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
			block_hash: b1a_hash,
			index: 0,
		}));

		// now let's make forked chain the canon chain
		let mut batch = db.transaction();
		let _ = bc.insert_block(&mut batch, &b2.last().encoded(), vec![]);
		bc.commit();
		db.write(batch).unwrap();

		// Transaction should be retracted
		assert_eq!(bc.best_block_hash(), b2_hash);
		assert_eq!(bc.transaction_address(&t1_hash), None);
	}
2016-08-31 16:55:43 +02:00
	#[test]
	fn test_overwriting_transaction_addresses() {
		let t1 = Transaction {
			nonce: 0.into(),
			gas_price: 0.into(),
			gas: 100_000.into(),
			action: Action::Create,
			value: 100.into(),
			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
		}.sign(&secret(), None);

		let t2 = Transaction {
			nonce: 1.into(),
			gas_price: 0.into(),
			gas: 100_000.into(),
			action: Action::Create,
			value: 100.into(),
			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
		}.sign(&secret(), None);

		let t3 = Transaction {
			nonce: 2.into(),
			gas_price: 0.into(),
			gas: 100_000.into(),
			action: Action::Create,
			value: 100.into(),
			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
		}.sign(&secret(), None);

		let genesis = BlockBuilder::genesis();
		let b1a = genesis.add_block_with_transactions(vec![t1.clone(), t2.clone()]);
		// insert transactions in different order,
		// the block has lower difficulty, so the hash is also different
		let b1b = genesis.add_block_with(|| BlockOptions {
			difficulty: 9.into(),
			transactions: vec![t2.clone(), t1.clone()],
			..Default::default()
		});
		let b2 = b1b.add_block_with_transactions(iter::once(t3.clone()));

		let b1a_hash = b1a.last().hash();
		let b1b_hash = b1b.last().hash();
		let b2_hash = b2.last().hash();

		let t1_hash = t1.hash();
		let t2_hash = t2.hash();
		let t3_hash = t3.hash();

		let db = new_db();
		let bc = new_chain(&genesis.last().encoded(), db.clone());

		let mut batch = db.transaction();
		let _ = bc.insert_block(&mut batch, &b1a.last().encoded(), vec![]);
		bc.commit();
		let _ = bc.insert_block(&mut batch, &b1b.last().encoded(), vec![]);
		bc.commit();
		db.write(batch).unwrap();

		// While b1a is canonical, t1/t2 resolve to their positions in b1a.
		assert_eq!(bc.best_block_hash(), b1a_hash);
		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
			block_hash: b1a_hash,
			index: 0,
		}));
		assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress {
			block_hash: b1a_hash,
			index: 1,
		}));

		// now let's make forked chain the canon chain
		let mut batch = db.transaction();
		let _ = bc.insert_block(&mut batch, &b2.last().encoded(), vec![]);
		bc.commit();
		db.write(batch).unwrap();

		// After the re-org, the addresses must point into b1b (note the
		// swapped indices) and t3 into b2.
		assert_eq!(bc.best_block_hash(), b2_hash);
		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
			block_hash: b1b_hash,
			index: 1,
		}));
		assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress {
			block_hash: b1b_hash,
			index: 0,
		}));
		assert_eq!(bc.transaction_address(&t3_hash), Some(TransactionAddress {
			block_hash: b2_hash,
			index: 0,
		}));
	}
2015-12-21 16:31:51 +01:00
	#[test]
	fn test_small_fork() {
		// Chain: genesis -> b1 -> b2 -> {b3a, b3b} where b3b has lower difficulty.
		let genesis = BlockBuilder::genesis();
		let b1 = genesis.add_block();
		let b2 = b1.add_block();
		let b3a = b2.add_block();
		let b3b = b2.add_block_with_difficulty(9);

		let genesis_hash = genesis.last().hash();
		let b1_hash = b1.last().hash();
		let b2_hash = b2.last().hash();
		let b3a_hash = b3a.last().hash();
		let b3b_hash = b3b.last().hash();

		// b3a is a part of canon chain, whereas b3b is part of sidechain
		let best_block_hash = b3a_hash;

		let db = new_db();
		let bc = new_chain(&genesis.last().encoded(), db.clone());

		let mut batch = db.transaction();
		let ir1 = bc.insert_block(&mut batch, &b1.last().encoded(), vec![]);
		bc.commit();
		let ir2 = bc.insert_block(&mut batch, &b2.last().encoded(), vec![]);
		bc.commit();
		let ir3b = bc.insert_block(&mut batch, &b3b.last().encoded(), vec![]);
		bc.commit();
		db.write(batch).unwrap();

		// b3b arrives first, so it is temporarily canonical at height 3.
		assert_eq!(bc.block_hash(3).unwrap(), b3b_hash);

		let mut batch = db.transaction();
		let ir3a = bc.insert_block(&mut batch, &b3a.last().encoded(), vec![]);
		bc.commit();
		db.write(batch).unwrap();

		// Import routes: each in-order block enacts itself; b3a's import
		// additionally retracts b3b.
		assert_eq!(ir1, ImportRoute {
			enacted: vec![b1_hash],
			retracted: vec![],
			omitted: vec![],
		});

		assert_eq!(ir2, ImportRoute {
			enacted: vec![b2_hash],
			retracted: vec![],
			omitted: vec![],
		});

		assert_eq!(ir3b, ImportRoute {
			enacted: vec![b3b_hash],
			retracted: vec![],
			omitted: vec![],
		});

		assert_eq!(ir3a, ImportRoute {
			enacted: vec![b3a_hash],
			retracted: vec![b3b_hash],
			omitted: vec![],
		});

		assert_eq!(bc.best_block_hash(), best_block_hash);
		assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0);
		assert_eq!(bc.block_number(&b1_hash).unwrap(), 1);
		assert_eq!(bc.block_number(&b2_hash).unwrap(), 2);
		assert_eq!(bc.block_number(&b3a_hash).unwrap(), 3);
		assert_eq!(bc.block_number(&b3b_hash).unwrap(), 3);
		assert_eq!(bc.block_hash(0).unwrap(), genesis_hash);
		assert_eq!(bc.block_hash(1).unwrap(), b1_hash);
		assert_eq!(bc.block_hash(2).unwrap(), b2_hash);
		assert_eq!(bc.block_hash(3).unwrap(), b3a_hash);

		// test trie route
		let r0_1 = bc.tree_route(genesis_hash, b1_hash).unwrap();
		assert_eq!(r0_1.ancestor, genesis_hash);
		assert_eq!(r0_1.blocks, [b1_hash]);
		assert_eq!(r0_1.index, 0);

		let r0_2 = bc.tree_route(genesis_hash, b2_hash).unwrap();
		assert_eq!(r0_2.ancestor, genesis_hash);
		assert_eq!(r0_2.blocks, [b1_hash, b2_hash]);
		assert_eq!(r0_2.index, 0);

		let r1_3a = bc.tree_route(b1_hash, b3a_hash).unwrap();
		assert_eq!(r1_3a.ancestor, b1_hash);
		assert_eq!(r1_3a.blocks, [b2_hash, b3a_hash]);
		assert_eq!(r1_3a.index, 0);

		let r1_3b = bc.tree_route(b1_hash, b3b_hash).unwrap();
		assert_eq!(r1_3b.ancestor, b1_hash);
		assert_eq!(r1_3b.blocks, [b2_hash, b3b_hash]);
		assert_eq!(r1_3b.index, 0);

		// Route across the fork: ancestor is b2, one block retracted.
		let r3a_3b = bc.tree_route(b3a_hash, b3b_hash).unwrap();
		assert_eq!(r3a_3b.ancestor, b2_hash);
		assert_eq!(r3a_3b.blocks, [b3a_hash, b3b_hash]);
		assert_eq!(r3a_3b.index, 1);

		// Backward routes: all blocks are on the "from" side (index == len).
		let r1_0 = bc.tree_route(b1_hash, genesis_hash).unwrap();
		assert_eq!(r1_0.ancestor, genesis_hash);
		assert_eq!(r1_0.blocks, [b1_hash]);
		assert_eq!(r1_0.index, 1);

		let r2_0 = bc.tree_route(b2_hash, genesis_hash).unwrap();
		assert_eq!(r2_0.ancestor, genesis_hash);
		assert_eq!(r2_0.blocks, [b2_hash, b1_hash]);
		assert_eq!(r2_0.index, 2);

		let r3a_1 = bc.tree_route(b3a_hash, b1_hash).unwrap();
		assert_eq!(r3a_1.ancestor, b1_hash);
		assert_eq!(r3a_1.blocks, [b3a_hash, b2_hash]);
		assert_eq!(r3a_1.index, 2);

		let r3b_1 = bc.tree_route(b3b_hash, b1_hash).unwrap();
		assert_eq!(r3b_1.ancestor, b1_hash);
		assert_eq!(r3b_1.blocks, [b3b_hash, b2_hash]);
		assert_eq!(r3b_1.index, 2);

		let r3b_3a = bc.tree_route(b3b_hash, b3a_hash).unwrap();
		assert_eq!(r3b_3a.ancestor, b2_hash);
		assert_eq!(r3b_3a.blocks, [b3b_hash, b3a_hash]);
		assert_eq!(r3b_3a.index, 1);
	}
2015-12-21 16:38:31 +01:00
	#[test]
	fn test_reopen_blockchain_db() {
		let genesis = BlockBuilder::genesis();
		let first = genesis.add_block();
		let genesis_hash = genesis.last().hash();
		let first_hash = first.last().hash();

		let db = new_db();

		// First "session": insert and persist one block.
		{
			let bc = new_chain(&genesis.last().encoded(), db.clone());
			assert_eq!(bc.best_block_hash(), genesis_hash);
			let mut batch = db.transaction();
			bc.insert_block(&mut batch, &first.last().encoded(), vec![]);
			db.write(batch).unwrap();
			bc.commit();
			assert_eq!(bc.best_block_hash(), first_hash);
		}

		// Second "session" over the same db: the best block must be restored.
		{
			let bc = new_chain(&genesis.last().encoded(), db.clone());

			assert_eq!(bc.best_block_hash(), first_hash);
		}
	}
2016-01-27 17:32:53 +01:00
#[ test ]
2016-01-27 18:31:14 +01:00
fn can_contain_arbitrary_block_sequence ( ) {
2017-04-06 19:26:17 +02:00
let bc = generate_dummy_blockchain ( 50 ) ;
2016-01-27 18:31:14 +01:00
assert_eq! ( bc . best_block_number ( ) , 49 ) ;
2016-01-27 17:32:53 +01:00
}
2016-01-28 11:55:03 +01:00
#[ test ]
fn can_collect_garbage ( ) {
2017-04-06 19:26:17 +02:00
let bc = generate_dummy_blockchain ( 3000 ) ;
2016-01-28 15:38:42 +01:00
2016-01-28 11:55:03 +01:00
assert_eq! ( bc . best_block_number ( ) , 2999 ) ;
let best_hash = bc . best_block_hash ( ) ;
let mut block_header = bc . block_header ( & best_hash ) ;
while ! block_header . is_none ( ) {
2016-09-16 23:03:26 +02:00
block_header = bc . block_header ( block_header . unwrap ( ) . parent_hash ( ) ) ;
2016-01-28 11:55:03 +01:00
}
assert! ( bc . cache_size ( ) . blocks > 1024 * 1024 ) ;
2016-01-29 16:28:13 +01:00
for _ in 0 .. 2 {
2016-02-02 01:59:14 +01:00
bc . collect_garbage ( ) ;
2016-01-29 16:28:13 +01:00
}
2016-01-28 11:55:03 +01:00
assert! ( bc . cache_size ( ) . blocks < 1024 * 1024 ) ;
}
2016-01-28 15:38:42 +01:00
#[ test ]
fn can_contain_arbitrary_block_sequence_with_extra ( ) {
2017-04-06 19:26:17 +02:00
let bc = generate_dummy_blockchain_with_extra ( 25 ) ;
2016-01-28 15:38:42 +01:00
assert_eq! ( bc . best_block_number ( ) , 24 ) ;
}
#[ test ]
fn can_contain_only_genesis_block ( ) {
2017-04-06 19:26:17 +02:00
let bc = generate_dummy_empty_blockchain ( ) ;
2016-01-28 15:38:42 +01:00
assert_eq! ( bc . best_block_number ( ) , 0 ) ;
}
2016-02-08 15:53:22 +01:00
	#[test]
	fn find_transaction_by_hash() {
		// Raw RLP fixtures: a genesis block and a successor block carrying
		// seven transactions. (The `b1` literal was split across lines by an
		// earlier formatting pass; it is a single hex string.)
		let genesis = "f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0af81e09f8c46ca322193edfda764fa7e88e81923f802f1d325ec0b0308ac2cd0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200008083023e38808454c98c8142a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421880102030405060708c0c0".from_hex().unwrap();
		let b1 = "f904a8f901faa0ce1f26f798dd03c8782d63b3e42e79a64eaea5694ea686ac5d7ce3df5171d1aea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0a65c2364cd0f1542d761823dc0109c6b072f14c20459598c5455c274601438f4a070616ebd7ad2ed6fb7860cf7e9df00163842351c38a87cac2c1cb193895035a2a05c5b4fc43c2d45787f54e1ae7d27afdb4ad16dfc567c5692070d5c4556e0b1d7b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000183023ec683021536845685109780a029f07836e4e59229b3a065913afc27702642c683bba689910b2b2fd45db310d3888957e6d004a31802f902a7f85f800a8255f094aaaf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca0575da4e21b66fa764be5f74da9389e67693d066fb0d1312e19e17e501da00ecda06baf5a5327595f6619dfc2fcb3f2e6fb410b5810af3cb52d0e7508038e91a188f85f010a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba04fa966bf34b93abc1bcd665554b7f316b50f928477b50be0f3285ead29d18c5ba017bba0eeec1625ab433746955e125d46d80b7fdc97386c51266f842d8e02192ef85f020a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca004377418ae981cc32b1312b4a427a1d69a821b28db8584f5f2bd8c6d42458adaa053a1dba1af177fac92f3b6af0a9fa46a22adf56e686c93794b6a012bf254abf5f85f030a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca04fe13febd28a05f4fcb2f451d7ddc2dda56486d9f8c79a62b0ba4da775122615a0651b2382dd402df9ebc27f8cb4b2e0f3cea68dda2dca0ee9603608f0b6f51668f85f040a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba078e6a0ba086a08f8450e208a399bb2f2d2a0d984acd2517c7c7df66ccfab567da013254002cd45a97fac049ae00afbc43ed0d9961d0c56a3b2382c80ce41c198ddf85f050a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba0a7174d8f43ea71c8e3ca9477691add8d80ac8e0ed89d8d8b572041eef81f4a54a0534ea2e28ec4da3b5b944b18c51ec84a5cf35f5b3343c5fb86521fd2d388f506f85f060a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba034bd04065833536a10c77ee2a43a5371bc6d34837088b861dd9d4b7f44074b59a078807715786a13876d3455716a6b9cb2186b7a4887a5c31160fc877454958616c0".from_hex().unwrap();
		let b1_hash: H256 = "f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3".into();

		let db = new_db();
		let bc = new_chain(&genesis, db.clone());
		let mut batch = db.transaction();
		bc.insert_block(&mut batch, &b1, vec![]);
		db.write(batch).unwrap();
		bc.commit();

		// Every transaction in b1 must be retrievable via its address.
		let transactions = bc.transactions(&b1_hash).unwrap();
		assert_eq!(transactions.len(), 7);
		for t in transactions {
			assert_eq!(bc.transaction(&bc.transaction_address(&t.hash()).unwrap()).unwrap(), t);
		}
	}
2016-02-16 11:41:34 +01:00
2017-02-20 17:21:55 +01:00
/// Test helper: insert a single block (with its receipts) into `bc`,
/// flush the resulting DB transaction and commit the chain's pending
/// in-memory state. Returns the import route describing the reorg effect.
fn insert_block(db: &Arc<KeyValueDB>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
	let mut batch = db.transaction();
	let route = bc.insert_block(&mut batch, bytes, receipts);
	db.write(batch).unwrap();
	bc.commit();
	route
}
2016-09-14 12:02:30 +02:00
#[test]
fn test_logs() {
	// given: three contract-creation transactions, identical apart from `value`.
	let make_tx = |value: U256| Transaction {
		nonce: 0.into(),
		gas_price: 0.into(),
		gas: 100_000.into(),
		action: Action::Create,
		value: value,
		data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
	}.sign(&secret(), None);

	let t1 = make_tx(101.into());
	let t2 = make_tx(102.into());
	let t3 = make_tx(103.into());

	let tx_hash1 = t1.hash();
	let tx_hash2 = t2.hash();
	let tx_hash3 = t3.hash();

	// b1 carries t1 + t2, b2 carries t3.
	let genesis = BlockBuilder::genesis();
	let b1 = genesis.add_block_with_transactions(vec![t1, t2]);
	let b2 = b1.add_block_with_transactions(iter::once(t3));
	let b1_hash = b1.last().hash();
	let b1_number = b1.last().number();
	let b2_hash = b2.last().hash();
	let b2_number = b2.last().number();

	let db = new_db();
	let bc = new_chain(&genesis.last().encoded(), db.clone());

	// builders for the repeated log/receipt literals; only `data`/`logs` vary.
	let log = |data: Vec<u8>| LogEntry { address: Default::default(), topics: vec![], data: data };
	let receipt = |logs: Vec<LogEntry>| Receipt {
		outcome: TransactionOutcome::StateRoot(H256::default()),
		gas_used: 10_000.into(),
		log_bloom: Default::default(),
		logs: logs,
	};

	insert_block(&db, &bc, &b1.last().encoded(), vec![
		receipt(vec![log(vec![1]), log(vec![2])]),
		receipt(vec![log(vec![3])]),
	]);
	insert_block(&db, &bc, &b2.last().encoded(), vec![
		receipt(vec![log(vec![4])]),
	]);

	// when
	let logs1 = bc.logs(vec![1, 2], |_| true, None);
	let logs2 = bc.logs(vec![1, 2], |_| true, Some(1));

	// then: logs are localized in block/transaction order, with per-block
	// `log_index` and per-transaction `transaction_log_index`.
	assert_eq!(logs1, vec![
		LocalizedLogEntry {
			entry: log(vec![1]),
			block_hash: b1_hash,
			block_number: b1_number,
			transaction_hash: tx_hash1,
			transaction_index: 0,
			transaction_log_index: 0,
			log_index: 0,
		},
		LocalizedLogEntry {
			entry: log(vec![2]),
			block_hash: b1_hash,
			block_number: b1_number,
			transaction_hash: tx_hash1,
			transaction_index: 0,
			transaction_log_index: 1,
			log_index: 1,
		},
		LocalizedLogEntry {
			entry: log(vec![3]),
			block_hash: b1_hash,
			block_number: b1_number,
			transaction_hash: tx_hash2,
			transaction_index: 1,
			transaction_log_index: 0,
			log_index: 2,
		},
		LocalizedLogEntry {
			entry: log(vec![4]),
			block_hash: b2_hash,
			block_number: b2_number,
			transaction_hash: tx_hash3,
			transaction_index: 0,
			transaction_log_index: 0,
			log_index: 0,
		}
	]);
	// `limit = Some(1)` keeps only the most recent matching log.
	assert_eq!(logs2, vec![
		LocalizedLogEntry {
			entry: log(vec![4]),
			block_hash: b2_hash,
			block_number: b2_number,
			transaction_hash: tx_hash3,
			transaction_index: 0,
			transaction_log_index: 0,
			log_index: 0,
		}
	]);
}
2016-02-16 11:41:34 +01:00
#[test]
fn test_bloom_filter_simple() {
	let bloom_b1: Bloom = "00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000".into();

	let bloom_b2: Bloom = "00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();

	let bloom_ba: Bloom = "00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();

	// canonical chain b1..b3 plus a fork b1a..b2a off genesis carrying bloom_ba.
	let genesis = BlockBuilder::genesis();
	let b1 = genesis.add_block_with(|| BlockOptions {
		bloom: bloom_b1.clone(),
		difficulty: 9.into(),
		..Default::default()
	});
	let b2 = b1.add_block_with_bloom(bloom_b2);
	let b3 = b2.add_block_with_bloom(bloom_ba);
	let b1a = genesis.add_block_with_bloom(bloom_ba);
	let b2a = b1a.add_block_with_bloom(bloom_ba);

	let db = new_db();
	let bc = new_chain(&genesis.last().encoded(), db.clone());

	// query helper: block numbers within [0, 5] whose bloom contains `bloom`.
	let blocks_containing = |bloom: &Bloom| bc.blocks_with_bloom(bloom, 0, 5);

	// nothing inserted yet — no matches.
	assert!(blocks_containing(&bloom_b1).is_empty());
	assert!(blocks_containing(&bloom_b2).is_empty());

	insert_block(&db, &bc, &b1.last().encoded(), vec![]);
	assert_eq!(blocks_containing(&bloom_b1), vec![1]);
	assert!(blocks_containing(&bloom_b2).is_empty());

	insert_block(&db, &bc, &b2.last().encoded(), vec![]);
	assert_eq!(blocks_containing(&bloom_b1), vec![1]);
	assert_eq!(blocks_containing(&bloom_b2), vec![2]);

	// hasn't been forked yet — b1a alone can't displace the canonical chain.
	insert_block(&db, &bc, &b1a.last().encoded(), vec![]);
	assert_eq!(blocks_containing(&bloom_b1), vec![1]);
	assert_eq!(blocks_containing(&bloom_b2), vec![2]);
	assert!(blocks_containing(&bloom_ba).is_empty());

	// fork has happened — b2a makes the fork canonical, swapping the blooms.
	insert_block(&db, &bc, &b2a.last().encoded(), vec![]);
	assert!(blocks_containing(&bloom_b1).is_empty());
	assert!(blocks_containing(&bloom_b2).is_empty());
	assert_eq!(blocks_containing(&bloom_ba), vec![1, 2]);

	// fork back — b3 restores the original chain.
	insert_block(&db, &bc, &b3.last().encoded(), vec![]);
	assert_eq!(blocks_containing(&bloom_b1), vec![1]);
	assert_eq!(blocks_containing(&bloom_b2), vec![2]);
	assert_eq!(blocks_containing(&bloom_ba), vec![3]);
}
2016-07-14 19:16:01 +02:00
2018-02-22 11:22:56 +01:00
#[test]
fn test_insert_unordered() {
	let bloom_b1: Bloom = "00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000".into();

	let bloom_b2: Bloom = "00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();

	let bloom_b3: Bloom = "00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into();

	let genesis = BlockBuilder::genesis();
	let b1 = genesis.add_block_with_bloom(bloom_b1);
	let b2 = b1.add_block_with_bloom(bloom_b2);
	let b3 = b2.add_block_with_bloom(bloom_b3);
	let b1_total_difficulty = genesis.last().difficulty() + b1.last().difficulty();

	let db = new_db();
	let bc = new_chain(&genesis.last().encoded(), db.clone());
	let mut batch = db.transaction();
	// Feed the blocks out of order: b2 (with its parent's total difficulty
	// supplied explicitly), then b3, then finally the missing b1.
	bc.insert_unordered_block(&mut batch, &b2.last().encoded(), vec![], Some(b1_total_difficulty), false, false);
	bc.commit();
	bc.insert_unordered_block(&mut batch, &b3.last().encoded(), vec![], None, true, false);
	bc.commit();
	bc.insert_unordered_block(&mut batch, &b1.last().encoded(), vec![], None, false, false);
	bc.commit();
	db.write(batch).unwrap();

	// Once the gap is closed the chain looks exactly like an in-order import.
	assert_eq!(bc.best_block_hash(), b3.last().hash());
	for (i, block) in [&b1, &b2, &b3].iter().enumerate() {
		assert_eq!(bc.block_hash(i as u64 + 1).unwrap(), block.last().hash());
	}

	let blocks_containing = |bloom: &Bloom| bc.blocks_with_bloom(bloom, 0, 3);
	assert_eq!(blocks_containing(&bloom_b1), vec![1]);
	assert_eq!(blocks_containing(&bloom_b2), vec![2]);
	assert_eq!(blocks_containing(&bloom_b3), vec![3]);
}
2016-07-14 19:16:01 +02:00
#[test]
fn test_best_block_update() {
	let genesis = BlockBuilder::genesis();
	let next_5 = genesis.add_blocks(5);
	let uncle = genesis.add_block_with_difficulty(9);
	let generator = BlockGenerator::new(iter::once(next_5));

	let db = new_db();
	{
		let bc = new_chain(&genesis.last().encoded(), db.clone());

		let mut batch = db.transaction();
		// build a 5-block canonical chain
		for block in generator {
			bc.insert_block(&mut batch, &block.encoded(), vec![]);
			bc.commit();
		}
		assert_eq!(bc.best_block_number(), 5);

		// a sibling of block #1 must not displace the longer chain
		bc.insert_block(&mut batch, &uncle.last().encoded(), vec![]);
		db.write(batch).unwrap();
		bc.commit();
	}

	// re-loading the blockchain should load the correct best block.
	let bc = new_chain(&genesis.last().encoded(), db);
	assert_eq!(bc.best_block_number(), 5);
}
2016-07-17 23:03:29 +02:00
2017-04-19 16:27:45 +02:00
#[test]
fn epoch_transitions_iter() {
	use ::engines::EpochTransition;

	let genesis = BlockBuilder::genesis();
	let next_5 = genesis.add_blocks(5);
	let uncle = genesis.add_block_with_difficulty(9);
	let generator = BlockGenerator::new(iter::once(next_5));

	let db = new_db();
	{
		let bc = new_chain(&genesis.last().encoded(), db.clone());

		let mut batch = db.transaction();
		// insert five canonical blocks, registering one transition per block
		for (i, block) in generator.into_iter().enumerate() {
			bc.insert_block(&mut batch, &block.encoded(), vec![]);
			bc.insert_epoch_transition(&mut batch, i as u64, EpochTransition {
				block_hash: block.hash(),
				block_number: i as u64 + 1,
				proof: vec![],
			});
			bc.commit();
		}
		assert_eq!(bc.best_block_number(), 5);

		// register a transition on a non-canonical uncle; it must be filtered out
		bc.insert_block(&mut batch, &uncle.last().encoded(), vec![]);
		bc.insert_epoch_transition(&mut batch, 999, EpochTransition {
			block_hash: uncle.last().hash(),
			block_number: 1,
			proof: vec![],
		});
		db.write(batch).unwrap();
		bc.commit();

		// epoch 999 not in canonical chain.
		assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
	}

	// re-loading the blockchain should load the correct best block
	// and the same canonical transition set.
	let bc = new_chain(&genesis.last().encoded(), db);

	assert_eq!(bc.best_block_number(), 5);
	assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
2017-06-28 13:17:36 +02:00
#[test]
fn epoch_transition_for() {
	use ::engines::EpochTransition;

	let genesis = BlockBuilder::genesis();
	let fork_7 = genesis.add_blocks_with(7, || BlockOptions {
		difficulty: 9.into(),
		..Default::default()
	});
	let next_10 = genesis.add_blocks(10);
	let fork_generator = BlockGenerator::new(iter::once(fork_7));
	let next_generator = BlockGenerator::new(iter::once(next_10));

	let db = new_db();
	let bc = new_chain(&genesis.last().encoded(), db.clone());

	// genesis itself starts epoch 0
	let mut batch = db.transaction();
	bc.insert_epoch_transition(&mut batch, 0, EpochTransition {
		block_hash: bc.genesis_hash(),
		block_number: 0,
		proof: vec![],
	});
	db.write(batch).unwrap();

	// set up a chain where we have a canonical chain of 10 blocks
	// and a non-canonical fork of 8 from genesis.
	let fork_hash = {
		for block in fork_generator {
			let mut batch = db.transaction();
			bc.insert_block(&mut batch, &block.encoded(), vec![]);
			bc.commit();
			db.write(batch).unwrap();
		}

		assert_eq!(bc.best_block_number(), 7);
		bc.chain_info().best_block_hash
	};

	for block in next_generator {
		let mut batch = db.transaction();
		bc.insert_block(&mut batch, &block.encoded(), vec![]);
		bc.commit();
		db.write(batch).unwrap();
	}
	assert_eq!(bc.best_block_number(), 10);

	// a second transition at canonical block 4
	let mut batch = db.transaction();
	bc.insert_epoch_transition(&mut batch, 4, EpochTransition {
		block_hash: bc.block_hash(4).unwrap(),
		block_number: 4,
		proof: vec![],
	});
	db.write(batch).unwrap();

	// blocks where the parent is one of the first 4 will be part of genesis epoch.
	for i in 0..4 {
		let hash = bc.block_hash(i).unwrap();
		assert_eq!(bc.epoch_transition_for(hash).unwrap().block_number, 0);
	}

	// blocks where the parent is the transition at 4 or after will be
	// part of that epoch.
	for i in 4..11 {
		let hash = bc.block_hash(i).unwrap();
		assert_eq!(bc.epoch_transition_for(hash).unwrap().block_number, 4);
	}

	let fork_hashes = bc.ancestry_iter(fork_hash).unwrap().collect::<Vec<_>>();
	assert_eq!(fork_hashes.len(), 8);

	// non-canonical fork blocks should all have genesis transition
	for fork_hash in fork_hashes {
		assert_eq!(bc.epoch_transition_for(fork_hash).unwrap().block_number, 0);
	}
}
2015-12-17 17:20:10 +01:00
}