// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Blockchain database.
2015-12-17 17:20:10 +01:00
2016-05-26 18:24:51 +02:00
use bloomchain as bc ;
2016-01-09 12:30:41 +01:00
use util ::* ;
2016-09-01 14:29:59 +02:00
use rlp ::* ;
2015-12-21 02:34:41 +01:00
use header ::* ;
2016-05-26 18:24:51 +02:00
use super ::extras ::* ;
2015-12-14 17:12:47 +01:00
use transaction ::* ;
2015-12-17 02:13:14 +01:00
use views ::* ;
2016-09-14 12:02:30 +02:00
use log_entry ::{ LogEntry , LocalizedLogEntry } ;
2016-02-11 14:35:03 +01:00
use receipt ::Receipt ;
2016-05-26 18:24:51 +02:00
use blooms ::{ Bloom , BloomGroup } ;
2016-04-17 17:18:25 +02:00
use blockchain ::block_info ::{ BlockInfo , BlockLocation , BranchBecomingCanonChainData } ;
2016-10-18 18:16:00 +02:00
use blockchain ::best_block ::{ BestBlock , BestAncientBlock } ;
use types ::blockchain_info ::BlockChainInfo ;
2016-05-16 18:33:32 +02:00
use types ::tree_route ::TreeRoute ;
2016-02-27 02:16:39 +01:00
use blockchain ::update ::ExtrasUpdate ;
2016-05-26 18:24:51 +02:00
use blockchain ::{ CacheSize , ImportRoute , Config } ;
2016-08-18 18:24:49 +02:00
use db ::{ self , Writable , Readable , CacheUpdatePolicy } ;
2016-07-31 00:19:27 +02:00
use cache_manager ::CacheManager ;
2016-12-28 13:44:51 +01:00
use encoded ;
2016-02-16 14:46:21 +01:00
2016-05-26 18:24:51 +02:00
// Number of levels in the log-blooms chain (passed to `bc::Config::levels`).
const LOG_BLOOMS_LEVELS: usize = 3;
// How many blooms are grouped under a single index entry
// (passed to `bc::Config::elements_per_index`).
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
2015-12-21 02:57:02 +01:00
2016-01-12 13:14:01 +01:00
/// Interface for querying blocks by hash and by number.
pub trait BlockProvider {
/// Returns true if the given block is known
/// (though not necessarily a part of the canon chain).
fn is_known ( & self , hash : & H256 ) -> bool ;
2016-10-18 18:16:00 +02:00
/// Get the first block of the best part of the chain.
/// Return `None` if there is no gap and the first block is the genesis.
2016-08-18 22:01:57 +02:00
/// Any queries of blocks which precede this one are not guaranteed to
/// succeed.
2016-10-18 18:16:00 +02:00
fn first_block ( & self ) -> Option < H256 > ;
2016-08-18 22:01:57 +02:00
/// Get the number of the first block.
2016-10-18 18:16:00 +02:00
fn first_block_number ( & self ) -> Option < BlockNumber > {
self . first_block ( ) . map ( | b | self . block_number ( & b ) . expect ( " First block is always set to an existing block or `None`. Existing block always has a number; qed " ) )
2016-08-18 22:01:57 +02:00
}
2016-10-18 18:16:00 +02:00
/// Get the best block of an first block sequence if there is a gap.
fn best_ancient_block ( & self ) -> Option < H256 > ;
/// Get the number of the first block.
fn best_ancient_number ( & self ) -> Option < BlockNumber > {
self . best_ancient_block ( ) . map ( | h | self . block_number ( & h ) . expect ( " Ancient block is always set to an existing block or `None`. Existing block always has a number; qed " ) )
}
2016-01-12 13:14:01 +01:00
/// Get raw block data
2016-12-28 13:44:51 +01:00
fn block ( & self , hash : & H256 ) -> Option < encoded ::Block > ;
2016-01-12 13:14:01 +01:00
/// Get the familial details concerning a block.
fn block_details ( & self , hash : & H256 ) -> Option < BlockDetails > ;
/// Get the hash of given block's number.
fn block_hash ( & self , index : BlockNumber ) -> Option < H256 > ;
2016-02-08 15:53:22 +01:00
/// Get the address of transaction with given hash.
fn transaction_address ( & self , hash : & H256 ) -> Option < TransactionAddress > ;
2016-02-17 12:35:37 +01:00
/// Get receipts of block with given hash.
fn block_receipts ( & self , hash : & H256 ) -> Option < BlockReceipts > ;
2016-01-12 13:14:01 +01:00
/// Get the partial-header of a block.
fn block_header ( & self , hash : & H256 ) -> Option < Header > {
2016-12-28 13:44:51 +01:00
self . block_header_data ( hash ) . map ( | header | header . decode ( ) )
2016-01-12 13:14:01 +01:00
}
2016-07-28 23:46:24 +02:00
/// Get the header RLP of a block.
2016-12-28 13:44:51 +01:00
fn block_header_data ( & self , hash : & H256 ) -> Option < encoded ::Header > ;
2016-07-28 23:46:24 +02:00
/// Get the block body (uncles and transactions).
2016-12-28 13:44:51 +01:00
fn block_body ( & self , hash : & H256 ) -> Option < encoded ::Body > ;
2016-07-28 23:46:24 +02:00
2016-01-12 13:14:01 +01:00
/// Get a list of uncles for a given block.
2016-03-02 18:05:47 +01:00
/// Returns None if block does not exist.
2016-01-12 13:14:01 +01:00
fn uncles ( & self , hash : & H256 ) -> Option < Vec < Header > > {
2016-12-28 13:44:51 +01:00
self . block_body ( hash ) . map ( | body | body . uncles ( ) )
2016-01-12 13:14:01 +01:00
}
/// Get a list of uncle hashes for a given block.
/// Returns None if block does not exist.
fn uncle_hashes ( & self , hash : & H256 ) -> Option < Vec < H256 > > {
2016-12-28 13:44:51 +01:00
self . block_body ( hash ) . map ( | body | body . uncle_hashes ( ) )
2016-01-12 13:14:01 +01:00
}
/// Get the number of given block's hash.
fn block_number ( & self , hash : & H256 ) -> Option < BlockNumber > {
2016-07-28 23:46:24 +02:00
self . block_details ( hash ) . map ( | details | details . number )
2016-01-12 13:14:01 +01:00
}
2016-02-08 15:53:22 +01:00
/// Get transaction with given transaction hash.
2016-02-10 19:29:27 +01:00
fn transaction ( & self , address : & TransactionAddress ) -> Option < LocalizedTransaction > {
2016-07-28 23:46:24 +02:00
self . block_body ( & address . block_hash )
2016-12-28 13:44:51 +01:00
. and_then ( | body | self . block_number ( & address . block_hash )
. and_then ( | n | body . view ( ) . localized_transaction_at ( & address . block_hash , n , address . index ) ) )
2016-02-08 15:53:22 +01:00
}
2016-03-20 17:29:39 +01:00
/// Get transaction receipt.
fn transaction_receipt ( & self , address : & TransactionAddress ) -> Option < Receipt > {
self . block_receipts ( & address . block_hash ) . and_then ( | br | br . receipts . into_iter ( ) . nth ( address . index ) )
}
2016-01-12 13:14:01 +01:00
/// Get a list of transactions for a given block.
2016-02-10 11:28:40 +01:00
/// Returns None if block does not exist.
2016-02-09 15:17:01 +01:00
fn transactions ( & self , hash : & H256 ) -> Option < Vec < LocalizedTransaction > > {
2016-07-28 23:46:24 +02:00
self . block_body ( hash )
2016-12-28 13:44:51 +01:00
. and_then ( | body | self . block_number ( hash )
. map ( | n | body . view ( ) . localized_transactions ( hash , n ) ) )
2016-01-12 13:14:01 +01:00
}
/// Returns reference to genesis hash.
fn genesis_hash ( & self ) -> H256 {
self . block_hash ( 0 ) . expect ( " Genesis hash should always exist " )
}
2016-01-26 15:00:22 +01:00
/// Returns the header of the genesis block.
fn genesis_header ( & self ) -> Header {
2016-10-20 23:41:15 +02:00
self . block_header ( & self . genesis_hash ( ) )
. expect ( " Genesis header always stored; qed " )
2016-01-26 15:00:22 +01:00
}
2016-02-12 00:40:45 +01:00
/// Returns numbers of blocks containing given bloom.
fn blocks_with_bloom ( & self , bloom : & H2048 , from_block : BlockNumber , to_block : BlockNumber ) -> Vec < BlockNumber > ;
2016-09-14 12:02:30 +02:00
/// Returns logs matching given filter.
2016-11-22 18:03:35 +01:00
fn logs < F > ( & self , blocks : Vec < BlockNumber > , matches : F , limit : Option < usize > ) -> Vec < LocalizedLogEntry >
2016-09-14 12:02:30 +02:00
where F : Fn ( & LogEntry ) -> bool , Self : Sized ;
2016-01-12 13:14:01 +01:00
}
2016-01-18 15:48:38 +01:00
#[ derive(Debug, Hash, Eq, PartialEq, Clone) ]
2016-12-09 23:01:43 +01:00
enum CacheId {
2016-07-28 23:46:24 +02:00
BlockHeader ( H256 ) ,
BlockBody ( H256 ) ,
2016-05-26 18:24:51 +02:00
BlockDetails ( H256 ) ,
BlockHashes ( BlockNumber ) ,
TransactionAddresses ( H256 ) ,
BlocksBlooms ( LogGroupPosition ) ,
BlockReceipts ( H256 ) ,
2016-01-18 15:48:38 +01:00
}
2016-05-26 18:24:51 +02:00
impl bc ::group ::BloomGroupDatabase for BlockChain {
fn blooms_at ( & self , position : & bc ::group ::GroupPosition ) -> Option < bc ::group ::BloomGroup > {
let position = LogGroupPosition ::from ( position . clone ( ) ) ;
2016-08-18 18:24:49 +02:00
let result = self . db . read_with_cache ( db ::COL_EXTRA , & self . blocks_blooms , & position ) . map ( Into ::into ) ;
2016-12-09 23:01:43 +01:00
self . cache_man . lock ( ) . note_used ( CacheId ::BlocksBlooms ( position ) ) ;
2016-08-08 16:14:37 +02:00
result
2016-05-26 18:24:51 +02:00
}
}
2015-12-17 17:20:10 +01:00
/// Structure providing fast access to blockchain data.
2015-12-26 15:47:07 +01:00
///
2015-12-21 15:25:58 +01:00
/// **Does not do input data verification.**
2015-12-09 19:03:25 +01:00
pub struct BlockChain {
2016-02-22 00:36:59 +01:00
// All locks must be captured in the order declared here.
2016-05-26 18:24:51 +02:00
blooms_config : bc ::Config ,
2016-02-02 01:59:14 +01:00
2016-01-07 16:08:12 +01:00
best_block : RwLock < BestBlock > ,
2016-10-18 18:16:00 +02:00
// Stores best block of the first uninterrupted sequence of blocks. `None` if there are no gaps.
// Only updated with `insert_unordered_block`.
best_ancient_block : RwLock < Option < BestAncientBlock > > ,
// Stores the last block of the last sequence of blocks. `None` if there are no gaps.
// This is calculated on start and does not get updated.
first_block : Option < H256 > ,
2015-12-13 22:39:01 +01:00
2015-12-14 14:15:27 +01:00
// block cache
2016-07-28 23:46:24 +02:00
block_headers : RwLock < HashMap < H256 , Bytes > > ,
block_bodies : RwLock < HashMap < H256 , Bytes > > ,
2015-12-14 13:32:22 +01:00
2015-12-14 14:15:27 +01:00
// extra caches
2016-01-07 16:08:12 +01:00
block_details : RwLock < HashMap < H256 , BlockDetails > > ,
2016-01-11 01:07:58 +01:00
block_hashes : RwLock < HashMap < BlockNumber , H256 > > ,
2016-01-07 16:08:12 +01:00
transaction_addresses : RwLock < HashMap < H256 , TransactionAddress > > ,
2016-05-26 18:24:51 +02:00
blocks_blooms : RwLock < HashMap < LogGroupPosition , BloomGroup > > ,
2016-02-17 12:35:37 +01:00
block_receipts : RwLock < HashMap < H256 , BlockReceipts > > ,
2015-12-14 14:15:27 +01:00
2017-02-20 17:21:55 +01:00
db : Arc < KeyValueDB > ,
2016-01-18 15:48:38 +01:00
2016-12-09 23:01:43 +01:00
cache_man : Mutex < CacheManager < CacheId > > ,
2016-08-01 19:10:13 +02:00
pending_best_block : RwLock < Option < BestBlock > > ,
pending_block_hashes : RwLock < HashMap < BlockNumber , H256 > > ,
2016-10-27 15:26:29 +02:00
pending_block_details : RwLock < HashMap < H256 , BlockDetails > > ,
2016-09-28 15:49:42 +02:00
pending_transaction_addresses : RwLock < HashMap < H256 , Option < TransactionAddress > > > ,
2015-12-09 19:03:25 +01:00
}
2016-01-12 13:14:01 +01:00
impl BlockProvider for BlockChain {
/// Returns true if the given block is known
/// (though not necessarily a part of the canon chain).
fn is_known ( & self , hash : & H256 ) -> bool {
2016-08-18 18:24:49 +02:00
self . db . exists_with_cache ( db ::COL_EXTRA , & self . block_details , hash )
2016-01-12 13:14:01 +01:00
}
2016-10-18 18:16:00 +02:00
fn first_block ( & self ) -> Option < H256 > {
self . first_block . clone ( )
}
fn best_ancient_block ( & self ) -> Option < H256 > {
self . best_ancient_block . read ( ) . as_ref ( ) . map ( | b | b . hash . clone ( ) )
}
fn best_ancient_number ( & self ) -> Option < BlockNumber > {
self . best_ancient_block . read ( ) . as_ref ( ) . map ( | b | b . number )
2016-08-18 22:01:57 +02:00
}
2016-01-12 13:14:01 +01:00
/// Get raw block data
2016-12-28 13:44:51 +01:00
fn block ( & self , hash : & H256 ) -> Option < encoded ::Block > {
2016-07-28 23:46:24 +02:00
match ( self . block_header_data ( hash ) , self . block_body ( hash ) ) {
( Some ( header ) , Some ( body ) ) = > {
let mut block = RlpStream ::new_list ( 3 ) ;
2016-12-28 13:44:51 +01:00
let body_rlp = body . rlp ( ) ;
block . append_raw ( header . rlp ( ) . as_raw ( ) , 1 ) ;
2016-07-28 23:46:24 +02:00
block . append_raw ( body_rlp . at ( 0 ) . as_raw ( ) , 1 ) ;
block . append_raw ( body_rlp . at ( 1 ) . as_raw ( ) , 1 ) ;
2016-12-28 13:44:51 +01:00
Some ( encoded ::Block ::new ( block . out ( ) ) )
2016-07-28 23:46:24 +02:00
} ,
_ = > None ,
}
}
/// Get block header data
2016-12-28 13:44:51 +01:00
fn block_header_data ( & self , hash : & H256 ) -> Option < encoded ::Header > {
2016-07-28 23:46:24 +02:00
// Check cache first
{
let read = self . block_headers . read ( ) ;
if let Some ( v ) = read . get ( hash ) {
2016-12-28 13:44:51 +01:00
return Some ( encoded ::Header ::new ( v . clone ( ) ) ) ;
2016-07-28 23:46:24 +02:00
}
}
// Check if it's the best block
{
let best_block = self . best_block . read ( ) ;
if & best_block . hash = = hash {
2016-12-28 13:44:51 +01:00
return Some ( encoded ::Header ::new (
Rlp ::new ( & best_block . block ) . at ( 0 ) . as_raw ( ) . to_vec ( )
) )
2016-07-28 23:46:24 +02:00
}
}
// Read from DB and populate cache
2016-08-18 18:24:49 +02:00
let opt = self . db . get ( db ::COL_HEADERS , hash )
2016-07-28 23:46:24 +02:00
. expect ( " Low level database error. Some issue with disk? " ) ;
2016-08-08 16:14:37 +02:00
let result = match opt {
2016-07-28 23:46:24 +02:00
Some ( b ) = > {
let bytes : Bytes = UntrustedRlp ::new ( & b ) . decompress ( RlpType ::Blocks ) . to_vec ( ) ;
let mut write = self . block_headers . write ( ) ;
write . insert ( hash . clone ( ) , bytes . clone ( ) ) ;
2016-12-28 13:44:51 +01:00
Some ( encoded ::Header ::new ( bytes ) )
2016-07-28 23:46:24 +02:00
} ,
None = > None
2016-08-08 16:14:37 +02:00
} ;
2016-12-09 23:01:43 +01:00
self . cache_man . lock ( ) . note_used ( CacheId ::BlockHeader ( hash . clone ( ) ) ) ;
2016-08-08 16:14:37 +02:00
result
2016-07-28 23:46:24 +02:00
}
/// Get block body data
2016-12-28 13:44:51 +01:00
fn block_body ( & self , hash : & H256 ) -> Option < encoded ::Body > {
2016-07-28 23:46:24 +02:00
// Check cache first
2016-01-12 13:14:01 +01:00
{
2016-07-28 23:46:24 +02:00
let read = self . block_bodies . read ( ) ;
2016-01-17 15:56:09 +01:00
if let Some ( v ) = read . get ( hash ) {
2016-12-28 13:44:51 +01:00
return Some ( encoded ::Body ::new ( v . clone ( ) ) ) ;
2016-01-12 13:14:01 +01:00
}
}
2016-07-28 23:46:24 +02:00
// Check if it's the best block
{
let best_block = self . best_block . read ( ) ;
if & best_block . hash = = hash {
2016-12-28 13:44:51 +01:00
return Some ( encoded ::Body ::new ( Self ::block_to_body ( & best_block . block ) ) ) ;
2016-07-28 23:46:24 +02:00
}
}
// Read from DB and populate cache
2016-08-18 18:24:49 +02:00
let opt = self . db . get ( db ::COL_BODIES , hash )
2016-01-12 13:14:01 +01:00
. expect ( " Low level database error. Some issue with disk? " ) ;
2016-08-08 16:14:37 +02:00
let result = match opt {
2016-01-12 13:14:01 +01:00
Some ( b ) = > {
Blocks and snapshot compression (#1687)
* new Compressible rlp trait
* new Compressible rlp trait
* make compressed rlp iterable
* make compressed rlp iterable
* invalid rlp slice swapper
* switch compress to swapper, add reverse swapper test case
* add basic account compression test
* add new rlp trait
* new Compressible rlp trait
* make compressed rlp iterable
* invalid rlp slice swapper
* invalid rlp slice swapper
* switch compress to swapper, add reverse swapper test case
* switch compress to swapper, add reverse swapper test case
* add account compress/ decompress test
* make compressor cleaner, use hashmaps for swapper
* improve compression tests
* add a DecompressingDecoder, change Decoder to take refernce
* separate rlp compression related stuff
* new Compressible rlp trait
* new Compressible rlp trait
* new Compressible rlp trait
* make compressed rlp iterable
* make compressed rlp iterable
* make compressed rlp iterable
* invalid rlp slice swapper
* invalid rlp slice swapper
* invalid rlp slice swapper
* switch compress to swapper, add reverse swapper test case
* switch compress to swapper, add reverse swapper test case
* switch compress to swapper, add reverse swapper test case
* add basic account compression test
* add new rlp trait
* add account compress/ decompress test
* make compressor cleaner, use hashmaps for swapper
* improve compression tests
* add a DecompressingDecoder, change Decoder to take refernce
* separate rlp compression related stuff
* DecompressingDecoder test
* initial compressing HashDB wrapper
* remove unused test
* change CompressedDB to struct wrapper with overlay
* simplify compressor
* failed RefCell attempt
* use denote to return reference
* compiled compresseddb
* compressdb test, add overlay emplace
* fix overlay reference count handling
* add immutable compresseddb, make account use hashdb
* simplify using trait objects
* enable hashdb for account
* initial state compression attempt
* wrap state db
* add tests for analyzing db
* add account predicate
* try to compress data fields as rlp too
* remove compression for storage trie
* add a compressing migration
* more compression stats tests
* fix migration import
* nested encoding compression test
* fix decompression, move db stats tests to rlpcompression
* added malformed rlp tests, cover a few edge cases
* new CompressingEncoder struct
* extend migrations to state
* first version working on the whole db
* clean up Compressible impl
* tests cleanup
* add a testing migration
* refactor deep compression using option, add simple compression
* put tests in a module
* fix compressed overlay loading
* simple compression for snapshots
* remove unused DecompressingDecoder
* add a general compressing migration
* add more common rlps to compress
* use static slices for swapper
* add precomputed hashes and invalid rlps
* make decoder private again
* cover more cases with tests
* style
* fix weird indentation
* remove possible panic in payload_info
* make prefix checking safe
* fix db existence check
* remove db dir from test
* pass usize by value [ci skip]
* Improve comment on panic removal.
* add common blocks db rlps
* add compression to blockchain db
* add blocks db migration
* fix the migrations
* remove state compression
* add a separate snapshot swapper
* ability to use different swappers and traversal
* update tests to new interface
* clean up code ordering
* update usage
* fix compilation
* remove unnecessary changes
* move methods to functions to reduce interface
* move test to module
* update common rlps to blocks db
* move tests to tests modules
* remove redundant &
2016-07-27 17:11:41 +02:00
let bytes : Bytes = UntrustedRlp ::new ( & b ) . decompress ( RlpType ::Blocks ) . to_vec ( ) ;
2016-07-28 23:46:24 +02:00
let mut write = self . block_bodies . write ( ) ;
2016-01-12 13:14:01 +01:00
write . insert ( hash . clone ( ) , bytes . clone ( ) ) ;
2016-12-28 13:44:51 +01:00
Some ( encoded ::Body ::new ( bytes ) )
2016-01-12 13:14:01 +01:00
} ,
None = > None
2016-08-08 16:14:37 +02:00
} ;
2016-12-09 23:01:43 +01:00
self . cache_man . lock ( ) . note_used ( CacheId ::BlockBody ( hash . clone ( ) ) ) ;
2016-08-08 16:14:37 +02:00
result
2016-01-12 13:14:01 +01:00
}
/// Get the familial details concerning a block.
fn block_details ( & self , hash : & H256 ) -> Option < BlockDetails > {
2016-08-18 18:24:49 +02:00
let result = self . db . read_with_cache ( db ::COL_EXTRA , & self . block_details , hash ) ;
2016-12-09 23:01:43 +01:00
self . cache_man . lock ( ) . note_used ( CacheId ::BlockDetails ( hash . clone ( ) ) ) ;
2016-08-08 16:14:37 +02:00
result
2016-01-12 13:14:01 +01:00
}
/// Get the hash of given block's number.
fn block_hash ( & self , index : BlockNumber ) -> Option < H256 > {
2016-08-18 18:24:49 +02:00
let result = self . db . read_with_cache ( db ::COL_EXTRA , & self . block_hashes , & index ) ;
2016-12-09 23:01:43 +01:00
self . cache_man . lock ( ) . note_used ( CacheId ::BlockHashes ( index ) ) ;
2016-08-08 16:14:37 +02:00
result
2016-01-12 13:14:01 +01:00
}
2016-02-08 15:53:22 +01:00
/// Get the address of transaction with given hash.
fn transaction_address ( & self , hash : & H256 ) -> Option < TransactionAddress > {
2016-08-18 18:24:49 +02:00
let result = self . db . read_with_cache ( db ::COL_EXTRA , & self . transaction_addresses , hash ) ;
2016-12-09 23:01:43 +01:00
self . cache_man . lock ( ) . note_used ( CacheId ::TransactionAddresses ( hash . clone ( ) ) ) ;
2016-08-08 16:14:37 +02:00
result
2016-02-08 15:53:22 +01:00
}
2016-02-12 00:40:45 +01:00
2016-02-17 12:35:37 +01:00
/// Get receipts of block with given hash.
fn block_receipts ( & self , hash : & H256 ) -> Option < BlockReceipts > {
2016-08-18 18:24:49 +02:00
let result = self . db . read_with_cache ( db ::COL_EXTRA , & self . block_receipts , hash ) ;
2016-12-09 23:01:43 +01:00
self . cache_man . lock ( ) . note_used ( CacheId ::BlockReceipts ( hash . clone ( ) ) ) ;
2016-08-08 16:14:37 +02:00
result
2016-02-17 12:35:37 +01:00
}
2016-02-12 00:40:45 +01:00
/// Returns numbers of blocks containing given bloom.
fn blocks_with_bloom ( & self , bloom : & H2048 , from_block : BlockNumber , to_block : BlockNumber ) -> Vec < BlockNumber > {
2016-05-26 18:24:51 +02:00
let range = from_block as bc ::Number .. to_block as bc ::Number ;
let chain = bc ::group ::BloomGroupChain ::new ( self . blooms_config , self ) ;
chain . with_bloom ( & range , & Bloom ::from ( bloom . clone ( ) ) . into ( ) )
. into_iter ( )
. map ( | b | b as BlockNumber )
. collect ( )
2016-02-12 00:40:45 +01:00
}
2016-09-14 12:02:30 +02:00
fn logs < F > ( & self , mut blocks : Vec < BlockNumber > , matches : F , limit : Option < usize > ) -> Vec < LocalizedLogEntry >
where F : Fn ( & LogEntry ) -> bool , Self : Sized {
// sort in reverse order
blocks . sort_by ( | a , b | b . cmp ( a ) ) ;
let mut log_index = 0 ;
let mut logs = blocks . into_iter ( )
. filter_map ( | number | self . block_hash ( number ) . map ( | hash | ( number , hash ) ) )
. filter_map ( | ( number , hash ) | self . block_receipts ( & hash ) . map ( | r | ( number , hash , r . receipts ) ) )
2016-12-28 13:44:51 +01:00
. filter_map ( | ( number , hash , receipts ) | self . block_body ( & hash ) . map ( | ref b | ( number , hash , receipts , b . transaction_hashes ( ) ) ) )
2016-09-30 11:14:30 +02:00
. flat_map ( | ( number , hash , mut receipts , mut hashes ) | {
2016-10-15 14:39:15 +02:00
if receipts . len ( ) ! = hashes . len ( ) {
warn! ( " Block {} ({}) has different number of receipts ({}) to transactions ({}). Database corrupt? " , number , hash , receipts . len ( ) , hashes . len ( ) ) ;
assert! ( false ) ;
}
2016-09-14 12:02:30 +02:00
log_index = receipts . iter ( ) . fold ( 0 , | sum , receipt | sum + receipt . logs . len ( ) ) ;
let receipts_len = receipts . len ( ) ;
2016-09-30 11:14:30 +02:00
hashes . reverse ( ) ;
2016-09-14 12:02:30 +02:00
receipts . reverse ( ) ;
receipts . into_iter ( )
. map ( | receipt | receipt . logs )
. zip ( hashes )
. enumerate ( )
. flat_map ( move | ( index , ( mut logs , tx_hash ) ) | {
let current_log_index = log_index ;
2016-12-29 19:48:28 +01:00
let no_of_logs = logs . len ( ) ;
log_index - = no_of_logs ;
2016-09-14 12:02:30 +02:00
logs . reverse ( ) ;
logs . into_iter ( )
. enumerate ( )
. map ( move | ( i , log ) | LocalizedLogEntry {
entry : log ,
block_hash : hash ,
block_number : number ,
transaction_hash : tx_hash ,
// iterating in reverse order
transaction_index : receipts_len - index - 1 ,
2016-12-29 19:48:28 +01:00
transaction_log_index : no_of_logs - i - 1 ,
2016-09-14 12:02:30 +02:00
log_index : current_log_index - i - 1 ,
} )
} )
} )
. filter ( | log_entry | matches ( & log_entry . entry ) )
. take ( limit . unwrap_or ( ::std ::usize ::MAX ) )
. collect ::< Vec < LocalizedLogEntry > > ( ) ;
logs . reverse ( ) ;
logs
}
2016-01-12 13:14:01 +01:00
}
2016-10-24 18:27:23 +02:00
/// An iterator which walks the blockchain towards the genesis.
#[ derive(Clone) ]
2016-03-02 17:04:44 +01:00
pub struct AncestryIter < ' a > {
current : H256 ,
chain : & ' a BlockChain ,
}
2016-03-02 17:31:42 +01:00
2016-03-02 17:04:44 +01:00
impl < ' a > Iterator for AncestryIter < ' a > {
type Item = H256 ;
fn next ( & mut self ) -> Option < H256 > {
if self . current . is_zero ( ) {
2016-10-24 18:27:23 +02:00
None
2016-03-02 17:04:44 +01:00
} else {
2016-10-24 18:27:23 +02:00
self . chain . block_details ( & self . current )
. map ( | details | mem ::replace ( & mut self . current , details . parent ) )
2016-03-02 17:04:44 +01:00
}
}
}
2017-04-19 16:27:45 +02:00
/// An iterator which walks all epoch transitions.
/// Returns epoch transitions.
pub struct EpochTransitionIter < ' a > {
chain : & ' a BlockChain ,
prefix_iter : Box < Iterator < Item = ( Box < [ u8 ] > , Box < [ u8 ] > ) > + ' a > ,
}
impl < ' a > Iterator for EpochTransitionIter < ' a > {
type Item = ( u64 , EpochTransition ) ;
fn next ( & mut self ) -> Option < Self ::Item > {
loop {
match self . prefix_iter . next ( ) {
Some ( ( key , val ) ) = > {
// iterator may continue beyond values beginning with this
// prefix.
if ! key . starts_with ( & EPOCH_KEY_PREFIX [ .. ] ) { return None }
let transitions : EpochTransitions = ::rlp ::decode ( & val [ .. ] ) ;
// if there are multiple candidates, at most one will be on the
// canon chain.
for transition in transitions . candidates . into_iter ( ) {
let is_in_canon_chain = self . chain . block_hash ( transition . block_number )
. map_or ( false , | hash | hash = = transition . block_hash ) ;
if is_in_canon_chain {
return Some ( ( transitions . number , transition ) )
}
}
// some epochs never occurred on the main chain.
}
None = > return None ,
}
}
}
}
2015-12-09 19:03:25 +01:00
impl BlockChain {
2017-01-23 15:27:11 +01:00
/// Create new instance of blockchain from given Genesis.
2017-02-20 17:21:55 +01:00
pub fn new ( config : Config , genesis : & [ u8 ] , db : Arc < KeyValueDB > ) -> BlockChain {
2016-07-31 00:19:27 +02:00
// 400 is the avarage size of the key
let cache_man = CacheManager ::new ( config . pref_cache_size , config . max_cache_size , 400 ) ;
2016-01-18 19:23:28 +01:00
2016-08-18 22:01:57 +02:00
let mut bc = BlockChain {
2016-05-26 18:24:51 +02:00
blooms_config : bc ::Config {
levels : LOG_BLOOMS_LEVELS ,
elements_per_index : LOG_BLOOMS_ELEMENTS_PER_INDEX ,
} ,
2016-10-18 18:16:00 +02:00
first_block : None ,
2016-02-27 02:16:39 +01:00
best_block : RwLock ::new ( BestBlock ::default ( ) ) ,
2016-10-18 18:16:00 +02:00
best_ancient_block : RwLock ::new ( None ) ,
2016-07-28 23:46:24 +02:00
block_headers : RwLock ::new ( HashMap ::new ( ) ) ,
block_bodies : RwLock ::new ( HashMap ::new ( ) ) ,
2016-01-07 16:08:12 +01:00
block_details : RwLock ::new ( HashMap ::new ( ) ) ,
block_hashes : RwLock ::new ( HashMap ::new ( ) ) ,
transaction_addresses : RwLock ::new ( HashMap ::new ( ) ) ,
blocks_blooms : RwLock ::new ( HashMap ::new ( ) ) ,
2016-02-17 12:35:37 +01:00
block_receipts : RwLock ::new ( HashMap ::new ( ) ) ,
2016-07-28 23:46:24 +02:00
db : db . clone ( ) ,
2016-08-08 16:25:48 +02:00
cache_man : Mutex ::new ( cache_man ) ,
2016-08-01 19:10:13 +02:00
pending_best_block : RwLock ::new ( None ) ,
pending_block_hashes : RwLock ::new ( HashMap ::new ( ) ) ,
2016-10-27 15:26:29 +02:00
pending_block_details : RwLock ::new ( HashMap ::new ( ) ) ,
2016-08-01 19:10:13 +02:00
pending_transaction_addresses : RwLock ::new ( HashMap ::new ( ) ) ,
2015-12-17 01:54:24 +01:00
} ;
2015-12-17 15:11:42 +01:00
// load best block
2016-08-18 18:24:49 +02:00
let best_block_hash = match bc . db . get ( db ::COL_EXTRA , b " best " ) . unwrap ( ) {
2016-07-04 18:24:14 +02:00
Some ( best ) = > {
2016-07-28 23:46:24 +02:00
H256 ::from_slice ( & best )
2016-07-04 18:24:14 +02:00
}
2015-12-17 15:11:42 +01:00
None = > {
// best block does not exist
// we need to insert genesis into the cache
2015-12-17 17:20:10 +01:00
let block = BlockView ::new ( genesis ) ;
2015-12-17 15:11:42 +01:00
let header = block . header_view ( ) ;
let hash = block . sha3 ( ) ;
let details = BlockDetails {
number : header . number ( ) ,
total_difficulty : header . difficulty ( ) ,
parent : header . parent_hash ( ) ,
children : vec ! [ ]
} ;
2017-02-20 17:21:55 +01:00
let mut batch = DBTransaction ::new ( ) ;
2016-08-18 18:24:49 +02:00
batch . put ( db ::COL_HEADERS , & hash , block . header_rlp ( ) . as_raw ( ) ) ;
batch . put ( db ::COL_BODIES , & hash , & Self ::block_to_body ( genesis ) ) ;
2015-12-26 15:47:07 +01:00
2016-08-18 18:24:49 +02:00
batch . write ( db ::COL_EXTRA , & hash , & details ) ;
batch . write ( db ::COL_EXTRA , & header . number ( ) , & hash ) ;
2016-08-18 22:01:57 +02:00
2016-08-18 18:24:49 +02:00
batch . put ( db ::COL_EXTRA , b " best " , & hash ) ;
2016-07-28 23:46:24 +02:00
bc . db . write ( batch ) . expect ( " Low level database error. Some issue with disk? " ) ;
2015-12-17 15:11:42 +01:00
hash
}
} ;
2015-12-21 02:57:02 +01:00
{
2016-07-28 23:46:24 +02:00
// Fetch best block details
let best_block_number = bc . block_number ( & best_block_hash ) . unwrap ( ) ;
let best_block_total_difficulty = bc . block_details ( & best_block_hash ) . unwrap ( ) . total_difficulty ;
2016-12-28 13:44:51 +01:00
let best_block_rlp = bc . block ( & best_block_hash ) . unwrap ( ) . into_inner ( ) ;
2017-02-03 19:32:10 +01:00
let best_block_timestamp = BlockView ::new ( & best_block_rlp ) . header ( ) . timestamp ( ) ;
2016-07-28 23:46:24 +02:00
2016-10-18 18:16:00 +02:00
let raw_first = bc . db . get ( db ::COL_EXTRA , b " first " ) . unwrap ( ) . map ( | v | v . to_vec ( ) ) ;
let mut best_ancient = bc . db . get ( db ::COL_EXTRA , b " ancient " ) . unwrap ( ) . map ( | h | H256 ::from_slice ( & h ) ) ;
let best_ancient_number ;
if best_ancient . is_none ( ) & & best_block_number > 1 & & bc . block_hash ( 1 ) . is_none ( ) {
best_ancient = Some ( bc . genesis_hash ( ) ) ;
best_ancient_number = Some ( 0 ) ;
} else {
best_ancient_number = best_ancient . as_ref ( ) . and_then ( | h | bc . block_number ( h ) ) ;
}
2016-08-18 22:01:57 +02:00
// binary search for the first block.
2016-10-18 18:16:00 +02:00
match raw_first {
None = > {
let ( mut f , mut hash ) = ( best_block_number , best_block_hash ) ;
let mut l = best_ancient_number . unwrap_or ( 0 ) ;
2016-08-18 22:01:57 +02:00
2016-10-18 18:16:00 +02:00
loop {
if l > = f { break ; }
2016-08-18 22:01:57 +02:00
2016-10-18 18:16:00 +02:00
let step = ( f - l ) > > 1 ;
let m = l + step ;
2016-08-18 22:01:57 +02:00
2016-10-18 18:16:00 +02:00
match bc . block_hash ( m ) {
Some ( h ) = > { f = m ; hash = h } ,
None = > { l = m + 1 } ,
}
2016-08-18 22:01:57 +02:00
}
2016-10-18 18:16:00 +02:00
if hash ! = bc . genesis_hash ( ) {
trace! ( " First block calculated: {:?} " , hash ) ;
let mut batch = db . transaction ( ) ;
batch . put ( db ::COL_EXTRA , b " first " , & hash ) ;
db . write ( batch ) . expect ( " Low level database error. " ) ;
bc . first_block = Some ( hash ) ;
}
} ,
Some ( raw_first ) = > {
bc . first_block = Some ( H256 ::from_slice ( & raw_first ) ) ;
} ,
2016-08-18 22:01:57 +02:00
}
2016-07-28 23:46:24 +02:00
// and write them
2016-07-13 19:59:59 +02:00
let mut best_block = bc . best_block . write ( ) ;
2016-07-28 23:46:24 +02:00
* best_block = BestBlock {
number : best_block_number ,
total_difficulty : best_block_total_difficulty ,
hash : best_block_hash ,
2017-02-03 19:32:10 +01:00
timestamp : best_block_timestamp ,
2016-07-28 23:46:24 +02:00
block : best_block_rlp ,
} ;
2016-10-18 18:16:00 +02:00
if let ( Some ( hash ) , Some ( number ) ) = ( best_ancient , best_ancient_number ) {
let mut best_ancient_block = bc . best_ancient_block . write ( ) ;
* best_ancient_block = Some ( BestAncientBlock {
hash : hash ,
number : number ,
} ) ;
}
2015-12-21 02:57:02 +01:00
}
2015-12-17 15:11:42 +01:00
2015-12-17 01:54:24 +01:00
bc
2015-12-11 03:51:23 +01:00
}
2016-07-17 09:18:15 +02:00
/// Returns true if the given parent block has given child
/// (though not necessarily a part of the canon chain).
fn is_known_child ( & self , parent : & H256 , hash : & H256 ) -> bool {
2016-08-18 18:24:49 +02:00
self . db . read_with_cache ( db ::COL_EXTRA , & self . block_details , parent ) . map_or ( false , | d | d . children . contains ( hash ) )
2016-07-17 09:18:15 +02:00
}
2016-07-17 23:03:29 +02:00
	/// Rewind to a previous block: drop the current best block and make its
	/// parent the new best. Test-only helper.
	///
	/// Returns the new best block hash, or `None` when the best block is the
	/// genesis block or its extras entry is missing.
	#[cfg(test)]
	fn rewind(&self) -> Option<H256> {
		use db::Key;
		let mut batch = self.db.transaction();
		// track back to the best block we have in the blocks database
		if let Some(best_block_hash) = self.db.get(db::COL_EXTRA, b"best").unwrap() {
			let best_block_hash = H256::from_slice(&best_block_hash);
			if best_block_hash == self.genesis_hash() {
				// nothing before genesis to rewind to.
				return None;
			}
			if let Some(extras) = self.db.read(db::COL_EXTRA, &best_block_hash) as Option<BlockDetails> {
				// delete the rewound block's details entry from the extras column.
				type DetailsKey = Key<BlockDetails, Target=H264>;
				batch.delete(db::COL_EXTRA, &(DetailsKey::key(&best_block_hash)));
				let hash = extras.parent;
				// NOTE(review): `n..n` is an empty range, so `replace` rewrites
				// blooms for no blocks — presumably `n..n + 1` was intended to
				// drop the rewound block's bloom; confirm before relying on it.
				let range = extras.number as bc::Number .. extras.number as bc::Number;
				let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
				let changes = chain.replace(&range, vec![]);
				for (k, v) in changes {
					batch.write(db::COL_EXTRA, &LogGroupPosition::from(k), &BloomGroup::from(v));
				}
				// repoint the "best" marker at the parent block.
				batch.put(db::COL_EXTRA, b"best", &hash);
				let best_block_total_difficulty = self.block_details(&hash).unwrap().total_difficulty;
				let best_block_rlp = self.block(&hash).unwrap().into_inner();
				let mut best_block = self.best_block.write();
				*best_block = BestBlock {
					number: extras.number - 1,
					total_difficulty: best_block_total_difficulty,
					hash: hash,
					timestamp: BlockView::new(&best_block_rlp).header().timestamp(),
					block: best_block_rlp,
				};
				// update parent extras: drop all recorded children
				// (including the block just rewound).
				if let Some(mut details) = self.db.read(db::COL_EXTRA, &hash) as Option<BlockDetails> {
					details.children.clear();
					batch.write(db::COL_EXTRA, &hash, &details);
				}
				self.db.write(batch).expect("Writing to db failed");
				// invalidate the extras caches wholesale rather than patching them.
				self.block_details.write().clear();
				self.block_hashes.write().clear();
				self.block_headers.write().clear();
				self.block_bodies.write().clear();
				self.block_receipts.write().clear();
				return Some(hash);
			}
		}
		None
	}
2015-12-17 15:11:42 +01:00
	/// Returns a tree route between `from` and `to`, which is a tuple of:
	///
	/// - a vector of hashes of all blocks, ordered from `from` to `to`.
	///
	/// - common ancestor of these blocks.
	///
	/// - an index where best common ancestor would be
	///
	/// 1.) from newer to older
	///
	/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
	/// - from: A5, to: A4
	/// - route:
	///
	/// ```json
	/// { blocks: [A5], ancestor: A4, index: 1 }
	/// ```
	///
	/// 2.) from older to newer
	///
	/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
	/// - from: A3, to: A4
	/// - route:
	///
	/// ```json
	/// { blocks: [A4], ancestor: A3, index: 0 }
	/// ```
	///
	/// 3.) fork:
	///
	/// - bc:
	///
	/// ```text
	/// A1 -> A2 -> A3 -> A4
	///             -> B3 -> B4
	/// ```
	/// - from: B4, to: A4
	/// - route:
	///
	/// ```json
	/// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 }
	/// ```
	///
	/// If the tree route verges into pruned or unknown blocks,
	/// `None` is returned.
	pub fn tree_route(&self, from: H256, to: H256) -> Option<TreeRoute> {
		// local shorthand: unwrap the Option or bail out of the whole function.
		macro_rules! otry {
			($e: expr) => { match $e { Some(x) => x, None => return None } }
		}
		let mut from_branch = vec![];
		let mut to_branch = vec![];

		let mut from_details = otry!(self.block_details(&from));
		let mut to_details = otry!(self.block_details(&to));
		let mut current_from = from;
		let mut current_to = to;

		// reset from && to to the same level: walk the deeper side upward,
		// recording every hash passed on the way.
		while from_details.number > to_details.number {
			from_branch.push(current_from);
			current_from = from_details.parent.clone();
			from_details = otry!(self.block_details(&from_details.parent));
		}
		while to_details.number > from_details.number {
			to_branch.push(current_to);
			current_to = to_details.parent.clone();
			to_details = otry!(self.block_details(&to_details.parent));
		}

		assert_eq!(from_details.number, to_details.number);

		// move to shared parent: both cursors now sit at the same height,
		// so step them in lockstep until they meet at the common ancestor.
		while current_from != current_to {
			from_branch.push(current_from);
			current_from = from_details.parent.clone();
			from_details = otry!(self.block_details(&from_details.parent));

			to_branch.push(current_to);
			current_to = to_details.parent.clone();
			to_details = otry!(self.block_details(&to_details.parent));
		}

		// `index` marks where the retraced `from` side ends; the `to` side is
		// appended oldest-first after it.
		let index = from_branch.len();

		from_branch.extend(to_branch.into_iter().rev());

		Some(TreeRoute {
			blocks: from_branch,
			ancestor: current_from,
			index: index
		})
	}
2016-08-05 17:00:46 +02:00
	/// Inserts a verified, known block from the canonical chain.
	///
	/// Can be performed out-of-order, but care must be taken that the final chain is in a correct state.
	/// This is used by snapshot restoration and when downloading missing blocks for the chain gap.
	/// `is_best` forces the best block to be updated to this block.
	/// `is_ancient` forces the best block of the first block sequence to be updated to this block.
	/// Supply a dummy parent total difficulty when the parent block may not be in the chain.
	/// Returns true if the block is disconnected.
	pub fn insert_unordered_block(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec<Receipt>, parent_td: Option<U256>, is_best: bool, is_ancient: bool) -> bool {
		let block = BlockView::new(bytes);
		let header = block.header_view();
		let hash = header.sha3();

		// already imported: nothing to do, and not disconnected.
		if self.is_known(&hash) {
			return false;
		}

		// only one insertion may be staged (un-committed) at a time.
		assert!(self.pending_best_block.read().is_none());

		// compress header and body before storing them.
		let block_rlp = UntrustedRlp::new(bytes);
		let compressed_header = block_rlp.at(0).unwrap().compress(RlpType::Blocks);
		let compressed_body = UntrustedRlp::new(&Self::block_to_body(bytes)).compress(RlpType::Blocks);

		// store block in db
		batch.put(db::COL_HEADERS, &hash, &compressed_header);
		batch.put(db::COL_BODIES, &hash, &compressed_body);

		let maybe_parent = self.block_details(&header.parent_hash());

		if let Some(parent_details) = maybe_parent {
			// parent known to be in chain.
			let info = BlockInfo {
				hash: hash.clone(),
				number: header.number(),
				total_difficulty: parent_details.total_difficulty + header.difficulty(),
				location: BlockLocation::CanonChain,
			};

			self.prepare_update(batch, ExtrasUpdate {
				block_hashes: self.prepare_block_hashes_update(bytes, &info),
				block_details: self.prepare_block_details_update(bytes, &info),
				block_receipts: self.prepare_block_receipts_update(receipts, &info),
				blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
				transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
				info: info,
				timestamp: header.timestamp(),
				block: bytes
			}, is_best);

			if is_ancient {
				let mut best_ancient_block = self.best_ancient_block.write();
				let ancient_number = best_ancient_block.as_ref().map_or(0, |b| b.number);
				if self.block_hash(header.number() + 1).is_some() {
					// the gap ahead of this block is closed: the ancient
					// marker is no longer needed.
					batch.delete(db::COL_EXTRA, b"ancient");
					*best_ancient_block = None;
				} else if header.number() > ancient_number {
					// this block extends the pre-gap (ancient) chain head.
					batch.put(db::COL_EXTRA, b"ancient", &hash);
					*best_ancient_block = Some(BestAncientBlock {
						hash: hash,
						number: header.number(),
					});
				}
			}

			false
		} else {
			// parent not in the chain yet. we need the parent difficulty to proceed.
			let d = parent_td
				.expect("parent total difficulty always supplied for first block in chunk. only first block can have missing parent; qed");

			let info = BlockInfo {
				hash: hash,
				number: header.number(),
				total_difficulty: d + header.difficulty(),
				location: BlockLocation::CanonChain,
			};

			// parent details are unavailable, so build this block's details
			// directly instead of going through `prepare_block_details_update`.
			let block_details = BlockDetails {
				number: header.number(),
				total_difficulty: info.total_difficulty,
				parent: header.parent_hash(),
				children: Vec::new(),
			};

			let mut update = HashMap::new();
			update.insert(hash, block_details);

			self.prepare_update(batch, ExtrasUpdate {
				block_hashes: self.prepare_block_hashes_update(bytes, &info),
				block_details: update,
				block_receipts: self.prepare_block_receipts_update(receipts, &info),
				blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
				transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
				info: info,
				timestamp: header.timestamp(),
				block: bytes,
			}, is_best);
			// block is disconnected from the rest of the chain.
			true
		}
	}
2017-04-19 14:58:19 +02:00
/// Insert an epoch transition. Provide an epoch number being transitioned to
/// and epoch transition object.
///
/// The block the transition occurred at should have already been inserted into the chain.
pub fn insert_epoch_transition ( & self , batch : & mut DBTransaction , epoch_num : u64 , transition : EpochTransition ) {
let mut transitions = match self . db . read ( db ::COL_EXTRA , & epoch_num ) {
Some ( existing ) = > existing ,
None = > EpochTransitions {
number : epoch_num ,
candidates : Vec ::with_capacity ( 1 ) ,
}
} ;
2017-04-19 15:35:12 +02:00
// ensure we don't write any duplicates.
if transitions . candidates . iter ( ) . find ( | c | c . block_hash = = transition . block_hash ) . is_none ( ) {
transitions . candidates . push ( transition ) ;
batch . write ( db ::COL_EXTRA , & epoch_num , & transitions ) ;
}
2017-04-19 14:58:19 +02:00
}
2017-04-19 16:27:45 +02:00
/// Iterate over all epoch transitions.
pub fn epoch_transitions ( & self ) -> EpochTransitionIter {
let iter = self . db . iter_from_prefix ( db ::COL_EXTRA , & EPOCH_KEY_PREFIX [ .. ] ) ;
EpochTransitionIter {
chain : self ,
prefix_iter : iter ,
}
}
2016-08-05 17:00:46 +02:00
/// Add a child to a given block. Assumes that the block hash is in
/// the chain and the child's parent is this block.
///
/// Used in snapshots to glue the chunks together at the end.
2016-10-28 16:10:30 +02:00
pub fn add_child ( & self , batch : & mut DBTransaction , block_hash : H256 , child_hash : H256 ) {
2016-08-05 17:00:46 +02:00
let mut parent_details = self . block_details ( & block_hash )
. unwrap_or_else ( | | panic! ( " Invalid block hash: {:?} " , block_hash ) ) ;
parent_details . children . push ( child_hash ) ;
let mut update = HashMap ::new ( ) ;
update . insert ( block_hash , parent_details ) ;
let mut write_details = self . block_details . write ( ) ;
2016-08-18 18:24:49 +02:00
batch . extend_with_cache ( db ::COL_EXTRA , & mut * write_details , update , CacheUpdatePolicy ::Overwrite ) ;
2016-08-05 17:00:46 +02:00
2016-12-09 23:01:43 +01:00
self . cache_man . lock ( ) . note_used ( CacheId ::BlockDetails ( block_hash ) ) ;
2016-08-05 17:00:46 +02:00
}
2016-04-06 10:07:24 +02:00
	#[cfg_attr(feature="dev", allow(similar_names))]
	/// Inserts the block into backing cache database.
	/// Expects the block to be valid and already verified.
	/// If the block is already known, does nothing.
	///
	/// Writes are staged: extras go into `batch` and the pending caches via
	/// `prepare_update`; `commit` applies the pending caches afterwards.
	pub fn insert_block(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
		// create views onto rlp
		let block = BlockView::new(bytes);
		let header = block.header_view();
		let hash = header.sha3();

		// already linked under its parent => block is known, nothing to do.
		if self.is_known_child(&header.parent_hash(), &hash) {
			return ImportRoute::none();
		}

		// only one insertion may be staged (un-committed) at a time.
		assert!(self.pending_best_block.read().is_none());

		// store block in db
		batch.put_compressed(db::COL_HEADERS, &hash, block.header_rlp().as_raw().to_vec());
		batch.put_compressed(db::COL_BODIES, &hash, Self::block_to_body(bytes));

		// classify the insertion: branch, canon extension, or reorg.
		let info = self.block_info(&header);

		if let BlockLocation::BranchBecomingCanonChain(ref d) = info.location {
			info!(target: "reorg", "Reorg to {} ({} {} {})",
				Colour::Yellow.bold().paint(format!("#{} {}", info.number, info.hash)),
				Colour::Red.paint(d.retracted.iter().join(" ")),
				Colour::White.paint(format!("#{} {}", self.block_details(&d.ancestor).expect("`ancestor` is in the route; qed").number, d.ancestor)),
				Colour::Green.paint(d.enacted.iter().join(" "))
			);
		}

		self.prepare_update(batch, ExtrasUpdate {
			block_hashes: self.prepare_block_hashes_update(bytes, &info),
			block_details: self.prepare_block_details_update(bytes, &info),
			block_receipts: self.prepare_block_receipts_update(receipts, &info),
			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
			info: info.clone(),
			timestamp: header.timestamp(),
			block: bytes,
		}, true);

		ImportRoute::from(info)
	}
2015-12-21 15:22:24 +01:00
2016-07-26 00:20:37 +02:00
/// Get inserted block info which is critical to prepare extras updates.
2016-08-03 22:03:40 +02:00
fn block_info ( & self , header : & HeaderView ) -> BlockInfo {
let hash = header . sha3 ( ) ;
2016-07-26 00:20:37 +02:00
let number = header . number ( ) ;
let parent_hash = header . parent_hash ( ) ;
let parent_details = self . block_details ( & parent_hash ) . unwrap_or_else ( | | panic! ( " Invalid parent hash: {:?} " , parent_hash ) ) ;
2017-01-23 15:27:11 +01:00
let is_new_best = parent_details . total_difficulty + header . difficulty ( ) > self . best_block_total_difficulty ( ) ;
2016-07-26 00:20:37 +02:00
BlockInfo {
hash : hash ,
number : number ,
2016-12-05 16:20:32 +01:00
total_difficulty : parent_details . total_difficulty + header . difficulty ( ) ,
2016-07-26 00:20:37 +02:00
location : if is_new_best {
// on new best block we need to make sure that all ancestors
// are moved to "canon chain"
// find the route between old best block and the new one
let best_hash = self . best_block_hash ( ) ;
2017-04-20 16:21:53 +02:00
let route = self . tree_route ( best_hash , parent_hash )
. expect ( " blocks being imported always within recent history; qed " ) ;
2016-07-26 00:20:37 +02:00
assert_eq! ( number , parent_details . number + 1 ) ;
match route . blocks . len ( ) {
0 = > BlockLocation ::CanonChain ,
_ = > {
let retracted = route . blocks . iter ( ) . take ( route . index ) . cloned ( ) . collect ::< Vec < _ > > ( ) . into_iter ( ) . collect ::< Vec < _ > > ( ) ;
let enacted = route . blocks . into_iter ( ) . skip ( route . index ) . collect ::< Vec < _ > > ( ) ;
BlockLocation ::BranchBecomingCanonChain ( BranchBecomingCanonChainData {
ancestor : route . ancestor ,
enacted : enacted ,
retracted : retracted ,
} )
}
}
} else {
BlockLocation ::Branch
}
}
}
2016-08-01 19:10:13 +02:00
	/// Prepares extras update: stages `update` into the DB transaction `batch`
	/// and into the in-memory caches.
	///
	/// Receipts and blooms go straight into their live caches; the best block,
	/// block hashes, block details and transaction addresses are staged in the
	/// `pending_*` caches and only become visible once `commit` is called.
	fn prepare_update(&self, batch: &mut DBTransaction, update: ExtrasUpdate, is_best: bool) {
		{
			let mut write_receipts = self.block_receipts.write();
			batch.extend_with_cache(db::COL_EXTRA, &mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
		}

		{
			let mut write_blocks_blooms = self.blocks_blooms.write();
			batch.extend_with_cache(db::COL_EXTRA, &mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove);
		}

		// These cached values must be updated last with all four locks taken to avoid
		// cache decoherence
		{
			let mut best_block = self.pending_best_block.write();

			// update best block
			match update.info.location {
				// a plain branch block never changes the best block.
				BlockLocation::Branch => (),
				_ => if is_best {
					batch.put(db::COL_EXTRA, b"best", &update.info.hash);
					*best_block = Some(BestBlock {
						hash: update.info.hash,
						number: update.info.number,
						total_difficulty: update.info.total_difficulty,
						timestamp: update.timestamp,
						block: update.block.to_vec(),
					});
				},
			}

			// take the remaining pending locks while still holding the
			// pending-best-block lock (see decoherence note above).
			let mut write_hashes = self.pending_block_hashes.write();
			let mut write_details = self.pending_block_details.write();
			let mut write_txs = self.pending_transaction_addresses.write();

			batch.extend_with_cache(db::COL_EXTRA, &mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
			batch.extend_with_cache(db::COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Overwrite);
			batch.extend_with_option_cache(db::COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
		}
	}
2016-08-31 16:55:43 +02:00
	/// Apply pending insertion updates: drain every `pending_*` cache filled by
	/// `prepare_update` into the corresponding live cache.
	///
	/// All pending and live locks are taken up front so readers never observe a
	/// half-applied update.
	pub fn commit(&self) {
		let mut pending_best_block = self.pending_best_block.write();
		let mut pending_write_hashes = self.pending_block_hashes.write();
		let mut pending_block_details = self.pending_block_details.write();
		let mut pending_write_txs = self.pending_transaction_addresses.write();

		let mut best_block = self.best_block.write();
		let mut write_block_details = self.block_details.write();
		let mut write_hashes = self.block_hashes.write();
		let mut write_txs = self.transaction_addresses.write();

		// update best block
		if let Some(block) = pending_best_block.take() {
			*best_block = block;
		}

		// split pending transaction addresses: `None` marks a retracted
		// transaction (to be removed), `Some` an enacted one (to be inserted).
		let pending_txs = mem::replace(&mut *pending_write_txs, HashMap::new());
		let (retracted_txs, enacted_txs) = pending_txs.into_iter().partition::<HashMap<_, _>, _>(|&(_, ref value)| value.is_none());

		// snapshot keys before the pending maps are drained; they are needed
		// below to inform the cache manager of freshly used entries.
		let pending_hashes_keys: Vec<_> = pending_write_hashes.keys().cloned().collect();
		let enacted_txs_keys: Vec<_> = enacted_txs.keys().cloned().collect();
		let pending_block_hashes: Vec<_> = pending_block_details.keys().cloned().collect();

		write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new()));
		write_txs.extend(enacted_txs.into_iter().map(|(k, v)| (k, v.expect("Transactions were partitioned; qed"))));
		write_block_details.extend(mem::replace(&mut *pending_block_details, HashMap::new()));

		// drop addresses of retracted transactions from the live cache.
		for hash in retracted_txs.keys() {
			write_txs.remove(hash);
		}

		// mark every touched entry as recently used for eviction accounting.
		let mut cache_man = self.cache_man.lock();
		for n in pending_hashes_keys {
			cache_man.note_used(CacheId::BlockHashes(n));
		}

		for hash in enacted_txs_keys {
			cache_man.note_used(CacheId::TransactionAddresses(hash));
		}

		for hash in pending_block_hashes {
			cache_man.note_used(CacheId::BlockDetails(hash));
		}
	}
2016-03-02 18:32:54 +01:00
/// Iterator that lists `first` and then all of `first`'s ancestors, by hash.
2016-03-02 18:05:47 +01:00
pub fn ancestry_iter ( & self , first : H256 ) -> Option < AncestryIter > {
2016-03-02 18:32:54 +01:00
if self . is_known ( & first ) {
Some ( AncestryIter {
current : first ,
2016-07-26 20:31:25 +02:00
chain : self ,
2016-03-02 18:32:54 +01:00
} )
} else {
None
2016-03-02 17:04:44 +01:00
}
}
2016-03-02 19:38:00 +01:00
/// Given a block's `parent`, find every block header which represents a valid possible uncle.
pub fn find_uncle_headers ( & self , parent : & H256 , uncle_generations : usize ) -> Option < Vec < Header > > {
2016-05-24 21:56:17 +02:00
self . find_uncle_hashes ( parent , uncle_generations ) . map ( | v | v . into_iter ( ) . filter_map ( | h | self . block_header ( & h ) ) . collect ( ) )
}
/// Given a block's `parent`, find every block hash which represents a valid possible uncle.
pub fn find_uncle_hashes ( & self , parent : & H256 , uncle_generations : usize ) -> Option < Vec < H256 > > {
2016-03-02 18:32:54 +01:00
if ! self . is_known ( parent ) { return None ; }
2016-03-02 19:38:00 +01:00
let mut excluded = HashSet ::new ( ) ;
2016-10-24 18:27:23 +02:00
let ancestry = match self . ancestry_iter ( parent . clone ( ) ) {
Some ( iter ) = > iter ,
None = > return None ,
} ;
for a in ancestry . clone ( ) . take ( uncle_generations ) {
if let Some ( uncles ) = self . uncle_hashes ( & a ) {
excluded . extend ( uncles ) ;
excluded . insert ( a ) ;
} else {
break
}
2016-03-02 19:38:00 +01:00
}
let mut ret = Vec ::new ( ) ;
2016-10-24 18:27:23 +02:00
for a in ancestry . skip ( 1 ) . take ( uncle_generations ) {
if let Some ( details ) = self . block_details ( & a ) {
ret . extend ( details . children . iter ( ) . filter ( | h | ! excluded . contains ( h ) ) )
} else {
break
}
2016-03-02 18:32:54 +01:00
}
2016-10-24 18:27:23 +02:00
2016-03-02 19:38:00 +01:00
Some ( ret )
2016-03-01 19:59:12 +01:00
}
2016-02-27 10:19:33 +01:00
/// This function returns modified block hashes.
fn prepare_block_hashes_update ( & self , block_bytes : & [ u8 ] , info : & BlockInfo ) -> HashMap < BlockNumber , H256 > {
let mut block_hashes = HashMap ::new ( ) ;
2016-02-27 01:37:12 +01:00
let block = BlockView ::new ( block_bytes ) ;
let header = block . header_view ( ) ;
let number = header . number ( ) ;
match info . location {
2016-02-27 10:19:33 +01:00
BlockLocation ::Branch = > ( ) ,
2016-02-27 01:37:12 +01:00
BlockLocation ::CanonChain = > {
2016-02-27 10:19:33 +01:00
block_hashes . insert ( number , info . hash . clone ( ) ) ;
2016-02-27 01:37:12 +01:00
} ,
2016-04-17 17:18:25 +02:00
BlockLocation ::BranchBecomingCanonChain ( ref data ) = > {
2016-07-28 23:46:24 +02:00
let ancestor_number = self . block_number ( & data . ancestor ) . expect ( " Block number of ancestor is always in DB " ) ;
2016-02-27 01:37:12 +01:00
let start_number = ancestor_number + 1 ;
2016-04-17 17:18:25 +02:00
for ( index , hash ) in data . enacted . iter ( ) . cloned ( ) . enumerate ( ) {
2016-02-27 10:19:33 +01:00
block_hashes . insert ( start_number + index as BlockNumber , hash ) ;
2016-02-27 01:37:12 +01:00
}
2016-02-27 10:19:33 +01:00
block_hashes . insert ( number , info . hash . clone ( ) ) ;
2016-02-27 01:37:12 +01:00
}
2016-02-27 10:19:33 +01:00
}
block_hashes
2016-02-27 01:37:12 +01:00
}
2016-02-27 10:19:33 +01:00
/// This function returns modified block details.
2016-08-05 17:00:46 +02:00
/// Uses the given parent details or attempts to load them from the database.
2016-02-27 10:19:33 +01:00
fn prepare_block_details_update ( & self , block_bytes : & [ u8 ] , info : & BlockInfo ) -> HashMap < H256 , BlockDetails > {
2016-02-27 01:37:12 +01:00
let block = BlockView ::new ( block_bytes ) ;
let header = block . header_view ( ) ;
2015-12-17 17:20:10 +01:00
let parent_hash = header . parent_hash ( ) ;
2015-12-17 15:11:42 +01:00
2016-02-27 01:37:12 +01:00
// update parent
2016-07-19 09:23:53 +02:00
let mut parent_details = self . block_details ( & parent_hash ) . unwrap_or_else ( | | panic! ( " Invalid parent hash: {:?} " , parent_hash ) ) ;
2016-02-27 01:37:12 +01:00
parent_details . children . push ( info . hash . clone ( ) ) ;
2015-12-17 15:11:42 +01:00
// create current block details
let details = BlockDetails {
number : header . number ( ) ,
2016-02-27 01:37:12 +01:00
total_difficulty : info . total_difficulty ,
2015-12-17 17:20:10 +01:00
parent : parent_hash . clone ( ) ,
2015-12-17 15:11:42 +01:00
children : vec ! [ ]
} ;
2015-12-26 15:47:07 +01:00
2016-02-27 01:37:12 +01:00
// write to batch
2016-02-27 10:19:33 +01:00
let mut block_details = HashMap ::new ( ) ;
block_details . insert ( parent_hash , parent_details ) ;
block_details . insert ( info . hash . clone ( ) , details ) ;
block_details
2016-02-27 01:37:12 +01:00
}
2015-12-17 15:11:42 +01:00
2016-02-27 10:19:33 +01:00
/// This function returns modified block receipts.
fn prepare_block_receipts_update ( & self , receipts : Vec < Receipt > , info : & BlockInfo ) -> HashMap < H256 , BlockReceipts > {
let mut block_receipts = HashMap ::new ( ) ;
block_receipts . insert ( info . hash . clone ( ) , BlockReceipts ::new ( receipts ) ) ;
block_receipts
2016-02-27 01:37:12 +01:00
}
2015-12-21 15:22:24 +01:00
2016-02-27 10:19:33 +01:00
	/// This function returns modified transaction addresses.
	///
	/// Values are `Some(address)` for transactions that become canonical and
	/// `None` for transactions whose address must be removed (present only in
	/// now-retracted blocks).
	fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, Option<TransactionAddress>> {
		let block = BlockView::new(block_bytes);
		let transaction_hashes = block.transaction_hashes();

		match info.location {
			BlockLocation::CanonChain => {
				// simple canon extension: each transaction in this block gets
				// an address pointing at it.
				transaction_hashes.into_iter()
					.enumerate()
					.map(|(i, tx_hash)| {
						(tx_hash, Some(TransactionAddress {
							block_hash: info.hash.clone(),
							index: i
						}))
					})
					.collect()
			},
			BlockLocation::BranchBecomingCanonChain(ref data) => {
				// re-point addresses at the enacted blocks of the new canon branch.
				let addresses = data.enacted.iter()
					.flat_map(|hash| {
						let body = self.block_body(hash).expect("Enacted block must be in database.");
						let hashes = body.transaction_hashes();
						hashes.into_iter()
							.enumerate()
							.map(|(i, tx_hash)| (tx_hash, Some(TransactionAddress {
								block_hash: hash.clone(),
								index: i,
							})))
							.collect::<HashMap<H256, Option<TransactionAddress>>>()
					});

				// addresses for the freshly inserted block itself.
				let current_addresses = transaction_hashes.into_iter()
					.enumerate()
					.map(|(i, tx_hash)| {
						(tx_hash, Some(TransactionAddress {
							block_hash: info.hash.clone(),
							index: i
						}))
					});

				// transactions from retracted blocks are marked for removal (`None`).
				let retracted = data.retracted.iter().flat_map(|hash| {
					let body = self.block_body(hash).expect("Retracted block must be in database.");
					let hashes = body.transaction_hashes();
					hashes.into_iter().map(|hash| (hash, None)).collect::<HashMap<H256, Option<TransactionAddress>>>()
				});

				// The order here is important! Don't remove transaction if it was part of enacted blocks as well.
				// (later entries overwrite earlier ones during collect, so
				// enacted/current addresses win over retracted removals)
				retracted.chain(addresses).chain(current_addresses).collect()
			},
			// a branch block changes no canonical transaction addresses.
			BlockLocation::Branch => HashMap::new(),
		}
	}
2016-02-12 14:03:23 +01:00
2016-02-27 10:19:33 +01:00
/// This functions returns modified blocks blooms.
2016-02-27 01:37:12 +01:00
///
2016-03-01 13:44:09 +01:00
/// To accelerate blooms lookups, blomms are stored in multiple
/// layers (BLOOM_LEVELS, currently 3).
2016-02-27 19:17:29 +01:00
/// ChainFilter is responsible for building and rebuilding these layers.
/// It returns them in HashMap, where values are Blooms and
/// keys are BloomIndexes. BloomIndex represents bloom location on one
/// of these layers.
2016-03-01 13:44:09 +01:00
///
2016-02-27 19:17:29 +01:00
/// To reduce number of queries to databse, block blooms are stored
2016-03-01 13:44:09 +01:00
/// in BlocksBlooms structure which contains info about several
2016-02-27 19:17:29 +01:00
/// (BLOOM_INDEX_SIZE, currently 16) consecutive blocks blooms.
2016-03-01 13:44:09 +01:00
///
2016-02-27 19:17:29 +01:00
/// Later, BloomIndexer is used to map bloom location on filter layer (BloomIndex)
/// to bloom location in database (BlocksBloomLocation).
2016-03-01 13:44:09 +01:00
///
2016-05-26 18:24:51 +02:00
fn prepare_block_blooms_update ( & self , block_bytes : & [ u8 ] , info : & BlockInfo ) -> HashMap < LogGroupPosition , BloomGroup > {
2016-02-27 01:37:12 +01:00
let block = BlockView ::new ( block_bytes ) ;
let header = block . header_view ( ) ;
2016-02-12 02:03:04 +01:00
2016-05-26 18:24:51 +02:00
let log_blooms = match info . location {
2016-02-27 01:37:12 +01:00
BlockLocation ::Branch = > HashMap ::new ( ) ,
BlockLocation ::CanonChain = > {
2016-08-24 13:59:50 +02:00
let log_bloom = header . log_bloom ( ) ;
if log_bloom . is_zero ( ) {
HashMap ::new ( )
} else {
let chain = bc ::group ::BloomGroupChain ::new ( self . blooms_config , self ) ;
chain . insert ( info . number as bc ::Number , Bloom ::from ( log_bloom ) . into ( ) )
}
2016-02-12 02:03:04 +01:00
} ,
2016-04-17 17:18:25 +02:00
BlockLocation ::BranchBecomingCanonChain ( ref data ) = > {
let ancestor_number = self . block_number ( & data . ancestor ) . unwrap ( ) ;
2016-01-10 22:55:07 +01:00
let start_number = ancestor_number + 1 ;
2016-05-26 18:24:51 +02:00
let range = start_number as bc ::Number .. self . best_block_number ( ) as bc ::Number ;
2016-02-12 02:03:04 +01:00
2016-05-26 18:24:51 +02:00
let mut blooms : Vec < bc ::Bloom > = data . enacted . iter ( )
2016-07-28 23:46:24 +02:00
. map ( | hash | self . block_header_data ( hash ) . unwrap ( ) )
2016-12-28 13:44:51 +01:00
. map ( | h | h . log_bloom ( ) )
2016-05-26 18:24:51 +02:00
. map ( Bloom ::from )
. map ( Into ::into )
2016-02-12 14:03:23 +01:00
. collect ( ) ;
2016-05-26 18:24:51 +02:00
blooms . push ( Bloom ::from ( header . log_bloom ( ) ) . into ( ) ) ;
2015-12-17 15:11:42 +01:00
2016-05-26 18:24:51 +02:00
let chain = bc ::group ::BloomGroupChain ::new ( self . blooms_config , self ) ;
chain . replace ( & range , blooms )
2016-02-27 01:37:12 +01:00
}
} ;
2016-02-16 14:46:21 +01:00
2016-05-26 18:24:51 +02:00
log_blooms . into_iter ( )
. map ( | p | ( From ::from ( p . 0 ) , From ::from ( p . 1 ) ) )
. collect ( )
2015-12-14 17:12:47 +01:00
}
2015-12-17 17:20:10 +01:00
/// Get best block hash.
2015-12-17 15:11:42 +01:00
pub fn best_block_hash ( & self ) -> H256 {
2016-07-13 19:59:59 +02:00
self . best_block . read ( ) . hash . clone ( )
2015-12-17 15:11:42 +01:00
}
2015-12-17 17:20:10 +01:00
/// Get best block number.
2016-01-11 01:07:58 +01:00
pub fn best_block_number ( & self ) -> BlockNumber {
2016-07-13 19:59:59 +02:00
self . best_block . read ( ) . number
2015-12-17 15:11:42 +01:00
}
2017-02-03 19:32:10 +01:00
/// Get best block timestamp.
pub fn best_block_timestamp ( & self ) -> u64 {
self . best_block . read ( ) . timestamp
}
2015-12-17 17:20:10 +01:00
/// Get best block total difficulty.
2015-12-17 15:11:42 +01:00
pub fn best_block_total_difficulty ( & self ) -> U256 {
2016-07-13 19:59:59 +02:00
self . best_block . read ( ) . total_difficulty
2015-12-16 17:39:15 +01:00
}
2016-07-28 23:46:24 +02:00
/// Get best block header
2016-12-28 13:44:51 +01:00
pub fn best_block_header ( & self ) -> encoded ::Header {
2016-07-28 23:46:24 +02:00
let block = self . best_block . read ( ) ;
2016-12-28 13:44:51 +01:00
let raw = BlockView ::new ( & block . block ) . header_view ( ) . rlp ( ) . as_raw ( ) . to_vec ( ) ;
encoded ::Header ::new ( raw )
2016-07-28 23:46:24 +02:00
}
2015-12-17 17:20:10 +01:00
/// Get current cache size.
2015-12-16 17:39:15 +01:00
pub fn cache_size ( & self ) -> CacheSize {
CacheSize {
2016-07-28 23:46:24 +02:00
blocks : self . block_headers . read ( ) . heap_size_of_children ( ) + self . block_bodies . read ( ) . heap_size_of_children ( ) ,
2016-07-13 19:59:59 +02:00
block_details : self . block_details . read ( ) . heap_size_of_children ( ) ,
transaction_addresses : self . transaction_addresses . read ( ) . heap_size_of_children ( ) ,
blocks_blooms : self . blocks_blooms . read ( ) . heap_size_of_children ( ) ,
block_receipts : self . block_receipts . read ( ) . heap_size_of_children ( ) ,
2015-12-16 17:39:15 +01:00
}
}
2016-01-18 15:48:38 +01:00
/// Ticks our cache system and throws out any old data.
2016-02-02 01:59:14 +01:00
pub fn collect_garbage ( & self ) {
2016-08-08 16:14:37 +02:00
let current_size = self . cache_size ( ) . total ( ) ;
2016-07-31 00:19:27 +02:00
2016-08-08 16:14:37 +02:00
let mut block_headers = self . block_headers . write ( ) ;
let mut block_bodies = self . block_bodies . write ( ) ;
let mut block_details = self . block_details . write ( ) ;
let mut block_hashes = self . block_hashes . write ( ) ;
let mut transaction_addresses = self . transaction_addresses . write ( ) ;
let mut blocks_blooms = self . blocks_blooms . write ( ) ;
let mut block_receipts = self . block_receipts . write ( ) ;
2016-08-08 16:25:48 +02:00
let mut cache_man = self . cache_man . lock ( ) ;
2016-08-08 16:14:37 +02:00
cache_man . collect_garbage ( current_size , | ids | {
2016-07-31 00:19:27 +02:00
for id in & ids {
match * id {
2016-12-09 23:01:43 +01:00
CacheId ::BlockHeader ( ref h ) = > { block_headers . remove ( h ) ; } ,
CacheId ::BlockBody ( ref h ) = > { block_bodies . remove ( h ) ; } ,
CacheId ::BlockDetails ( ref h ) = > { block_details . remove ( h ) ; }
CacheId ::BlockHashes ( ref h ) = > { block_hashes . remove ( h ) ; }
CacheId ::TransactionAddresses ( ref h ) = > { transaction_addresses . remove ( h ) ; }
CacheId ::BlocksBlooms ( ref h ) = > { blocks_blooms . remove ( h ) ; }
CacheId ::BlockReceipts ( ref h ) = > { block_receipts . remove ( h ) ; }
2016-02-02 01:59:14 +01:00
}
2016-01-18 19:23:28 +01:00
}
2016-08-08 16:14:37 +02:00
2016-07-31 00:19:27 +02:00
block_headers . shrink_to_fit ( ) ;
block_bodies . shrink_to_fit ( ) ;
block_details . shrink_to_fit ( ) ;
block_hashes . shrink_to_fit ( ) ;
transaction_addresses . shrink_to_fit ( ) ;
blocks_blooms . shrink_to_fit ( ) ;
block_receipts . shrink_to_fit ( ) ;
2016-08-08 16:14:37 +02:00
block_headers . heap_size_of_children ( ) +
block_bodies . heap_size_of_children ( ) +
block_details . heap_size_of_children ( ) +
block_hashes . heap_size_of_children ( ) +
transaction_addresses . heap_size_of_children ( ) +
blocks_blooms . heap_size_of_children ( ) +
block_receipts . heap_size_of_children ( )
2016-07-31 00:19:27 +02:00
} ) ;
2016-01-18 15:48:38 +01:00
}
2016-07-28 23:46:24 +02:00
/// Create a block body from a block.
pub fn block_to_body ( block : & [ u8 ] ) -> Bytes {
let mut body = RlpStream ::new_list ( 2 ) ;
let block_rlp = Rlp ::new ( block ) ;
body . append_raw ( block_rlp . at ( 1 ) . as_raw ( ) , 1 ) ;
body . append_raw ( block_rlp . at ( 2 ) . as_raw ( ) , 1 ) ;
body . out ( )
}
2016-10-18 18:16:00 +02:00
/// Returns general blockchain information
pub fn chain_info ( & self ) -> BlockChainInfo {
// ensure data consistencly by locking everything first
let best_block = self . best_block . read ( ) ;
let best_ancient_block = self . best_ancient_block . read ( ) ;
BlockChainInfo {
total_difficulty : best_block . total_difficulty . clone ( ) ,
pending_total_difficulty : best_block . total_difficulty . clone ( ) ,
genesis_hash : self . genesis_hash ( ) ,
best_block_hash : best_block . hash . clone ( ) ,
best_block_number : best_block . number ,
2017-02-03 19:32:10 +01:00
best_block_timestamp : best_block . timestamp ,
2016-10-18 18:16:00 +02:00
first_block_hash : self . first_block ( ) ,
first_block_number : From ::from ( self . first_block_number ( ) ) ,
ancient_block_hash : best_ancient_block . as_ref ( ) . map ( | b | b . hash . clone ( ) ) ,
ancient_block_number : best_ancient_block . as_ref ( ) . map ( | b | b . number ) ,
}
}
2016-10-27 15:26:29 +02:00
#[ cfg(test) ]
2017-02-20 17:21:55 +01:00
pub fn db ( & self ) -> & Arc < KeyValueDB > {
2016-10-27 15:26:29 +02:00
& self . db
}
2015-12-09 19:03:25 +01:00
}
2015-12-13 22:39:01 +01:00
2015-12-17 17:20:10 +01:00
#[ cfg(test) ]
mod tests {
2016-04-06 10:07:24 +02:00
#![ cfg_attr(feature= " dev " , allow(similar_names)) ]
2016-07-28 23:46:24 +02:00
use std ::sync ::Arc ;
2015-12-17 17:20:10 +01:00
use rustc_serialize ::hex ::FromHex ;
2017-02-20 17:21:55 +01:00
use util ::kvdb ::KeyValueDB ;
2015-12-17 17:20:10 +01:00
use util ::hash ::* ;
2016-03-01 13:44:09 +01:00
use util ::sha3 ::Hashable ;
2016-07-28 23:46:24 +02:00
use receipt ::Receipt ;
2016-05-26 18:24:51 +02:00
use blockchain ::{ BlockProvider , BlockChain , Config , ImportRoute } ;
2016-01-27 16:41:50 +01:00
use tests ::helpers ::* ;
2016-03-02 04:25:03 +01:00
use blockchain ::generator ::{ ChainGenerator , ChainIterator , BlockFinalizer } ;
2016-08-31 16:55:43 +02:00
use blockchain ::extras ::TransactionAddress ;
2016-03-01 13:44:09 +01:00
use views ::BlockView ;
2016-08-31 16:55:43 +02:00
use transaction ::{ Transaction , Action } ;
2016-09-14 12:02:30 +02:00
use log_entry ::{ LogEntry , LocalizedLogEntry } ;
2017-01-11 12:16:47 +01:00
use ethkey ::Secret ;
2017-02-13 16:38:47 +01:00
use header ::BlockNumber ;
2016-07-28 23:46:24 +02:00
2017-02-20 17:21:55 +01:00
fn new_db ( ) -> Arc < KeyValueDB > {
Arc ::new ( ::util ::kvdb ::in_memory ( ::db ::NUM_COLUMNS . unwrap_or ( 0 ) ) )
2016-07-28 23:46:24 +02:00
}
2017-02-20 17:21:55 +01:00
fn new_chain ( genesis : & [ u8 ] , db : Arc < KeyValueDB > ) -> BlockChain {
2017-01-23 15:27:11 +01:00
BlockChain ::new ( Config ::default ( ) , genesis , db )
2016-12-05 16:20:32 +01:00
}
2016-07-28 23:46:24 +02:00
#[ test ]
fn should_cache_best_block ( ) {
// given
let mut canon_chain = ChainGenerator ::default ( ) ;
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let first = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-07-28 23:46:24 +02:00
assert_eq! ( bc . best_block_number ( ) , 0 ) ;
// when
2017-02-20 17:21:55 +01:00
let mut batch = db . transaction ( ) ;
2016-08-25 16:43:56 +02:00
bc . insert_block ( & mut batch , & first , vec! [ ] ) ;
2016-08-01 19:10:13 +02:00
assert_eq! ( bc . best_block_number ( ) , 0 ) ;
bc . commit ( ) ;
2016-07-28 23:46:24 +02:00
// NOTE no db.write here (we want to check if best block is cached)
// then
assert_eq! ( bc . best_block_number ( ) , 1 ) ;
assert! ( bc . block ( & bc . best_block_hash ( ) ) . is_some ( ) , " Best block should be queryable even without DB write. " ) ;
}
2015-12-17 17:20:10 +01:00
#[ test ]
2016-03-01 13:44:09 +01:00
fn basic_blockchain_insert ( ) {
let mut canon_chain = ChainGenerator ::default ( ) ;
2016-03-02 04:25:03 +01:00
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let first = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
2016-03-01 13:44:09 +01:00
let genesis_hash = BlockView ::new ( & genesis ) . header_view ( ) . sha3 ( ) ;
let first_hash = BlockView ::new ( & first ) . header_view ( ) . sha3 ( ) ;
2015-12-17 17:20:10 +01:00
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2015-12-26 15:47:07 +01:00
2015-12-17 20:37:04 +01:00
assert_eq! ( bc . genesis_hash ( ) , genesis_hash . clone ( ) ) ;
assert_eq! ( bc . best_block_hash ( ) , genesis_hash . clone ( ) ) ;
2016-01-10 22:55:07 +01:00
assert_eq! ( bc . block_hash ( 0 ) , Some ( genesis_hash . clone ( ) ) ) ;
assert_eq! ( bc . block_hash ( 1 ) , None ) ;
2016-02-12 00:40:45 +01:00
assert_eq! ( bc . block_details ( & genesis_hash ) . unwrap ( ) . children , vec! [ ] ) ;
2015-12-17 17:20:10 +01:00
2017-02-20 17:21:55 +01:00
let mut batch = db . transaction ( ) ;
2016-08-25 16:43:56 +02:00
bc . insert_block ( & mut batch , & first , vec! [ ] ) ;
2016-07-28 23:46:24 +02:00
db . write ( batch ) . unwrap ( ) ;
2016-08-01 19:10:13 +02:00
bc . commit ( ) ;
2015-12-17 17:20:10 +01:00
2016-01-10 22:55:07 +01:00
assert_eq! ( bc . block_hash ( 0 ) , Some ( genesis_hash . clone ( ) ) ) ;
assert_eq! ( bc . best_block_number ( ) , 1 ) ;
2015-12-17 20:37:04 +01:00
assert_eq! ( bc . best_block_hash ( ) , first_hash . clone ( ) ) ;
2016-01-10 22:55:07 +01:00
assert_eq! ( bc . block_hash ( 1 ) , Some ( first_hash . clone ( ) ) ) ;
2015-12-17 20:37:04 +01:00
assert_eq! ( bc . block_details ( & first_hash ) . unwrap ( ) . parent , genesis_hash . clone ( ) ) ;
assert_eq! ( bc . block_details ( & genesis_hash ) . unwrap ( ) . children , vec! [ first_hash . clone ( ) ] ) ;
2016-01-10 22:55:07 +01:00
assert_eq! ( bc . block_hash ( 2 ) , None ) ;
2015-12-17 17:20:10 +01:00
}
2015-12-21 16:31:51 +01:00
2016-03-02 17:31:42 +01:00
#[ test ]
fn check_ancestry_iter ( ) {
let mut canon_chain = ChainGenerator ::default ( ) ;
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let genesis_hash = BlockView ::new ( & genesis ) . header_view ( ) . sha3 ( ) ;
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-03-02 17:31:42 +01:00
let mut block_hashes = vec! [ genesis_hash . clone ( ) ] ;
2016-09-14 12:02:30 +02:00
let mut batch = db . transaction ( ) ;
2016-03-02 17:31:42 +01:00
for _ in 0 .. 10 {
let block = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
block_hashes . push ( BlockView ::new ( & block ) . header_view ( ) . sha3 ( ) ) ;
2016-08-25 16:43:56 +02:00
bc . insert_block ( & mut batch , & block , vec! [ ] ) ;
2016-08-01 19:10:13 +02:00
bc . commit ( ) ;
2016-03-02 17:31:42 +01:00
}
2016-07-28 23:46:24 +02:00
db . write ( batch ) . unwrap ( ) ;
2016-03-02 17:31:42 +01:00
block_hashes . reverse ( ) ;
2016-03-02 18:32:54 +01:00
assert_eq! ( bc . ancestry_iter ( block_hashes [ 0 ] . clone ( ) ) . unwrap ( ) . collect ::< Vec < _ > > ( ) , block_hashes )
2016-03-02 17:31:42 +01:00
}
2015-12-21 16:31:51 +01:00
#[ test ]
2016-03-11 11:16:49 +01:00
#[ cfg_attr(feature= " dev " , allow(cyclomatic_complexity)) ]
2016-03-02 23:41:15 +01:00
fn test_find_uncles ( ) {
2016-03-01 16:22:06 +01:00
let mut canon_chain = ChainGenerator ::default ( ) ;
2016-03-02 23:41:15 +01:00
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let b1b = canon_chain . fork ( 1 ) . generate ( & mut finalizer . fork ( ) ) . unwrap ( ) ;
let b1a = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let b2b = canon_chain . fork ( 1 ) . generate ( & mut finalizer . fork ( ) ) . unwrap ( ) ;
let b2a = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let b3b = canon_chain . fork ( 1 ) . generate ( & mut finalizer . fork ( ) ) . unwrap ( ) ;
let b3a = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let b4b = canon_chain . fork ( 1 ) . generate ( & mut finalizer . fork ( ) ) . unwrap ( ) ;
let b4a = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let b5b = canon_chain . fork ( 1 ) . generate ( & mut finalizer . fork ( ) ) . unwrap ( ) ;
let b5a = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
2016-03-01 16:22:06 +01:00
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-07-28 23:46:24 +02:00
2016-08-25 16:43:56 +02:00
let mut batch = db . transaction ( ) ;
2016-08-03 19:01:48 +02:00
for b in & [ & b1a , & b1b , & b2a , & b2b , & b3a , & b3b , & b4a , & b4b , & b5a , & b5b ] {
2016-08-25 16:43:56 +02:00
bc . insert_block ( & mut batch , b , vec! [ ] ) ;
2016-08-01 19:10:13 +02:00
bc . commit ( ) ;
}
2016-08-25 16:43:56 +02:00
bc . insert_block ( & mut batch , & b1b , vec! [ ] ) ;
bc . insert_block ( & mut batch , & b2a , vec! [ ] ) ;
bc . insert_block ( & mut batch , & b2b , vec! [ ] ) ;
bc . insert_block ( & mut batch , & b3a , vec! [ ] ) ;
bc . insert_block ( & mut batch , & b3b , vec! [ ] ) ;
bc . insert_block ( & mut batch , & b4a , vec! [ ] ) ;
bc . insert_block ( & mut batch , & b4b , vec! [ ] ) ;
bc . insert_block ( & mut batch , & b5a , vec! [ ] ) ;
bc . insert_block ( & mut batch , & b5b , vec! [ ] ) ;
2016-07-28 23:46:24 +02:00
db . write ( batch ) . unwrap ( ) ;
2016-03-02 23:41:15 +01:00
assert_eq! (
[ & b4b , & b3b , & b2b ] . iter ( ) . map ( | b | BlockView ::new ( b ) . header ( ) ) . collect ::< Vec < _ > > ( ) ,
bc . find_uncle_headers ( & BlockView ::new ( & b4a ) . header_view ( ) . sha3 ( ) , 3 ) . unwrap ( )
) ;
// TODO: insert block that already includes one of them as an uncle to check it's not allowed.
}
2017-01-11 12:16:47 +01:00
fn secret ( ) -> Secret {
Secret ::from_slice ( & " " . sha3 ( ) ) . unwrap ( )
}
2016-09-28 15:49:42 +02:00
#[ test ]
fn test_fork_transaction_addresses ( ) {
let mut canon_chain = ChainGenerator ::default ( ) ;
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let mut fork_chain = canon_chain . fork ( 1 ) ;
let mut fork_finalizer = finalizer . fork ( ) ;
let t1 = Transaction {
nonce : 0. into ( ) ,
gas_price : 0. into ( ) ,
gas : 100_000. into ( ) ,
action : Action ::Create ,
value : 100. into ( ) ,
data : " 601080600c6000396000f3006000355415600957005b60203560003555 " . from_hex ( ) . unwrap ( ) ,
2017-01-11 12:16:47 +01:00
} . sign ( & secret ( ) , None ) ;
2016-09-28 15:49:42 +02:00
let b1a = canon_chain
. with_transaction ( t1 . clone ( ) )
. generate ( & mut finalizer ) . unwrap ( ) ;
// Empty block
let b1b = fork_chain
. generate ( & mut fork_finalizer ) . unwrap ( ) ;
let b2 = fork_chain
. generate ( & mut fork_finalizer ) . unwrap ( ) ;
let b1a_hash = BlockView ::new ( & b1a ) . header_view ( ) . sha3 ( ) ;
let b2_hash = BlockView ::new ( & b2 ) . header_view ( ) . sha3 ( ) ;
let t1_hash = t1 . hash ( ) ;
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-09-28 15:49:42 +02:00
let mut batch = db . transaction ( ) ;
let _ = bc . insert_block ( & mut batch , & b1a , vec! [ ] ) ;
bc . commit ( ) ;
let _ = bc . insert_block ( & mut batch , & b1b , vec! [ ] ) ;
bc . commit ( ) ;
db . write ( batch ) . unwrap ( ) ;
assert_eq! ( bc . best_block_hash ( ) , b1a_hash ) ;
assert_eq! ( bc . transaction_address ( & t1_hash ) , Some ( TransactionAddress {
block_hash : b1a_hash . clone ( ) ,
index : 0 ,
} ) ) ;
// now let's make forked chain the canon chain
let mut batch = db . transaction ( ) ;
let _ = bc . insert_block ( & mut batch , & b2 , vec! [ ] ) ;
bc . commit ( ) ;
db . write ( batch ) . unwrap ( ) ;
// Transaction should be retracted
assert_eq! ( bc . best_block_hash ( ) , b2_hash ) ;
assert_eq! ( bc . transaction_address ( & t1_hash ) , None ) ;
}
2016-08-31 16:55:43 +02:00
#[ test ]
fn test_overwriting_transaction_addresses ( ) {
let mut canon_chain = ChainGenerator ::default ( ) ;
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let mut fork_chain = canon_chain . fork ( 1 ) ;
let mut fork_finalizer = finalizer . fork ( ) ;
let t1 = Transaction {
nonce : 0. into ( ) ,
gas_price : 0. into ( ) ,
gas : 100_000. into ( ) ,
action : Action ::Create ,
value : 100. into ( ) ,
data : " 601080600c6000396000f3006000355415600957005b60203560003555 " . from_hex ( ) . unwrap ( ) ,
2017-01-11 12:16:47 +01:00
} . sign ( & secret ( ) , None ) ;
2016-08-31 16:55:43 +02:00
let t2 = Transaction {
nonce : 1. into ( ) ,
gas_price : 0. into ( ) ,
gas : 100_000. into ( ) ,
action : Action ::Create ,
value : 100. into ( ) ,
data : " 601080600c6000396000f3006000355415600957005b60203560003555 " . from_hex ( ) . unwrap ( ) ,
2017-01-11 12:16:47 +01:00
} . sign ( & secret ( ) , None ) ;
2016-08-31 16:55:43 +02:00
let t3 = Transaction {
nonce : 2. into ( ) ,
gas_price : 0. into ( ) ,
gas : 100_000. into ( ) ,
action : Action ::Create ,
value : 100. into ( ) ,
data : " 601080600c6000396000f3006000355415600957005b60203560003555 " . from_hex ( ) . unwrap ( ) ,
2017-01-11 12:16:47 +01:00
} . sign ( & secret ( ) , None ) ;
2016-08-31 16:55:43 +02:00
let b1a = canon_chain
. with_transaction ( t1 . clone ( ) )
. with_transaction ( t2 . clone ( ) )
. generate ( & mut finalizer ) . unwrap ( ) ;
// insert transactions in different order
let b1b = fork_chain
. with_transaction ( t2 . clone ( ) )
. with_transaction ( t1 . clone ( ) )
. generate ( & mut fork_finalizer ) . unwrap ( ) ;
let b2 = fork_chain
. with_transaction ( t3 . clone ( ) )
. generate ( & mut fork_finalizer ) . unwrap ( ) ;
let b1a_hash = BlockView ::new ( & b1a ) . header_view ( ) . sha3 ( ) ;
let b1b_hash = BlockView ::new ( & b1b ) . header_view ( ) . sha3 ( ) ;
let b2_hash = BlockView ::new ( & b2 ) . header_view ( ) . sha3 ( ) ;
let t1_hash = t1 . hash ( ) ;
let t2_hash = t2 . hash ( ) ;
let t3_hash = t3 . hash ( ) ;
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-08-31 16:55:43 +02:00
let mut batch = db . transaction ( ) ;
let _ = bc . insert_block ( & mut batch , & b1a , vec! [ ] ) ;
bc . commit ( ) ;
let _ = bc . insert_block ( & mut batch , & b1b , vec! [ ] ) ;
bc . commit ( ) ;
db . write ( batch ) . unwrap ( ) ;
assert_eq! ( bc . best_block_hash ( ) , b1a_hash ) ;
2016-09-28 15:49:42 +02:00
assert_eq! ( bc . transaction_address ( & t1_hash ) , Some ( TransactionAddress {
2016-08-31 16:55:43 +02:00
block_hash : b1a_hash . clone ( ) ,
index : 0 ,
2016-09-28 15:49:42 +02:00
} ) ) ;
assert_eq! ( bc . transaction_address ( & t2_hash ) , Some ( TransactionAddress {
2016-08-31 16:55:43 +02:00
block_hash : b1a_hash . clone ( ) ,
index : 1 ,
2016-09-28 15:49:42 +02:00
} ) ) ;
2016-08-31 16:55:43 +02:00
// now let's make forked chain the canon chain
let mut batch = db . transaction ( ) ;
let _ = bc . insert_block ( & mut batch , & b2 , vec! [ ] ) ;
bc . commit ( ) ;
db . write ( batch ) . unwrap ( ) ;
assert_eq! ( bc . best_block_hash ( ) , b2_hash ) ;
2016-09-28 15:49:42 +02:00
assert_eq! ( bc . transaction_address ( & t1_hash ) , Some ( TransactionAddress {
2016-08-31 16:55:43 +02:00
block_hash : b1b_hash . clone ( ) ,
index : 1 ,
2016-09-28 15:49:42 +02:00
} ) ) ;
assert_eq! ( bc . transaction_address ( & t2_hash ) , Some ( TransactionAddress {
2016-08-31 16:55:43 +02:00
block_hash : b1b_hash . clone ( ) ,
index : 0 ,
2016-09-28 15:49:42 +02:00
} ) ) ;
assert_eq! ( bc . transaction_address ( & t3_hash ) , Some ( TransactionAddress {
2016-08-31 16:55:43 +02:00
block_hash : b2_hash . clone ( ) ,
index : 0 ,
2016-09-28 15:49:42 +02:00
} ) ) ;
2016-08-31 16:55:43 +02:00
}
2015-12-21 16:31:51 +01:00
#[ test ]
2016-03-11 11:16:49 +01:00
#[ cfg_attr(feature= " dev " , allow(cyclomatic_complexity)) ]
2015-12-21 16:31:51 +01:00
fn test_small_fork ( ) {
2016-03-01 16:22:06 +01:00
let mut canon_chain = ChainGenerator ::default ( ) ;
2016-03-02 04:25:03 +01:00
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let b1 = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let b2 = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let b3b = canon_chain . fork ( 1 ) . generate ( & mut finalizer . fork ( ) ) . unwrap ( ) ;
let b3a = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
2016-03-01 16:22:06 +01:00
let genesis_hash = BlockView ::new ( & genesis ) . header_view ( ) . sha3 ( ) ;
let b1_hash = BlockView ::new ( & b1 ) . header_view ( ) . sha3 ( ) ;
let b2_hash = BlockView ::new ( & b2 ) . header_view ( ) . sha3 ( ) ;
let b3a_hash = BlockView ::new ( & b3a ) . header_view ( ) . sha3 ( ) ;
let b3b_hash = BlockView ::new ( & b3b ) . header_view ( ) . sha3 ( ) ;
2015-12-21 16:31:51 +01:00
// b3a is a part of canon chain, whereas b3b is part of sidechain
2016-03-01 16:22:06 +01:00
let best_block_hash = b3a_hash . clone ( ) ;
2015-12-21 16:31:51 +01:00
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-07-28 23:46:24 +02:00
2016-08-31 16:55:43 +02:00
let mut batch = db . transaction ( ) ;
2016-08-25 16:43:56 +02:00
let ir1 = bc . insert_block ( & mut batch , & b1 , vec! [ ] ) ;
2016-08-01 19:10:13 +02:00
bc . commit ( ) ;
2016-08-25 16:43:56 +02:00
let ir2 = bc . insert_block ( & mut batch , & b2 , vec! [ ] ) ;
2016-08-01 19:10:13 +02:00
bc . commit ( ) ;
2016-08-25 16:43:56 +02:00
let ir3b = bc . insert_block ( & mut batch , & b3b , vec! [ ] ) ;
2016-08-01 19:10:13 +02:00
bc . commit ( ) ;
2016-07-28 23:46:24 +02:00
db . write ( batch ) . unwrap ( ) ;
2016-08-03 10:35:04 +02:00
assert_eq! ( bc . block_hash ( 3 ) . unwrap ( ) , b3b_hash ) ;
2016-08-25 16:43:56 +02:00
let mut batch = db . transaction ( ) ;
let ir3a = bc . insert_block ( & mut batch , & b3a , vec! [ ] ) ;
2016-08-01 19:10:13 +02:00
bc . commit ( ) ;
2016-07-28 23:46:24 +02:00
db . write ( batch ) . unwrap ( ) ;
2016-03-09 21:55:23 +01:00
assert_eq! ( ir1 , ImportRoute {
2016-03-10 10:17:17 +01:00
enacted : vec ! [ b1_hash ] ,
retracted : vec ! [ ] ,
2016-04-24 23:16:06 +02:00
omitted : vec ! [ ] ,
2016-03-09 21:55:23 +01:00
} ) ;
assert_eq! ( ir2 , ImportRoute {
2016-03-10 10:17:17 +01:00
enacted : vec ! [ b2_hash ] ,
retracted : vec ! [ ] ,
2016-04-24 23:16:06 +02:00
omitted : vec ! [ ] ,
2016-03-09 21:55:23 +01:00
} ) ;
assert_eq! ( ir3b , ImportRoute {
2016-03-10 10:17:17 +01:00
enacted : vec ! [ b3b_hash ] ,
retracted : vec ! [ ] ,
2016-04-24 23:16:06 +02:00
omitted : vec ! [ ] ,
2016-03-09 21:55:23 +01:00
} ) ;
assert_eq! ( ir3a , ImportRoute {
2016-03-10 10:17:17 +01:00
enacted : vec ! [ b3a_hash ] ,
retracted : vec ! [ b3b_hash ] ,
2016-04-24 23:16:06 +02:00
omitted : vec ! [ ] ,
2016-03-09 21:55:23 +01:00
} ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( bc . best_block_hash ( ) , best_block_hash ) ;
2016-01-10 22:55:07 +01:00
assert_eq! ( bc . block_number ( & genesis_hash ) . unwrap ( ) , 0 ) ;
assert_eq! ( bc . block_number ( & b1_hash ) . unwrap ( ) , 1 ) ;
assert_eq! ( bc . block_number ( & b2_hash ) . unwrap ( ) , 2 ) ;
assert_eq! ( bc . block_number ( & b3a_hash ) . unwrap ( ) , 3 ) ;
assert_eq! ( bc . block_number ( & b3b_hash ) . unwrap ( ) , 3 ) ;
assert_eq! ( bc . block_hash ( 0 ) . unwrap ( ) , genesis_hash ) ;
assert_eq! ( bc . block_hash ( 1 ) . unwrap ( ) , b1_hash ) ;
assert_eq! ( bc . block_hash ( 2 ) . unwrap ( ) , b2_hash ) ;
assert_eq! ( bc . block_hash ( 3 ) . unwrap ( ) , b3a_hash ) ;
2015-12-21 16:31:51 +01:00
// test trie route
2017-04-20 16:21:53 +02:00
let r0_1 = bc . tree_route ( genesis_hash . clone ( ) , b1_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r0_1 . ancestor , genesis_hash ) ;
assert_eq! ( r0_1 . blocks , [ b1_hash . clone ( ) ] ) ;
assert_eq! ( r0_1 . index , 0 ) ;
2017-04-20 16:21:53 +02:00
let r0_2 = bc . tree_route ( genesis_hash . clone ( ) , b2_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r0_2 . ancestor , genesis_hash ) ;
assert_eq! ( r0_2 . blocks , [ b1_hash . clone ( ) , b2_hash . clone ( ) ] ) ;
assert_eq! ( r0_2 . index , 0 ) ;
2017-04-20 16:21:53 +02:00
let r1_3a = bc . tree_route ( b1_hash . clone ( ) , b3a_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r1_3a . ancestor , b1_hash ) ;
assert_eq! ( r1_3a . blocks , [ b2_hash . clone ( ) , b3a_hash . clone ( ) ] ) ;
assert_eq! ( r1_3a . index , 0 ) ;
2017-04-20 16:21:53 +02:00
let r1_3b = bc . tree_route ( b1_hash . clone ( ) , b3b_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r1_3b . ancestor , b1_hash ) ;
assert_eq! ( r1_3b . blocks , [ b2_hash . clone ( ) , b3b_hash . clone ( ) ] ) ;
assert_eq! ( r1_3b . index , 0 ) ;
2017-04-20 16:21:53 +02:00
let r3a_3b = bc . tree_route ( b3a_hash . clone ( ) , b3b_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r3a_3b . ancestor , b2_hash ) ;
assert_eq! ( r3a_3b . blocks , [ b3a_hash . clone ( ) , b3b_hash . clone ( ) ] ) ;
assert_eq! ( r3a_3b . index , 1 ) ;
2017-04-20 16:21:53 +02:00
let r1_0 = bc . tree_route ( b1_hash . clone ( ) , genesis_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r1_0 . ancestor , genesis_hash ) ;
assert_eq! ( r1_0 . blocks , [ b1_hash . clone ( ) ] ) ;
assert_eq! ( r1_0 . index , 1 ) ;
2017-04-20 16:21:53 +02:00
let r2_0 = bc . tree_route ( b2_hash . clone ( ) , genesis_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r2_0 . ancestor , genesis_hash ) ;
assert_eq! ( r2_0 . blocks , [ b2_hash . clone ( ) , b1_hash . clone ( ) ] ) ;
assert_eq! ( r2_0 . index , 2 ) ;
2015-12-26 15:47:07 +01:00
2017-04-20 16:21:53 +02:00
let r3a_1 = bc . tree_route ( b3a_hash . clone ( ) , b1_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r3a_1 . ancestor , b1_hash ) ;
assert_eq! ( r3a_1 . blocks , [ b3a_hash . clone ( ) , b2_hash . clone ( ) ] ) ;
assert_eq! ( r3a_1 . index , 2 ) ;
2017-04-20 16:21:53 +02:00
let r3b_1 = bc . tree_route ( b3b_hash . clone ( ) , b1_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r3b_1 . ancestor , b1_hash ) ;
assert_eq! ( r3b_1 . blocks , [ b3b_hash . clone ( ) , b2_hash . clone ( ) ] ) ;
assert_eq! ( r3b_1 . index , 2 ) ;
2017-04-20 16:21:53 +02:00
let r3b_3a = bc . tree_route ( b3b_hash . clone ( ) , b3a_hash . clone ( ) ) . unwrap ( ) ;
2015-12-21 16:31:51 +01:00
assert_eq! ( r3b_3a . ancestor , b2_hash ) ;
assert_eq! ( r3b_3a . blocks , [ b3b_hash . clone ( ) , b3a_hash . clone ( ) ] ) ;
assert_eq! ( r3b_3a . index , 1 ) ;
}
2015-12-21 16:38:31 +01:00
#[ test ]
fn test_reopen_blockchain_db ( ) {
2016-03-02 04:25:03 +01:00
let mut canon_chain = ChainGenerator ::default ( ) ;
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let first = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let genesis_hash = BlockView ::new ( & genesis ) . header_view ( ) . sha3 ( ) ;
let first_hash = BlockView ::new ( & first ) . header_view ( ) . sha3 ( ) ;
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2015-12-21 16:38:31 +01:00
{
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2015-12-21 16:38:31 +01:00
assert_eq! ( bc . best_block_hash ( ) , genesis_hash ) ;
2016-08-25 16:43:56 +02:00
let mut batch = db . transaction ( ) ;
bc . insert_block ( & mut batch , & first , vec! [ ] ) ;
2016-07-28 23:46:24 +02:00
db . write ( batch ) . unwrap ( ) ;
2016-08-01 19:10:13 +02:00
bc . commit ( ) ;
2016-03-02 04:25:03 +01:00
assert_eq! ( bc . best_block_hash ( ) , first_hash ) ;
2015-12-21 16:38:31 +01:00
}
{
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-07-28 23:46:24 +02:00
2016-03-02 04:25:03 +01:00
assert_eq! ( bc . best_block_hash ( ) , first_hash ) ;
2015-12-21 16:38:31 +01:00
}
}
2016-01-27 17:32:53 +01:00
#[ test ]
2016-01-27 18:31:14 +01:00
fn can_contain_arbitrary_block_sequence ( ) {
2017-04-06 19:26:17 +02:00
let bc = generate_dummy_blockchain ( 50 ) ;
2016-01-27 18:31:14 +01:00
assert_eq! ( bc . best_block_number ( ) , 49 ) ;
2016-01-27 17:32:53 +01:00
}
2016-01-28 11:55:03 +01:00
#[test]
fn can_collect_garbage() {
	// Builds a large chain, fills the header cache by walking the whole
	// chain backwards, then checks that collect_garbage() shrinks the
	// block cache below the configured threshold.
	let bc = generate_dummy_blockchain(3000);

	assert_eq!(bc.best_block_number(), 2999);
	let best_hash = bc.best_block_hash();

	// Touch every header from the head down to (and past) genesis so each
	// one is pulled into the cache. Idiomatic `while let` replaces the
	// original `while !opt.is_none() { opt.unwrap() }` pattern.
	let mut block_header = bc.block_header(&best_hash);
	while let Some(header) = block_header {
		block_header = bc.block_header(header.parent_hash());
	}
	assert!(bc.cache_size().blocks > 1024 * 1024);

	// Two passes: the manager ages entries on the first pass and evicts
	// on the second.
	for _ in 0..2 {
		bc.collect_garbage();
	}
	assert!(bc.cache_size().blocks < 1024 * 1024);
}
2016-01-28 15:38:42 +01:00
#[test]
fn can_contain_arbitrary_block_sequence_with_extra() {
	// Same as the plain sequence test, but via the generator that also
	// writes extras; 25 blocks means the head is number 24.
	let chain = generate_dummy_blockchain_with_extra(25);
	assert_eq!(chain.best_block_number(), 24);
}
#[test]
fn can_contain_only_genesis_block() {
	// A chain holding nothing but genesis reports block number 0 as best.
	let chain = generate_dummy_empty_blockchain();
	assert_eq!(chain.best_block_number(), 0);
}
2016-02-08 15:53:22 +01:00
// Verifies that every transaction of an inserted block can be looked up again
// by its hash: `transaction_address` maps hash -> location, and `transaction`
// resolves that location back to the identical transaction.
#[ test ]
fn find_transaction_by_hash ( ) {
// RLP-encoded genesis block (fixed test fixture).
let genesis = " f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0af81e09f8c46ca322193edfda764fa7e88e81923f802f1d325ec0b0308ac2cd0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200008083023e38808454c98c8142a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421880102030405060708c0c0 " . from_hex ( ) . unwrap ( ) ;
// RLP-encoded block #1 carrying seven signed transactions (fixed test fixture).
let b1 = " f904a8f901faa0ce1f26f798dd03c8782d63b3e42e79a64eaea5694ea686ac5d7ce3df5171d1aea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0a65c2364cd0f1542d761823dc0109c6b072f14c20459598c5455c274601438f4a070616ebd7ad2ed6fb7860cf7e9df00163842351c38a87cac2c1cb193895035a2a05c5b4fc43c2d45787f54e1ae7d27afdb4ad16dfc567c5692070d5c4556e0b1d7b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000183023ec683021536845685109780a029f07836e4e59229b3a065913afc27702642c683bba689910b2b2fd45db310d3888957e6d004a31802f902a7f85f800a8255f094aaaf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca0575da4e21b66fa764be5f74da9389e67693d066fb0d1312e19e17e501da00ecda06baf5a5327595f6619dfc2fcb3f2e6fb410b5810af3cb52d0e7508038e91a188f85f010a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba04fa966bf34b93abc1bcd665554b7f316b50f928477b50be0f3285ead29d18c5ba017bba0eeec1625ab433746955e125d46d80b7fdc97386c51266f842d8e02192ef85f020a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca004377418ae981cc32b1312b4a427a1d69a821b28db8584f5f2bd8c6d42458adaa053a1dba1af177fac92f3b6af0a9fa46a22adf56e686c93794b6a012bf254abf5f85f030a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca04fe13febd28a05f4fcb2f451d7ddc2dda56486d9f8c79a62b0ba4da775122615a0651b2382dd402df9ebc27f8cb4b2e0f3cea68dda2dca0ee9603608f0b6f51668f85f040a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba078e6a0ba086a08f8450e208a399bb2f2d2a0d984acd2517c7c7df66ccfab567da013254002cd45a97fac049ae00afbc43ed0d9961d0c56a3b2382c8
0ce41c198ddf85f050a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba0a7174d8f43ea71c8e3ca9477691add8d80ac8e0ed89d8d8b572041eef81f4a54a0534ea2e28ec4da3b5b944b18c51ec84a5cf35f5b3343c5fb86521fd2d388f506f85f060a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba034bd04065833536a10c77ee2a43a5371bc6d34837088b861dd9d4b7f44074b59a078807715786a13876d3455716a6b9cb2186b7a4887a5c31160fc877454958616c0 " . from_hex ( ) . unwrap ( ) ;
2016-08-31 16:55:43 +02:00
// Known hash of the b1 fixture above.
let b1_hash : H256 = " f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3 " . into ( ) ;
2016-02-08 15:53:22 +01:00
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-08-25 16:43:56 +02:00
// Insert b1, flush the batch to the backing DB, then commit the chain state.
let mut batch = db . transaction ( ) ;
bc . insert_block ( & mut batch , & b1 , vec! [ ] ) ;
2016-07-28 23:46:24 +02:00
db . write ( batch ) . unwrap ( ) ;
2016-08-01 19:10:13 +02:00
bc . commit ( ) ;
2016-03-01 13:44:09 +01:00
2016-02-08 15:53:22 +01:00
// The fixture block carries exactly seven transactions; each must
// round-trip through transaction_address() + transaction().
let transactions = bc . transactions ( & b1_hash ) . unwrap ( ) ;
assert_eq! ( transactions . len ( ) , 7 ) ;
for t in transactions {
2016-02-10 22:16:25 +01:00
assert_eq! ( bc . transaction ( & bc . transaction_address ( & t . hash ( ) ) . unwrap ( ) ) . unwrap ( ) , t ) ;
2016-02-08 15:53:22 +01:00
}
}
2016-02-16 11:41:34 +01:00
2017-02-20 17:21:55 +01:00
/// Test helper: inserts `bytes` as a block into `bc` with the given
/// `receipts`, flushes the resulting transaction to `db`, commits the
/// chain's pending state, and returns the import route of the insertion.
fn insert_block(db: &Arc<KeyValueDB>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
	let mut batch = db.transaction();
	let route = bc.insert_block(&mut batch, bytes, receipts);
	db.write(batch).unwrap();
	bc.commit();
	route
}
2016-09-14 12:02:30 +02:00
// Checks that logs() returns correctly localized entries (block, transaction
// and per-transaction/global log indices) across two blocks, and that the
// optional block-count limit restricts results to the most recent blocks.
#[ test ]
fn test_logs ( ) {
// given
let mut canon_chain = ChainGenerator ::default ( ) ;
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
// just insert dummy transaction so that #transactions=#receipts
let t1 = Transaction {
nonce : 0. into ( ) ,
gas_price : 0. into ( ) ,
gas : 100_000. into ( ) ,
action : Action ::Create ,
2016-09-30 11:14:30 +02:00
value : 101. into ( ) ,
2016-09-14 12:02:30 +02:00
data : " 601080600c6000396000f3006000355415600957005b60203560003555 " . from_hex ( ) . unwrap ( ) ,
2017-01-11 12:16:47 +01:00
} . sign ( & secret ( ) , None ) ;
2016-09-14 12:02:30 +02:00
let t2 = Transaction {
nonce : 0. into ( ) ,
gas_price : 0. into ( ) ,
gas : 100_000. into ( ) ,
action : Action ::Create ,
2016-09-30 11:14:30 +02:00
value : 102. into ( ) ,
2016-09-14 12:02:30 +02:00
data : " 601080600c6000396000f3006000355415600957005b60203560003555 " . from_hex ( ) . unwrap ( ) ,
2017-01-11 12:16:47 +01:00
} . sign ( & secret ( ) , None ) ;
2016-09-14 12:02:30 +02:00
let t3 = Transaction {
nonce : 0. into ( ) ,
gas_price : 0. into ( ) ,
gas : 100_000. into ( ) ,
action : Action ::Create ,
2016-09-30 11:14:30 +02:00
value : 103. into ( ) ,
2016-09-14 12:02:30 +02:00
data : " 601080600c6000396000f3006000355415600957005b60203560003555 " . from_hex ( ) . unwrap ( ) ,
2017-01-11 12:16:47 +01:00
} . sign ( & secret ( ) , None ) ;
2016-09-14 12:02:30 +02:00
let tx_hash1 = t1 . hash ( ) ;
let tx_hash2 = t2 . hash ( ) ;
let tx_hash3 = t3 . hash ( ) ;
// b1 carries t1 + t2; b2 carries t3.
let b1 = canon_chain . with_transaction ( t1 ) . with_transaction ( t2 ) . generate ( & mut finalizer ) . unwrap ( ) ;
let b2 = canon_chain . with_transaction ( t3 ) . generate ( & mut finalizer ) . unwrap ( ) ;
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-09-14 12:02:30 +02:00
// Receipts for b1: t1 produces logs with data [1] and [2]; t2 produces [3].
insert_block ( & db , & bc , & b1 , vec! [ Receipt {
2017-01-25 20:22:48 +01:00
state_root : Some ( H256 ::default ( ) ) ,
2016-09-14 12:02:30 +02:00
gas_used : 10_000. into ( ) ,
log_bloom : Default ::default ( ) ,
logs : vec ! [
LogEntry { address : Default ::default ( ) , topics : vec ! [ ] , data : vec ! [ 1 ] , } ,
LogEntry { address : Default ::default ( ) , topics : vec ! [ ] , data : vec ! [ 2 ] , } ,
] ,
} ,
Receipt {
2017-01-25 20:22:48 +01:00
state_root : Some ( H256 ::default ( ) ) ,
2016-09-14 12:02:30 +02:00
gas_used : 10_000. into ( ) ,
log_bloom : Default ::default ( ) ,
logs : vec ! [
LogEntry { address : Default ::default ( ) , topics : vec ! [ ] , data : vec ! [ 3 ] , } ,
] ,
} ] ) ;
// Receipt for b2: t3 produces a single log with data [4].
insert_block ( & db , & bc , & b2 , vec! [
Receipt {
2017-01-25 20:22:48 +01:00
state_root : Some ( H256 ::default ( ) ) ,
2016-09-14 12:02:30 +02:00
gas_used : 10_000. into ( ) ,
log_bloom : Default ::default ( ) ,
logs : vec ! [
LogEntry { address : Default ::default ( ) , topics : vec ! [ ] , data : vec ! [ 4 ] , } ,
] ,
}
] ) ;
// when
let block1 = BlockView ::new ( & b1 ) ;
let block2 = BlockView ::new ( & b2 ) ;
// logs1: no block limit, so all four logs from both blocks are returned.
let logs1 = bc . logs ( vec! [ 1 , 2 ] , | _ | true , None ) ;
// logs2: limit of 1 block restricts results to the most recent block (b2).
let logs2 = bc . logs ( vec! [ 1 , 2 ] , | _ | true , Some ( 1 ) ) ;
// then
assert_eq! ( logs1 , vec! [
LocalizedLogEntry {
entry : LogEntry { address : Default ::default ( ) , topics : vec ! [ ] , data : vec ! [ 1 ] } ,
block_hash : block1 . hash ( ) ,
block_number : block1 . header ( ) . number ( ) ,
transaction_hash : tx_hash1 . clone ( ) ,
transaction_index : 0 ,
2016-12-29 19:48:28 +01:00
transaction_log_index : 0 ,
2016-09-14 12:02:30 +02:00
log_index : 0 ,
} ,
LocalizedLogEntry {
entry : LogEntry { address : Default ::default ( ) , topics : vec ! [ ] , data : vec ! [ 2 ] } ,
block_hash : block1 . hash ( ) ,
block_number : block1 . header ( ) . number ( ) ,
transaction_hash : tx_hash1 . clone ( ) ,
transaction_index : 0 ,
2016-12-29 19:48:28 +01:00
transaction_log_index : 1 ,
2016-09-14 12:02:30 +02:00
log_index : 1 ,
} ,
LocalizedLogEntry {
entry : LogEntry { address : Default ::default ( ) , topics : vec ! [ ] , data : vec ! [ 3 ] } ,
block_hash : block1 . hash ( ) ,
block_number : block1 . header ( ) . number ( ) ,
transaction_hash : tx_hash2 . clone ( ) ,
transaction_index : 1 ,
2016-12-29 19:48:28 +01:00
transaction_log_index : 0 ,
2016-09-14 12:02:30 +02:00
log_index : 2 ,
} ,
LocalizedLogEntry {
entry : LogEntry { address : Default ::default ( ) , topics : vec ! [ ] , data : vec ! [ 4 ] } ,
block_hash : block2 . hash ( ) ,
block_number : block2 . header ( ) . number ( ) ,
transaction_hash : tx_hash3 . clone ( ) ,
transaction_index : 0 ,
2016-12-29 19:48:28 +01:00
transaction_log_index : 0 ,
2016-09-14 12:02:30 +02:00
log_index : 0 ,
}
] ) ;
assert_eq! ( logs2 , vec! [
LocalizedLogEntry {
entry : LogEntry { address : Default ::default ( ) , topics : vec ! [ ] , data : vec ! [ 4 ] } ,
block_hash : block2 . hash ( ) ,
block_number : block2 . header ( ) . number ( ) ,
transaction_hash : tx_hash3 . clone ( ) ,
transaction_index : 0 ,
2016-12-29 19:48:28 +01:00
transaction_log_index : 0 ,
2016-09-14 12:02:30 +02:00
log_index : 0 ,
}
] ) ;
}
2016-02-16 11:41:34 +01:00
// Exercises blocks_with_bloom() across chain reorganizations: blooms on a
// fork are only reported while that fork is canonical, and reorgs both away
// from and back to a branch update the reported block numbers accordingly.
#[ test ]
fn test_bloom_filter_simple ( ) {
2016-03-02 04:25:03 +01:00
// TODO: From here
2016-08-31 16:55:43 +02:00
let bloom_b1 : H2048 = " 00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000 " . into ( ) ;
2016-02-16 16:54:58 +01:00
2016-08-31 16:55:43 +02:00
let bloom_b2 : H2048 = " 00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 " . into ( ) ;
2016-02-16 11:41:34 +01:00
2016-08-31 16:55:43 +02:00
let bloom_ba : H2048 = " 00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 " . into ( ) ;
2016-02-22 09:12:15 +01:00
2016-03-02 04:25:03 +01:00
// Two branches from genesis: fork carries b1/b2/b3 (blooms b1, b2, ba),
// the canon chain carries b1a/b2a (bloom ba in both).
let mut canon_chain = ChainGenerator ::default ( ) ;
let mut finalizer = BlockFinalizer ::default ( ) ;
let genesis = canon_chain . generate ( & mut finalizer ) . unwrap ( ) ;
let mut fork = canon_chain . fork ( 1 ) ;
let mut fork_finalizer = finalizer . fork ( ) ;
let b1 = fork . with_bloom ( bloom_b1 . clone ( ) ) . generate ( & mut fork_finalizer ) . unwrap ( ) ;
let b2 = fork . with_bloom ( bloom_b2 . clone ( ) ) . generate ( & mut fork_finalizer ) . unwrap ( ) ;
let b3 = fork . with_bloom ( bloom_ba . clone ( ) ) . generate ( & mut fork_finalizer ) . unwrap ( ) ;
let b1a = canon_chain . with_bloom ( bloom_ba . clone ( ) ) . generate ( & mut finalizer ) . unwrap ( ) ;
let b2a = canon_chain . with_bloom ( bloom_ba . clone ( ) ) . generate ( & mut finalizer ) . unwrap ( ) ;
2017-02-20 17:21:55 +01:00
let db = new_db ( ) ;
2016-12-05 16:20:32 +01:00
let bc = new_chain ( & genesis , db . clone ( ) ) ;
2016-02-16 16:54:58 +01:00
2016-02-22 09:54:56 +01:00
// Empty chain: no blooms match anywhere.
let blocks_b1 = bc . blocks_with_bloom ( & bloom_b1 , 0 , 5 ) ;
let blocks_b2 = bc . blocks_with_bloom ( & bloom_b2 , 0 , 5 ) ;
2017-02-13 16:38:47 +01:00
assert_eq! ( blocks_b1 , Vec ::< BlockNumber > ::new ( ) ) ;
assert_eq! ( blocks_b2 , Vec ::< BlockNumber > ::new ( ) ) ;
2016-03-01 13:44:09 +01:00
2016-07-28 23:46:24 +02:00
insert_block ( & db , & bc , & b1 , vec! [ ] ) ;
2016-02-22 09:54:56 +01:00
let blocks_b1 = bc . blocks_with_bloom ( & bloom_b1 , 0 , 5 ) ;
let blocks_b2 = bc . blocks_with_bloom ( & bloom_b2 , 0 , 5 ) ;
2016-02-16 16:54:58 +01:00
assert_eq! ( blocks_b1 , vec! [ 1 ] ) ;
2017-02-13 16:38:47 +01:00
assert_eq! ( blocks_b2 , Vec ::< BlockNumber > ::new ( ) ) ;
2016-02-16 14:46:21 +01:00
2016-07-28 23:46:24 +02:00
insert_block ( & db , & bc , & b2 , vec! [ ] ) ;
2016-02-22 09:54:56 +01:00
let blocks_b1 = bc . blocks_with_bloom ( & bloom_b1 , 0 , 5 ) ;
let blocks_b2 = bc . blocks_with_bloom ( & bloom_b2 , 0 , 5 ) ;
2016-02-16 16:54:58 +01:00
assert_eq! ( blocks_b1 , vec! [ 1 ] ) ;
assert_eq! ( blocks_b2 , vec! [ 2 ] ) ;
2016-02-22 09:12:15 +01:00
// hasn't been forked yet
2016-07-28 23:46:24 +02:00
insert_block ( & db , & bc , & b1a , vec! [ ] ) ;
2016-02-22 09:54:56 +01:00
let blocks_b1 = bc . blocks_with_bloom ( & bloom_b1 , 0 , 5 ) ;
let blocks_b2 = bc . blocks_with_bloom ( & bloom_b2 , 0 , 5 ) ;
let blocks_ba = bc . blocks_with_bloom ( & bloom_ba , 0 , 5 ) ;
2016-02-22 09:12:15 +01:00
assert_eq! ( blocks_b1 , vec! [ 1 ] ) ;
assert_eq! ( blocks_b2 , vec! [ 2 ] ) ;
2017-02-13 16:38:47 +01:00
assert_eq! ( blocks_ba , Vec ::< BlockNumber > ::new ( ) ) ;
2016-02-22 09:12:15 +01:00
// fork has happened; the b1a/b2a branch is now canonical, so only the
// ba bloom (present in both its blocks) is visible.
2016-07-28 23:46:24 +02:00
insert_block ( & db , & bc , & b2a , vec! [ ] ) ;
2016-02-22 09:54:56 +01:00
let blocks_b1 = bc . blocks_with_bloom ( & bloom_b1 , 0 , 5 ) ;
let blocks_b2 = bc . blocks_with_bloom ( & bloom_b2 , 0 , 5 ) ;
let blocks_ba = bc . blocks_with_bloom ( & bloom_ba , 0 , 5 ) ;
2017-02-13 16:38:47 +01:00
assert_eq! ( blocks_b1 , Vec ::< BlockNumber > ::new ( ) ) ;
assert_eq! ( blocks_b2 , Vec ::< BlockNumber > ::new ( ) ) ;
2016-02-22 09:12:15 +01:00
assert_eq! ( blocks_ba , vec! [ 1 , 2 ] ) ;
2016-02-22 09:54:56 +01:00
// fork back: b3 makes the original branch longest again, restoring its blooms
2016-07-28 23:46:24 +02:00
insert_block ( & db , & bc , & b3 , vec! [ ] ) ;
2016-02-22 09:54:56 +01:00
let blocks_b1 = bc . blocks_with_bloom ( & bloom_b1 , 0 , 5 ) ;
let blocks_b2 = bc . blocks_with_bloom ( & bloom_b2 , 0 , 5 ) ;
let blocks_ba = bc . blocks_with_bloom ( & bloom_ba , 0 , 5 ) ;
assert_eq! ( blocks_b1 , vec! [ 1 ] ) ;
assert_eq! ( blocks_b2 , vec! [ 2 ] ) ;
assert_eq! ( blocks_ba , vec! [ 3 ] ) ;
2016-02-16 14:46:21 +01:00
}
2016-07-14 19:16:01 +02:00
#[test]
fn test_best_block_update() {
	// A late uncle must not displace the best block, and the best block
	// must be recovered correctly when the chain is re-opened.
	let mut gen = ChainGenerator::default();
	let mut fin = BlockFinalizer::default();
	let genesis = gen.generate(&mut fin).unwrap();

	let db = new_db();
	{
		let bc = new_chain(&genesis, db.clone());
		let uncle = gen.fork(1).generate(&mut fin.fork()).unwrap();

		let mut batch = db.transaction();
		// Extend the canon chain by five blocks.
		for _ in 0..5 {
			let canon_block = gen.generate(&mut fin).unwrap();
			bc.insert_block(&mut batch, &canon_block, vec![]);
			bc.commit();
		}
		assert_eq!(bc.best_block_number(), 5);

		// Inserting the shorter uncle branch leaves the best block alone.
		bc.insert_block(&mut batch, &uncle, vec![]);
		db.write(batch).unwrap();
		bc.commit();
	}

	// re-loading the blockchain should load the correct best block.
	let bc = new_chain(&genesis, db);
	assert_eq!(bc.best_block_number(), 5);
}
2016-07-17 23:03:29 +02:00
#[test]
fn test_rewind() {
	// rewind() drops the head block and returns the new best hash;
	// at genesis there is nothing left to rewind to, so it yields None.
	let mut gen = ChainGenerator::default();
	let mut fin = BlockFinalizer::default();
	let genesis = gen.generate(&mut fin).unwrap();
	let first = gen.generate(&mut fin).unwrap();
	let second = gen.generate(&mut fin).unwrap();
	let genesis_hash = BlockView::new(&genesis).header_view().sha3();
	let first_hash = BlockView::new(&first).header_view().sha3();
	let second_hash = BlockView::new(&second).header_view().sha3();

	let db = new_db();
	let bc = new_chain(&genesis, db.clone());

	// Insert/commit both blocks through one batch, written once at the end.
	let mut batch = db.transaction();
	bc.insert_block(&mut batch, &first, vec![]);
	bc.commit();
	bc.insert_block(&mut batch, &second, vec![]);
	bc.commit();
	db.write(batch).unwrap();

	assert_eq!(bc.rewind(), Some(first_hash.clone()));
	assert!(!bc.is_known(&second_hash));
	assert_eq!(bc.best_block_number(), 1);
	assert_eq!(bc.best_block_hash(), first_hash.clone());
	assert_eq!(bc.rewind(), Some(genesis_hash.clone()));
	assert_eq!(bc.rewind(), None);
}
2017-04-19 16:27:45 +02:00
#[test]
fn epoch_transitions_iter() {
	// epoch_transitions() must only yield transitions whose blocks are on
	// the canonical chain, both live and after re-opening the database.
	use blockchain::extras::EpochTransition;

	let mut gen = ChainGenerator::default();
	let mut fin = BlockFinalizer::default();
	let genesis = gen.generate(&mut fin).unwrap();
	let db = new_db();
	{
		let bc = new_chain(&genesis, db.clone());
		let uncle = gen.fork(1).generate(&mut fin.fork()).unwrap();
		let mut batch = db.transaction();
		// Five canon blocks, each registering an epoch transition 0..4.
		for epoch_num in 0..5 {
			let canon_block = gen.generate(&mut fin).unwrap();
			let hash = BlockView::new(&canon_block).header_view().sha3();
			bc.insert_block(&mut batch, &canon_block, vec![]);
			bc.insert_epoch_transition(&mut batch, epoch_num, EpochTransition {
				block_hash: hash,
				block_number: epoch_num + 1,
				proof: vec![],
				state_proof: vec![],
			});
			bc.commit();
		}
		assert_eq!(bc.best_block_number(), 5);

		// Register a transition (epoch 999) on a non-canonical uncle block.
		let hash = BlockView::new(&uncle).header_view().sha3();
		bc.insert_block(&mut batch, &uncle, vec![]);
		bc.insert_epoch_transition(&mut batch, 999, EpochTransition {
			block_hash: hash,
			block_number: 1,
			proof: vec![],
			state_proof: vec![],
		});
		db.write(batch).unwrap();
		bc.commit();

		// epoch 999 not in canonical chain.
		assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
	}

	// re-loading the blockchain should load the correct best block.
	let bc = new_chain(&genesis, db);
	assert_eq!(bc.best_block_number(), 5);
	assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
2015-12-17 17:20:10 +01:00
}