//! Blockchain database client.

use util::*;
use rocksdb::{Options, DB};
use blockchain::{BlockChain, BlockProvider, CacheSize};
use views::BlockView;
use error::*;
use header::BlockNumber;
use state::State;
use spec::Spec;
use engine::Engine;
use views::HeaderView;
use block_queue::{BlockQueue, BlockQueueInfo};
use service::NetSyncMessage;
use env_info::LastHashes;
use verification::*;
use block::*;

pub use blockchain::TreeRoute;

/// General block status
#[derive(Debug, Eq, PartialEq)]
pub enum BlockStatus {
	/// Part of the blockchain.
	InChain,
	/// Queued for import.
	Queued,
	/// Known as bad.
	Bad,
	/// Unknown.
	Unknown,
}

/// Information about the blockchain gathered together.
#[derive(Debug)]
pub struct BlockChainInfo {
	/// Blockchain difficulty.
	pub total_difficulty: U256,
	/// Block queue difficulty.
	pub pending_total_difficulty: U256,
	/// Genesis block hash.
	pub genesis_hash: H256,
	/// Best blockchain block hash.
	pub best_block_hash: H256,
	/// Best blockchain block number.
	pub best_block_number: BlockNumber,
}

impl fmt::Display for BlockChainInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "#{}.{}", self.best_block_number, self.best_block_hash)
	}
}

/// Blockchain database client. Owns and manages a blockchain and a block queue.
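///
/// A minimal usage sketch (illustrative only; `client` is assumed to be any
/// value implementing this trait, e.g. an `Arc<Client>`):
///
/// ```ignore
/// let info = client.chain_info();
/// if let Some(bytes) = client.block(&info.best_block_hash) {
/// 	let view = BlockView::new(&bytes);
/// 	assert_eq!(view.header().hash(), info.best_block_hash);
/// }
/// ```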
pub trait BlockChainClient: Sync + Send {
	/// Get raw block header data by block header hash.
	fn block_header(&self, hash: &H256) -> Option<Bytes>;

	/// Get raw block body data by block header hash.
	/// Block body is an RLP list of two items: transactions and uncles.
	fn block_body(&self, hash: &H256) -> Option<Bytes>;

	/// Get raw block data by block header hash.
	fn block(&self, hash: &H256) -> Option<Bytes>;

	/// Get block status by block header hash.
	fn block_status(&self, hash: &H256) -> BlockStatus;

	/// Get block total difficulty by block header hash.
	fn block_total_difficulty(&self, hash: &H256) -> Option<U256>;

	/// Get raw block header data by block number.
	fn block_header_at(&self, n: BlockNumber) -> Option<Bytes>;

	/// Get raw block body data by block number.
	/// Block body is an RLP list of two items: transactions and uncles.
	fn block_body_at(&self, n: BlockNumber) -> Option<Bytes>;

	/// Get raw block data by block number.
	fn block_at(&self, n: BlockNumber) -> Option<Bytes>;

	/// Get block status by block number.
	fn block_status_at(&self, n: BlockNumber) -> BlockStatus;

	/// Get block total difficulty by block number.
	fn block_total_difficulty_at(&self, n: BlockNumber) -> Option<U256>;

	/// Get a tree route between `from` and `to`.
	/// See `BlockChain::tree_route`.
	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;

	/// Get latest state node.
	fn state_data(&self, hash: &H256) -> Option<Bytes>;

	/// Get raw block receipts data by block header hash.
	fn block_receipts(&self, hash: &H256) -> Option<Bytes>;

	/// Import a block into the blockchain.
	fn import_block(&self, bytes: Bytes) -> ImportResult;

	/// Get block queue information.
	fn queue_info(&self) -> BlockQueueInfo;

	/// Clear block queue and abort all import activity.
	fn clear_queue(&self);

	/// Get blockchain information.
	fn chain_info(&self) -> BlockChainInfo;

	/// Get the best block header.
	fn best_block_header(&self) -> Bytes {
		self.block_header(&self.chain_info().best_block_hash).unwrap()
	}
}

/// Report on the status of a client.
#[derive(Default, Clone, Debug, Eq, PartialEq)]
pub struct ClientReport {
	/// How many blocks have been imported so far.
	pub blocks_imported: usize,
	/// How many transactions have been applied so far.
	pub transactions_applied: usize,
	/// How much gas has been processed so far.
	pub gas_processed: U256,
}

impl ClientReport {
	/// Alter internal reporting to reflect that the additional `block` has been processed.
	pub fn accrue_block(&mut self, block: &PreVerifiedBlock) {
		self.blocks_imported += 1;
		self.transactions_applied += block.transactions.len();
		self.gas_processed += block.header.gas_used;
	}
}

/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
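///
/// An illustrative sketch of the intended flow (not taken from a real call
/// site; `spec`, `path`, `channel` and `block_bytes` are assumed to be
/// provided by the caller, typically the client service):
///
/// ```ignore
/// let client = Client::new(spec, &path, channel).unwrap();
/// // Queue a block for verification and asynchronous import.
/// let _ = client.import_block(block_bytes);
/// // Flush the verification queue.
/// client.flush_queue();
/// ```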
pub struct Client {
	chain: Arc<RwLock<BlockChain>>,
	engine: Arc<Box<Engine>>,
	state_db: Arc<DB>,
	block_queue: RwLock<BlockQueue>,
	report: RwLock<ClientReport>,
	uncommited_states: RwLock<HashMap<H256, JournalDB>>,
	import_lock: Mutex<()>,
}
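
// Number of recent blocks whose state is kept available; blocks older than
// this are treated as ancient when state is committed in `import_verified_blocks`.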
const HISTORY: u64 = 1;

impl Client {
	/// Create a new client with given spec and DB path.
	pub fn new(spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage>) -> Result<Arc<Client>, Error> {
		let gb = spec.genesis_block();
		let chain = Arc::new(RwLock::new(BlockChain::new(&gb, path)));
		let mut opts = Options::new();
		opts.set_max_open_files(256);
		opts.create_if_missing(true);
		opts.set_use_fsync(false);
		/*
		opts.set_bytes_per_sync(8388608);
		opts.set_disable_data_sync(false);
		opts.set_block_cache_size_mb(1024);
		opts.set_table_cache_num_shard_bits(6);
		opts.set_max_write_buffer_number(32);
		opts.set_write_buffer_size(536870912);
		opts.set_target_file_size_base(1073741824);
		opts.set_min_write_buffer_number_to_merge(4);
		opts.set_level_zero_stop_writes_trigger(2000);
		opts.set_level_zero_slowdown_writes_trigger(0);
		opts.set_compaction_style(DBUniversalCompaction);
		opts.set_max_background_compactions(4);
		opts.set_max_background_flushes(4);
		opts.set_filter_deletes(false);
		opts.set_disable_auto_compactions(false);
		*/

		let mut state_path = path.to_path_buf();
		state_path.push("state");
		let db = Arc::new(DB::open(&opts, state_path.to_str().unwrap()).unwrap());

		let engine = Arc::new(try!(spec.to_engine()));
		let mut state_db = JournalDB::new_with_arc(db.clone());
		if engine.spec().ensure_db_good(&mut state_db) {
			state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error committing genesis state to state DB");
		}

		Ok(Arc::new(Client {
			chain: chain,
			engine: engine.clone(),
			state_db: db,
			block_queue: RwLock::new(BlockQueue::new(engine, message_channel)),
			report: RwLock::new(Default::default()),
			uncommited_states: RwLock::new(HashMap::new()),
			import_lock: Mutex::new(()),
		}))
	}

	/// Flush the block import queue.
	pub fn flush_queue(&self) {
		self.block_queue.write().unwrap().flush();
	}

	/// This is triggered by a message coming from the block queue when the block is ready for insertion.
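	/// Returns the number of blocks imported during this call.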
	pub fn import_verified_blocks(&self, _io: &IoChannel<NetSyncMessage>) -> usize {
		let mut ret = 0;
		let mut bad = HashSet::new();
		let _import_lock = self.import_lock.lock();
		let blocks = self.block_queue.write().unwrap().drain(128);
		let mut good_blocks = Vec::with_capacity(128);
		for block in blocks {
			if bad.contains(&block.header.parent_hash) {
				self.block_queue.write().unwrap().mark_as_bad(&block.header.hash());
				bad.insert(block.header.hash());
				continue;
			}

			let header = &block.header;
			if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) {
				warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
				self.block_queue.write().unwrap().mark_as_bad(&header.hash());
				bad.insert(block.header.hash());
				break;
			};
			let parent = match self.chain.read().unwrap().block_header(&header.parent_hash) {
				Some(p) => p,
				None => {
					warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({})", header.number(), header.hash(), header.parent_hash);
					self.block_queue.write().unwrap().mark_as_bad(&header.hash());
					bad.insert(block.header.hash());
					break;
				},
			};

			// Build the list of up to 256 most recent block hashes (parent first),
			// used as the `LastHashes` of the block's execution environment.
			let mut last_hashes = LastHashes::new();
			last_hashes.resize(256, H256::new());
			last_hashes[0] = header.parent_hash.clone();
			for i in 0..255 {
				match self.chain.read().unwrap().block_details(&last_hashes[i]) {
					Some(details) => {
						last_hashes[i + 1] = details.parent.clone();
					},
					None => break,
				}
			}

			let db = JournalDB::new_with_arc(self.state_db.clone());
			let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) {
				Ok(b) => b,
				Err(e) => {
					warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
					bad.insert(block.header.hash());
					self.block_queue.write().unwrap().mark_as_bad(&header.hash());
					break;
				}
			};
			if let Err(e) = verify_block_final(&header, result.block().header()) {
				warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
				self.block_queue.write().unwrap().mark_as_bad(&header.hash());
				break;
			}

			good_blocks.push(header.hash().clone());

			self.chain.write().unwrap().insert_block(&block.bytes); //TODO: err here?
			let ancient = if header.number() >= HISTORY { Some(header.number() - HISTORY) } else { None };
			match result.drain().commit(header.number(), &header.hash(), ancient.map(|n| (n, self.chain.read().unwrap().block_hash(n).unwrap()))) {
				Ok(_) => (),
				Err(e) => {
					warn!(target: "client", "State DB commit failed: {:?}", e);
					break;
				}
			}
			self.report.write().unwrap().accrue_block(&block);
			trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
			ret += 1;
		}
		self.block_queue.write().unwrap().mark_as_good(&good_blocks);
		ret
	}

	/// Clear cached state overlay
	pub fn clear_state(&self, hash: &H256) {
		self.uncommited_states.write().unwrap().remove(hash);
	}

	/// Get a copy of the best block's state.
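	/// The returned `State` is backed by a fresh `JournalDB` overlay over the
	/// client's state database, rooted at the best block's state root.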
	pub fn state(&self) -> State {
		State::from_existing(JournalDB::new_with_arc(self.state_db.clone()), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
	}

	/// Get info on the cache.
	pub fn cache_info(&self) -> CacheSize {
		self.chain.read().unwrap().cache_size()
	}

	/// Get the report.
	pub fn report(&self) -> ClientReport {
		self.report.read().unwrap().clone()
	}

	/// Tick the client.
	pub fn tick(&self) {
		self.chain.read().unwrap().collect_garbage();
	}

	/// Set up the cache behaviour.
	pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
		self.chain.write().unwrap().configure_cache(pref_cache_size, max_cache_size);
	}
}

impl BlockChainClient for Client {
	fn block_header(&self, hash: &H256) -> Option<Bytes> {
		self.chain.read().unwrap().block(hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())
	}

	fn block_body(&self, hash: &H256) -> Option<Bytes> {
		self.chain.read().unwrap().block(hash).map(|bytes| {
			let rlp = Rlp::new(&bytes);
			let mut body = RlpStream::new();
			body.append_raw(rlp.at(1).as_raw(), 1);
			body.append_raw(rlp.at(2).as_raw(), 1);
			body.out()
		})
	}

	fn block(&self, hash: &H256) -> Option<Bytes> {
		self.chain.read().unwrap().block(hash)
	}

	fn block_status(&self, hash: &H256) -> BlockStatus {
		if self.chain.read().unwrap().is_known(&hash) {
			BlockStatus::InChain
		} else {
			self.block_queue.read().unwrap().block_status(hash)
		}
	}

	fn block_total_difficulty(&self, hash: &H256) -> Option<U256> {
		self.chain.read().unwrap().block_details(hash).map(|d| d.total_difficulty)
	}

	fn block_header_at(&self, n: BlockNumber) -> Option<Bytes> {
		self.chain.read().unwrap().block_hash(n).and_then(|h| self.block_header(&h))
	}

	fn block_body_at(&self, n: BlockNumber) -> Option<Bytes> {
		self.chain.read().unwrap().block_hash(n).and_then(|h| self.block_body(&h))
	}

	fn block_at(&self, n: BlockNumber) -> Option<Bytes> {
		self.chain.read().unwrap().block_hash(n).and_then(|h| self.block(&h))
	}

	fn block_status_at(&self, n: BlockNumber) -> BlockStatus {
		match self.chain.read().unwrap().block_hash(n) {
			Some(h) => self.block_status(&h),
			None => BlockStatus::Unknown
		}
	}

	fn block_total_difficulty_at(&self, n: BlockNumber) -> Option<U256> {
		self.chain.read().unwrap().block_hash(n).and_then(|h| self.block_total_difficulty(&h))
	}

	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
		self.chain.read().unwrap().tree_route(from.clone(), to.clone())
	}

	fn state_data(&self, _hash: &H256) -> Option<Bytes> {
		unimplemented!();
	}

	fn block_receipts(&self, _hash: &H256) -> Option<Bytes> {
		unimplemented!();
	}

	fn import_block(&self, bytes: Bytes) -> ImportResult {
		let header = BlockView::new(&bytes).header();
		if self.chain.read().unwrap().is_known(&header.hash()) {
			return Err(ImportError::AlreadyInChain);
		}
		if self.block_status(&header.parent_hash) == BlockStatus::Unknown {
			return Err(ImportError::UnknownParent);
		}
		self.block_queue.write().unwrap().import_block(bytes)
	}

	fn queue_info(&self) -> BlockQueueInfo {
		self.block_queue.read().unwrap().queue_info()
	}

	fn clear_queue(&self) {
		self.block_queue.write().unwrap().clear();
	}

	fn chain_info(&self) -> BlockChainInfo {
		let chain = self.chain.read().unwrap();
		BlockChainInfo {
			total_difficulty: chain.best_block_total_difficulty(),
			pending_total_difficulty: chain.best_block_total_difficulty(),
			genesis_hash: chain.genesis_hash(),
			best_block_hash: chain.best_block_hash(),
			best_block_number: From::from(chain.best_block_number())
		}
	}
}