2016-12-11 19:31:31 +01:00
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
2016-02-05 13:40:41 +01:00
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
2016-11-18 12:14:52 +01:00
2016-08-17 19:25:02 +02:00
use std ::collections ::{ HashSet , HashMap , BTreeMap , VecDeque } ;
2016-11-18 12:14:52 +01:00
use std ::str ::FromStr ;
2016-07-13 19:59:59 +02:00
use std ::sync ::{ Arc , Weak } ;
2016-07-25 16:09:47 +02:00
use std ::path ::{ Path } ;
2016-07-07 09:39:32 +02:00
use std ::fmt ;
use std ::sync ::atomic ::{ AtomicUsize , AtomicBool , Ordering as AtomicOrdering } ;
2016-07-20 12:36:20 +02:00
use std ::time ::{ Instant } ;
2016-07-17 23:00:57 +02:00
use time ::precise_time_ns ;
2016-07-07 09:39:32 +02:00
// util
2016-11-18 12:14:52 +01:00
use util ::{ Bytes , PerfTimer , Itertools , Mutex , RwLock , MutexGuard , Hashable } ;
2016-10-03 11:13:10 +02:00
use util ::{ journaldb , TrieFactory , Trie } ;
use util ::{ U256 , H256 , Address , H2048 , Uint , FixedHash } ;
use util ::trie ::TrieSpec ;
2016-07-07 09:39:32 +02:00
use util ::kvdb ::* ;
// other
2016-08-05 10:32:04 +02:00
use io ::* ;
2016-12-28 13:44:51 +01:00
use views ::BlockView ;
2016-09-06 15:31:13 +02:00
use error ::{ ImportError , ExecutionError , CallError , BlockError , ImportResult , Error as EthcoreError } ;
2016-07-07 09:39:32 +02:00
use header ::BlockNumber ;
2016-11-03 22:22:25 +01:00
use state ::{ State , CleanupMode } ;
2016-01-11 12:28:59 +01:00
use spec ::Spec ;
2016-07-14 15:24:12 +02:00
use basic_types ::Seal ;
2016-07-28 20:32:20 +02:00
use engines ::Engine ;
2016-07-11 17:02:42 +02:00
use service ::ClientIoMessage ;
2016-01-14 01:28:37 +01:00
use env_info ::LastHashes ;
2016-06-28 13:23:15 +02:00
use verification ;
use verification ::{ PreverifiedBlock , Verifier } ;
2016-01-14 01:28:37 +01:00
use block ::* ;
2016-12-16 13:36:07 +01:00
use transaction ::{ LocalizedTransaction , SignedTransaction , Transaction , PendingTransaction , Action } ;
2016-05-26 18:24:51 +02:00
use blockchain ::extras ::TransactionAddress ;
2016-07-07 09:39:32 +02:00
use types ::filter ::Filter ;
2016-10-31 16:58:35 +01:00
use types ::mode ::Mode as IpcMode ;
2016-02-17 12:35:37 +01:00
use log_entry ::LocalizedLogEntry ;
2016-10-20 23:36:18 +02:00
use verification ::queue ::BlockQueue ;
2016-03-13 15:29:55 +01:00
use blockchain ::{ BlockChain , BlockProvider , TreeRoute , ImportRoute } ;
2016-08-17 19:25:02 +02:00
use client ::{
2016-12-09 23:01:43 +01:00
BlockId , TransactionId , UncleId , TraceId , ClientConfig , BlockChainClient ,
2017-01-10 12:23:59 +01:00
MiningBlockChainClient , EngineClient , TraceFilter , CallAnalytics , BlockImportError , Mode ,
2016-12-07 10:50:18 +01:00
ChainNotify , PruningInfo ,
2016-08-17 19:25:02 +02:00
} ;
2016-05-18 11:34:15 +02:00
use client ::Error as ClientError ;
2016-03-19 21:37:11 +01:00
use env_info ::EnvInfo ;
2016-04-06 13:05:58 +02:00
use executive ::{ Executive , Executed , TransactOptions , contract_address } ;
2016-12-29 19:48:28 +01:00
use receipt ::{ Receipt , LocalizedReceipt } ;
2016-05-04 15:22:22 +02:00
use trace ::{ TraceDB , ImportRequest as TraceImportRequest , LocalizedTrace , Database as TraceDatabase } ;
2016-05-02 12:17:30 +02:00
use trace ;
2016-07-28 20:31:29 +02:00
use trace ::FlatTransactionTraces ;
2016-10-28 16:42:24 +02:00
use evm ::{ Factory as EvmFactory , Schedule } ;
2016-07-06 17:15:59 +02:00
use miner ::{ Miner , MinerService } ;
2016-08-05 17:00:46 +02:00
use snapshot ::{ self , io as snapshot_io } ;
2016-08-24 16:53:36 +02:00
use factory ::Factories ;
2016-12-28 13:44:51 +01:00
use rlp ::{ View , UntrustedRlp } ;
2016-09-27 18:02:11 +02:00
use state_db ::StateDB ;
2016-10-28 16:10:30 +02:00
use rand ::OsRng ;
2016-11-18 12:14:52 +01:00
use client ::registry ::Registry ;
2016-12-28 13:44:51 +01:00
use encoded ;
2016-07-07 09:39:32 +02:00
// re-export
pub use types ::blockchain_info ::BlockChainInfo ;
pub use types ::block_status ::BlockStatus ;
pub use blockchain ::CacheSize as BlockChainCacheSize ;
2016-10-20 23:36:18 +02:00
pub use verification ::queue ::QueueInfo as BlockQueueInfo ;
2016-01-07 21:35:06 +01:00
2016-06-19 14:35:42 +02:00
/// Upper bound on externally received transactions buffered in the IO queue.
/// NOTE(review): the enforcement site is not in this chunk — presumably checked
/// where transactions are pushed onto `queue_transactions`; confirm at the caller.
const MAX_TX_QUEUE_SIZE: usize = 4096;
/// Block-queue size at or below which the client is considered idle enough to sleep.
/// NOTE(review): not referenced in this chunk — presumably used by the Mode/sleep logic.
const MAX_QUEUE_SIZE_TO_SLEEP_ON: usize = 2;
/// Minimum number of state-history eras retained; smaller configured values are
/// bumped up to this in `Client::new`.
const MIN_HISTORY_SIZE: u64 = 8;
2016-06-19 14:35:42 +02:00
2016-01-18 19:23:28 +01:00
/// Renders the chain head as `#<best block number>.<best block hash>`.
impl fmt::Display for BlockChainInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "#{}.{}", self.best_block_number, self.best_block_hash)
	}
}
2016-02-02 23:43:29 +01:00
/// Report on the status of a client.
///
/// Accumulated across imports via `ClientReport::accrue_block` and exposed
/// read-only through the `Client::report` lock.
#[derive(Default, Clone, Debug, Eq, PartialEq)]
pub struct ClientReport {
	/// How many blocks have been imported so far.
	pub blocks_imported: usize,
	/// How many transactions have been applied so far.
	pub transactions_applied: usize,
	/// How much gas has been processed so far.
	pub gas_processed: U256,
	/// Memory used by state DB.
	// NOTE(review): units (bytes?) are not established in this chunk — confirm at the writer.
	pub state_db_mem: usize,
}
impl ClientReport {
2016-02-02 23:43:29 +01:00
/// Alter internal reporting to reflect the additional `block` has been processed.
2016-03-01 00:02:48 +01:00
pub fn accrue_block ( & mut self , block : & PreverifiedBlock ) {
2016-01-18 23:23:32 +01:00
self . blocks_imported + = 1 ;
self . transactions_applied + = block . transactions . len ( ) ;
2016-08-29 11:35:24 +02:00
self . gas_processed = self . gas_processed + block . header . gas_used ( ) . clone ( ) ;
2016-01-18 23:23:32 +01:00
}
}
2016-07-05 17:50:46 +02:00
/// Timestamps tracking the client's sleep/wake history.
/// NOTE(review): presumably consulted by the Mode-based sleep logic (see the
/// `sleep_state` field on `Client`) — the reader is not in this chunk.
struct SleepState {
	/// Instant of the most recent activity; `None` while asleep (see `SleepState::new`).
	last_activity: Option<Instant>,
	/// Instant the client last went to sleep automatically; `None` while awake.
	last_autosleep: Option<Instant>,
}
impl SleepState {
fn new ( awake : bool ) -> Self {
SleepState {
last_activity : match awake { false = > None , true = > Some ( Instant ::now ( ) ) } ,
last_autosleep : match awake { false = > Some ( Instant ::now ( ) ) , true = > None } ,
}
}
}
2016-01-07 21:35:06 +01:00
/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
pub struct Client {
	/// When `false`, `import_verified_blocks` returns immediately without importing.
	enabled: AtomicBool,
	/// Current operating mode (`Mode::Dark`/`Mode::Off` start the client asleep — see `new`).
	mode: Mutex<Mode>,
	chain: RwLock<Arc<BlockChain>>,
	tracedb: RwLock<TraceDB<BlockChain>>,
	engine: Arc<Engine>,
	config: ClientConfig,
	pruning: journaldb::Algorithm,
	/// Backing key-value database shared with `chain`, `tracedb` and `state_db`.
	db: RwLock<Arc<Database>>,
	state_db: Mutex<StateDB>,
	block_queue: BlockQueue,
	report: RwLock<ClientReport>,
	/// Serializes block import; held for the duration of `import_verified_blocks`
	/// and `import_old_block`.
	import_lock: Mutex<()>,
	panic_handler: Arc<PanicHandler>,
	verifier: Box<Verifier>,
	miner: Arc<Miner>,
	sleep_state: Mutex<SleepState>,
	/// Whether the client is currently awake (initialized from `mode` in `new`).
	liveness: AtomicBool,
	io_channel: Mutex<IoChannel<ClientIoMessage>>,
	/// Listeners notified of new blocks/transactions; held weakly so they may drop freely.
	notify: RwLock<Vec<Weak<ChainNotify>>>,
	/// Count of transactions pending in the IO queue; decremented in
	/// `import_queued_transactions`.
	queue_transactions: AtomicUsize,
	/// Cache of recent block hashes, front = most recent
	/// (see `build_last_hashes` / `update_last_hashes`).
	last_hashes: RwLock<VecDeque<H256>>,
	factories: Factories,
	/// Number of state-history eras kept before pruning; at least `MIN_HISTORY_SIZE`.
	history: u64,
	/// OS randomness source; used during old-block verification (`import_old_block`).
	rng: Mutex<OsRng>,
	/// Callback invoked on mode changes, if registered via `on_mode_change`.
	on_mode_change: Mutex<Option<Box<FnMut(&Mode) + 'static + Send>>>,
	/// Name-registry contract handle, populated in `new` when the spec declares
	/// a "registrar" address.
	registrar: Mutex<Option<Registry>>,
}
2016-06-28 13:23:15 +02:00
impl Client {
2016-08-05 17:00:46 +02:00
/// Create a new client with given spec and DB path and custom verifier.
///
/// Opens the backing key-value database at `path`, seeds the genesis state if
/// the journal DB is empty, prunes state history beyond the configured bound,
/// constructs the block queue, and — if the spec's additional params declare a
/// "registrar" address — wires up the on-chain name registry.
pub fn new(
	config: ClientConfig,
	spec: &Spec,
	path: &Path,
	miner: Arc<Miner>,
	message_channel: IoChannel<ClientIoMessage>,
	db_config: &DatabaseConfig,
) -> Result<Arc<Client>, ClientError> {
	let path = path.to_path_buf();
	let db = Arc::new(Database::open(&db_config, &path.to_str().expect("DB path could not be converted to string.")).map_err(ClientError::Database)?);

	// Fat tries keep extra data for address lookups; secure tries are the default.
	let trie_spec = match config.fat_db {
		true => TrieSpec::Fat,
		false => TrieSpec::Secure,
	};

	let trie_factory = TrieFactory::new(trie_spec);
	let factories = Factories {
		vm: EvmFactory::new(config.vm_type.clone(), config.jump_table_size),
		trie: trie_factory,
		accountdb: Default::default(),
	};

	let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
	let mut state_db = StateDB::new(journal_db, config.state_cache_size);
	if state_db.journal_db().is_empty() {
		// Fresh database: write the genesis state. Sets the correct state root.
		state_db = spec.ensure_db_good(state_db, &factories)?;
		let mut batch = DBTransaction::new(&db);
		state_db.journal_under(&mut batch, 0, &spec.genesis_header().hash())?;
		db.write(batch).map_err(ClientError::Database)?;
	}

	let gb = spec.genesis_block();
	let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone(), spec.engine.clone()));
	let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));

	trace!("Cleanup journal: DB Earliest = {:?}, Latest = {:?}", state_db.journal_db().earliest_era(), state_db.journal_db().latest_era());
	// Clamp the configured history to the supported minimum.
	let history = if config.history < MIN_HISTORY_SIZE {
		info!(target: "client", "Ignoring pruning history parameter of {}\
			, falling back to minimum of {}",
			config.history, MIN_HISTORY_SIZE);
		MIN_HISTORY_SIZE
	} else {
		config.history
	};

	// Mark any eras older than the history window as canonical so the journal
	// can prune them; one write batch per era.
	if let (Some(earliest), Some(latest)) = (state_db.journal_db().earliest_era(), state_db.journal_db().latest_era()) {
		if latest > earliest && latest - earliest > history {
			for era in earliest..(latest - history + 1) {
				trace!("Removing era {}", era);
				let mut batch = DBTransaction::new(&db);
				state_db.mark_canonical(&mut batch, era, &chain.block_hash(era).expect("Old block not found in the database"))?;
				db.write(batch).map_err(ClientError::Database)?;
			}
		}
	}

	// Sanity check: the best block's state root should be present in the journal DB.
	if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.journal_db().contains(h.state_root())) {
		warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex());
	}

	let engine = spec.engine.clone();

	let block_queue = BlockQueue::new(config.queue.clone(), engine.clone(), message_channel.clone(), config.verifier_type.verifying_seal());
	let panic_handler = PanicHandler::new_in_arc();
	panic_handler.forward_from(&block_queue);

	// Dark and Off modes start the client asleep.
	let awake = match config.mode { Mode::Dark(..) | Mode::Off => false, _ => true };

	let client = Arc::new(Client {
		enabled: AtomicBool::new(true),
		sleep_state: Mutex::new(SleepState::new(awake)),
		liveness: AtomicBool::new(awake),
		mode: Mutex::new(config.mode.clone()),
		chain: RwLock::new(chain),
		tracedb: tracedb,
		engine: engine,
		pruning: config.pruning.clone(),
		verifier: verification::new(config.verifier_type.clone()),
		config: config,
		db: RwLock::new(db),
		state_db: Mutex::new(state_db),
		block_queue: block_queue,
		report: RwLock::new(Default::default()),
		import_lock: Mutex::new(()),
		panic_handler: panic_handler,
		miner: miner,
		io_channel: Mutex::new(message_channel),
		notify: RwLock::new(Vec::new()),
		queue_transactions: AtomicUsize::new(0),
		last_hashes: RwLock::new(VecDeque::new()),
		factories: factories,
		history: history,
		rng: Mutex::new(OsRng::new().map_err(::util::UtilError::StdIo)?),
		on_mode_change: Mutex::new(None),
		registrar: Mutex::new(None),
	});

	// Hook up the name registry if the spec advertises one. The closure holds a
	// weak reference back to the client to avoid an Arc cycle.
	if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) {
		trace!(target: "client", "Found registrar at {}", reg_addr);
		let weak = Arc::downgrade(&client);
		let registrar = Registry::new(reg_addr, move |a, d| weak.upgrade().ok_or("No client!".into()).and_then(|c| c.call_contract(a, d)));
		*client.registrar.lock() = Some(registrar);
	}
	Ok(client)
}
2016-01-13 23:15:44 +01:00
2016-07-20 12:36:20 +02:00
/// Adds an actor to be notified on certain events
2016-07-20 18:13:56 +02:00
pub fn add_notify ( & self , target : Arc < ChainNotify > ) {
self . notify . write ( ) . push ( Arc ::downgrade ( & target ) ) ;
2016-07-11 17:02:42 +02:00
}
2016-12-06 19:23:15 +01:00
/// Returns engine reference.
///
/// Borrows the consensus engine shared by this client (trait object behind an `Arc`).
pub fn engine(&self) -> &Engine {
	&*self.engine
}
2016-07-20 12:36:20 +02:00
/// Invoke `f` on every registered listener that is still alive, silently
/// skipping any whose weak reference no longer upgrades.
fn notify<F>(&self, f: F) where F: Fn(&ChainNotify) {
	let listeners = self.notify.read();
	for weak in listeners.iter() {
		if let Some(listener) = weak.upgrade() {
			f(&*listener);
		}
	}
}
2016-11-18 12:14:52 +01:00
/// Get the Registry object - useful for looking up names.
///
/// Returns the mutex guard; `None` inside it means no registrar address was
/// found in the spec when the client was constructed.
pub fn registrar(&self) -> MutexGuard<Option<Registry>> {
	self.registrar.lock()
}
2016-11-16 10:45:55 +01:00
/// Register an action to be done if a mode change happens.
///
/// Replaces any previously registered callback.
pub fn on_mode_change<F>(&self, f: F) where F: 'static + FnMut(&Mode) + Send {
	*self.on_mode_change.lock() = Some(Box::new(f));
}
2016-01-25 18:56:36 +01:00
/// Flush the block import queue.
pub fn flush_queue ( & self ) {
2016-02-21 19:46:29 +01:00
self . block_queue . flush ( ) ;
2016-07-19 09:23:53 +02:00
while ! self . block_queue . queue_info ( ) . is_empty ( ) {
2016-07-19 10:54:25 +02:00
self . import_verified_blocks ( ) ;
2016-07-19 09:23:53 +02:00
}
2016-01-25 18:56:36 +01:00
}
2016-11-03 22:22:25 +01:00
/// The env info as of the best block.
fn latest_env_info ( & self ) -> EnvInfo {
2016-12-28 13:44:51 +01:00
let header = self . best_block_header ( ) ;
2016-11-03 22:22:25 +01:00
EnvInfo {
2016-12-28 13:44:51 +01:00
number : header . number ( ) ,
author : header . author ( ) ,
timestamp : header . timestamp ( ) ,
difficulty : header . difficulty ( ) ,
last_hashes : self . build_last_hashes ( header . hash ( ) ) ,
2016-11-03 22:22:25 +01:00
gas_used : U256 ::default ( ) ,
2016-12-28 13:44:51 +01:00
gas_limit : header . gas_limit ( ) ,
2016-11-03 22:22:25 +01:00
}
}
2016-08-03 22:03:40 +02:00
/// Build the list of up to 256 most recent block hashes starting at
/// `parent_hash` (index 0), refreshing the `last_hashes` cache on the slow path.
fn build_last_hashes(&self, parent_hash: H256) -> Arc<LastHashes> {
	{
		// Fast path: the cache already starts at `parent_hash` — pad it to 256
		// entries and return. Scoped so the read lock drops before the write below.
		let hashes = self.last_hashes.read();
		if hashes.front().map_or(false, |h| h == &parent_hash) {
			let mut res = Vec::from(hashes.clone());
			res.resize(256, H256::default());
			return Arc::new(res);
		}
	}
	// Slow path: walk parent links back from `parent_hash`, filling up to 255
	// ancestors; stop early if a block's details are missing.
	let mut last_hashes = LastHashes::new();
	last_hashes.resize(256, H256::default());
	last_hashes[0] = parent_hash;
	let chain = self.chain.read();
	for i in 0..255 {
		match chain.block_details(&last_hashes[i]) {
			Some(details) => {
				last_hashes[i + 1] = details.parent.clone();
			},
			None => break,
		}
	}
	// Refresh the cache with the freshly built list.
	let mut cached_hashes = self.last_hashes.write();
	*cached_hashes = VecDeque::from(last_hashes.clone());
	Arc::new(last_hashes)
}
2016-03-27 20:33:23 +02:00
/// Verify a preverified block against the current chain, enact it on its
/// parent's state, and run final verification. Failures are logged via `warn!`
/// and reported as `Err(())`.
fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<LockedBlock, ()> {
	let engine = &*self.engine;
	let header = &block.header;
	let chain = self.chain.read();
	// Check the block isn't so old we won't be able to enact it.
	let best_block_number = chain.best_block_number();
	if best_block_number >= self.history && header.number() <= best_block_number - self.history {
		warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
		return Err(());
	}

	// Verify Block Family
	let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, &**chain);
	if let Err(e) = verify_family_result {
		warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
		return Err(());
	};

	// Check if Parent is in chain
	let chain_has_parent = chain.block_header(header.parent_hash());
	if let Some(parent) = chain_has_parent {
		// Enact Verified Block: replay it on a canonical clone of the parent's state.
		let last_hashes = self.build_last_hashes(header.parent_hash().clone());
		let db = self.state_db.lock().boxed_clone_canon(header.parent_hash());

		let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone());
		let locked_block = enact_result.map_err(|e| {
			warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
		})?;

		// Final Verification: compare the enacted header against the claimed one.
		if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) {
			warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
			return Err(());
		}

		Ok(locked_block)
	} else {
		warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({})", header.number(), header.hash(), header.parent_hash());
		Err(())
	}
}
2016-06-29 21:49:12 +02:00
fn calculate_enacted_retracted ( & self , import_results : & [ ImportRoute ] ) -> ( Vec < H256 > , Vec < H256 > ) {
2016-03-13 15:29:55 +01:00
fn map_to_vec ( map : Vec < ( H256 , bool ) > ) -> Vec < H256 > {
map . into_iter ( ) . map ( | ( k , _v ) | k ) . collect ( )
}
// In ImportRoute we get all the blocks that have been enacted and retracted by single insert.
// Because we are doing multiple inserts some of the blocks that were enacted in import `k`
// could be retracted in import `k+1`. This is why to understand if after all inserts
// the block is enacted or retracted we iterate over all routes and at the end final state
// will be in the hashmap
2016-06-29 21:49:12 +02:00
let map = import_results . iter ( ) . fold ( HashMap ::new ( ) , | mut map , route | {
for hash in & route . enacted {
map . insert ( hash . clone ( ) , true ) ;
2016-03-13 15:29:55 +01:00
}
2016-06-29 21:49:12 +02:00
for hash in & route . retracted {
map . insert ( hash . clone ( ) , false ) ;
2016-03-13 15:29:55 +01:00
}
map
} ) ;
// Split to enacted retracted (using hashmap value)
let ( enacted , retracted ) = map . into_iter ( ) . partition ( | & ( _k , v ) | v ) ;
// And convert tuples to keys
( map_to_vec ( enacted ) , map_to_vec ( retracted ) )
}
2016-03-14 17:36:51 +01:00
2016-01-15 01:03:29 +01:00
/// This is triggered by a message coming from a block queue when the block is ready for insertion
2016-07-19 09:21:41 +02:00
pub fn import_verified_blocks ( & self ) -> usize {
2016-12-11 16:52:41 +01:00
// Shortcut out if we know we're incapable of syncing the chain.
if ! self . enabled . load ( AtomicOrdering ::Relaxed ) {
return 0 ;
}
2016-10-20 14:49:12 +02:00
let max_blocks_to_import = 4 ;
2016-12-08 12:03:34 +01:00
let ( imported_blocks , import_results , invalid_blocks , imported , proposed_blocks , duration , is_empty ) = {
2016-07-11 17:02:42 +02:00
let mut imported_blocks = Vec ::with_capacity ( max_blocks_to_import ) ;
let mut invalid_blocks = HashSet ::new ( ) ;
2016-12-08 12:03:34 +01:00
let mut proposed_blocks = Vec ::with_capacity ( max_blocks_to_import ) ;
2016-07-11 17:02:42 +02:00
let mut import_results = Vec ::with_capacity ( max_blocks_to_import ) ;
2016-02-23 18:44:13 +01:00
2016-07-11 17:02:42 +02:00
let _import_lock = self . import_lock . lock ( ) ;
2016-10-20 14:49:12 +02:00
let blocks = self . block_queue . drain ( max_blocks_to_import ) ;
if blocks . is_empty ( ) {
return 0 ;
}
2016-07-11 17:02:42 +02:00
let _timer = PerfTimer ::new ( " import_verified_blocks " ) ;
2016-07-20 12:36:20 +02:00
let start = precise_time_ns ( ) ;
2016-02-23 18:44:13 +01:00
2016-07-11 17:02:42 +02:00
for block in blocks {
let header = & block . header ;
2016-10-10 17:43:44 +02:00
let is_invalid = invalid_blocks . contains ( header . parent_hash ( ) ) ;
2016-10-11 16:19:00 +02:00
if is_invalid {
2016-07-11 17:02:42 +02:00
invalid_blocks . insert ( header . hash ( ) ) ;
continue ;
}
2016-10-11 16:19:00 +02:00
if let Ok ( closed_block ) = self . check_and_close_block ( & block ) {
2016-12-08 12:03:34 +01:00
if self . engine . is_proposal ( & block . header ) {
2016-12-08 21:13:32 +01:00
self . block_queue . mark_as_good ( & [ header . hash ( ) ] ) ;
2016-12-08 12:03:34 +01:00
proposed_blocks . push ( block . bytes ) ;
} else {
imported_blocks . push ( header . hash ( ) ) ;
2016-07-17 23:00:57 +02:00
2016-12-08 12:03:34 +01:00
let route = self . commit_block ( closed_block , & header . hash ( ) , & block . bytes ) ;
import_results . push ( route ) ;
2016-03-05 16:46:04 +01:00
2016-12-08 12:03:34 +01:00
self . report . write ( ) . accrue_block ( & block ) ;
}
2016-10-10 17:43:44 +02:00
} else {
invalid_blocks . insert ( header . hash ( ) ) ;
}
2016-07-11 17:02:42 +02:00
}
2016-02-23 18:44:13 +01:00
2016-07-11 17:02:42 +02:00
let imported = imported_blocks . len ( ) ;
let invalid_blocks = invalid_blocks . into_iter ( ) . collect ::< Vec < H256 > > ( ) ;
2016-02-24 10:55:34 +01:00
2016-10-20 14:49:12 +02:00
if ! invalid_blocks . is_empty ( ) {
self . block_queue . mark_as_bad ( & invalid_blocks ) ;
2016-03-10 00:21:07 +01:00
}
2016-10-20 14:49:12 +02:00
let is_empty = self . block_queue . mark_as_good ( & imported_blocks ) ;
2016-07-20 12:36:20 +02:00
let duration_ns = precise_time_ns ( ) - start ;
2016-12-08 12:03:34 +01:00
( imported_blocks , import_results , invalid_blocks , imported , proposed_blocks , duration_ns , is_empty )
2016-07-11 17:02:42 +02:00
} ;
2016-02-24 10:55:34 +01:00
{
2016-10-20 14:49:12 +02:00
if ! imported_blocks . is_empty ( ) & & is_empty {
2016-06-29 21:49:12 +02:00
let ( enacted , retracted ) = self . calculate_enacted_retracted ( & import_results ) ;
2016-05-31 21:17:46 +02:00
2016-10-20 14:49:12 +02:00
if is_empty {
2016-05-31 21:17:46 +02:00
self . miner . chain_new_blocks ( self , & imported_blocks , & invalid_blocks , & enacted , & retracted ) ;
}
2016-07-20 12:36:20 +02:00
self . notify ( | notify | {
2016-07-11 17:02:42 +02:00
notify . new_blocks (
2016-07-20 12:36:20 +02:00
imported_blocks . clone ( ) ,
invalid_blocks . clone ( ) ,
enacted . clone ( ) ,
retracted . clone ( ) ,
2016-07-11 17:02:42 +02:00
Vec ::new ( ) ,
2016-12-08 12:03:34 +01:00
proposed_blocks . clone ( ) ,
2016-07-20 12:36:20 +02:00
duration ,
2016-07-11 17:02:42 +02:00
) ;
2016-07-20 12:36:20 +02:00
} ) ;
2016-02-24 10:55:34 +01:00
}
2016-02-11 22:14:06 +01:00
}
2016-02-24 10:55:34 +01:00
2016-09-06 15:31:13 +02:00
self . db . read ( ) . flush ( ) . expect ( " DB flush failed. " ) ;
2016-02-23 18:44:13 +01:00
imported
2016-01-13 23:15:44 +01:00
}
2016-01-18 19:23:28 +01:00
2016-10-18 18:16:00 +02:00
/// Import a block with transaction receipts.
/// The block is guaranteed to be the next best blocks in the first block sequence.
/// Does no sealing or transaction validation.
fn import_old_block(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result<H256, ::error::Error> {
	let block = BlockView::new(&block_bytes);
	let header = block.header();
	let hash = header.hash();
	// Same lock as regular import: old-block import must not interleave with it.
	let _import_lock = self.import_lock.lock();
	{
		let _timer = PerfTimer::new("import_old_block");
		let mut rng = self.rng.lock();
		let chain = self.chain.read();

		// verify block.
		// NOTE(review): the RNG presumably drives sampled verification inside
		// `verify_old_block` — confirm in the snapshot module.
		::snapshot::verify_old_block(
			&mut *rng,
			&header,
			&*self.engine,
			&*chain,
			Some(&block_bytes),
			false,
		)?;

		// Commit results: insert out of order, without updating the best block.
		let receipts = ::rlp::decode(&receipts_bytes);
		let mut batch = DBTransaction::new(&self.db.read());
		chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, false, true);
		// Final commit to the DB
		self.db.read().write_buffered(batch);
		chain.commit();
	}
	self.db.read().flush().expect("DB flush failed.");
	Ok(hash)
}
2016-07-01 02:08:14 +02:00
/// Commit an enacted block to the database: journal its state, mark an ancient
/// block canonical for pruning, insert the block plus receipts into the chain,
/// import traces, sync the state cache, and refresh the last-hashes cache.
/// Returns the resulting `ImportRoute`.
fn commit_block<B>(&self, block: B, hash: &H256, block_data: &[u8]) -> ImportRoute where B: IsBlock + Drain {
	let number = block.header().number();
	let parent = block.header().parent_hash().clone();
	let chain = self.chain.read();

	// Commit results
	let receipts = block.receipts().to_owned();
	let traces = block.traces().clone().unwrap_or_else(Vec::new);
	let traces: Vec<FlatTransactionTraces> = traces.into_iter()
		.map(Into::into)
		.collect();

	let mut batch = DBTransaction::new(&self.db.read());
	// CHECK! I *think* this is fine, even if the state_root is equal to another
	// already-imported block of the same number.
	// TODO: Prove it with a test.
	let mut state = block.drain();

	state.journal_under(&mut batch, number, hash).expect("DB commit failed");

	// Once the block at `number - history` drops out of the window, mark it
	// canonical so older journal entries become prunable.
	if number >= self.history {
		let n = number - self.history;
		if let Some(ancient_hash) = chain.block_hash(n) {
			state.mark_canonical(&mut batch, n, &ancient_hash).expect("DB commit failed");
		} else {
			debug!(target: "client", "Missing expected hash for block {}", n);
		}
	}

	let route = chain.insert_block(&mut batch, block_data, receipts);
	self.tracedb.read().import(&mut batch, TraceImportRequest {
		traces: traces.into(),
		block_hash: hash.clone(),
		block_number: number,
		enacted: route.enacted.clone(),
		retracted: route.retracted.len()
	});

	// The block is canonical iff it ends up as the last enacted hash.
	let is_canon = route.enacted.last().map_or(false, |h| h == hash);
	state.sync_cache(&route.enacted, &route.retracted, is_canon);
	// Final commit to the DB
	self.db.read().write_buffered(batch);
	chain.commit();
	self.update_last_hashes(&parent, hash);
	route
}
2016-07-17 09:18:15 +02:00
/// If the cached list currently starts at `parent`, push `hash` as the new
/// most-recent entry, evicting the oldest so at most 256 are kept; otherwise
/// leave the cache untouched (it will be rebuilt lazily by `build_last_hashes`).
fn update_last_hashes(&self, parent: &H256, hash: &H256) {
	let mut hashes = self.last_hashes.write();
	let extends_cache = hashes.front().map_or(false, |h| h == parent);
	if extends_cache {
		if hashes.len() > 255 {
			hashes.pop_back();
		}
		hashes.push_front(hash.clone());
	}
}
2016-06-19 14:35:42 +02:00
/// Import transactions from the IO queue
2016-12-10 21:22:19 +01:00
pub fn import_queued_transactions ( & self , transactions : & [ Bytes ] , peer_id : usize ) -> usize {
2016-09-19 10:38:47 +02:00
trace! ( target : " external_tx " , " Importing queued " ) ;
2016-06-19 14:35:42 +02:00
let _timer = PerfTimer ::new ( " import_queued_transactions " ) ;
self . queue_transactions . fetch_sub ( transactions . len ( ) , AtomicOrdering ::SeqCst ) ;
2016-12-10 14:56:41 +01:00
let txs : Vec < SignedTransaction > = transactions . iter ( ) . filter_map ( | bytes | UntrustedRlp ::new ( bytes ) . as_val ( ) . ok ( ) ) . collect ( ) ;
let hashes : Vec < _ > = txs . iter ( ) . map ( | tx | tx . hash ( ) ) . collect ( ) ;
self . notify ( | notify | {
2016-12-10 21:22:19 +01:00
notify . transactions_received ( hashes . clone ( ) , peer_id ) ;
2016-12-10 14:56:41 +01:00
} ) ;
2016-07-06 17:15:59 +02:00
let results = self . miner . import_external_transactions ( self , txs ) ;
2016-06-19 14:35:42 +02:00
results . len ( )
}
2016-12-06 19:23:15 +01:00
/// Get shared miner reference.
// Cheap: only bumps the `Arc` refcount.
pub fn miner(&self) -> Arc<Miner> {
	self.miner.clone()
}
2016-12-11 12:32:01 +01:00
/// Replace io channel. Useful for testing.
// Swaps the channel under the lock; subsequent sends use the new channel.
pub fn set_io_channel(&self, io_channel: IoChannel<ClientIoMessage>) {
	*self.io_channel.lock() = io_channel;
}
2016-07-27 21:34:32 +02:00
/// Attempt to get a copy of a specific block's final state.
///
/// This will not fail if given BlockId::Latest.
/// Otherwise, this can fail (but may not) if the DB prunes state or the block
/// is unknown.
pub fn state_at(&self, id: BlockId) -> Option<State> {
	// fast path for latest state.
	match id.clone() {
		// Pending: prefer the miner's open-block state, fall back to best-block state.
		BlockId::Pending => return self.miner.pending_state().or_else(|| Some(self.state())),
		BlockId::Latest => return Some(self.state()),
		_ => {},
	}

	let block_number = match self.block_number(id.clone()) {
		Some(num) => num,
		None => return None,
	};

	self.block_header(id).and_then(|header| {
		let db = self.state_db.lock().boxed_clone();

		// early exit for pruned blocks: state older than `history` blocks behind
		// the best block is gone from a pruned DB.
		if db.is_pruned() && self.chain.read().best_block_number() >= block_number + self.history {
			return None;
		}

		let root = header.state_root();
		State::from_existing(db, root, self.engine.account_start_nonce(), self.factories.clone()).ok()
	})
}
2016-07-27 21:34:32 +02:00
/// Attempt to get a copy of a specific block's beginning state.
///
2016-12-09 23:01:43 +01:00
/// This will not fail if given BlockId::Latest.
2016-07-27 21:34:32 +02:00
/// Otherwise, this can fail (but may not) if the DB prunes state.
2016-12-09 23:01:43 +01:00
pub fn state_at_beginning ( & self , id : BlockId ) -> Option < State > {
2016-07-27 21:34:32 +02:00
// fast path for latest state.
match id {
2016-12-09 23:01:43 +01:00
BlockId ::Pending = > self . state_at ( BlockId ::Latest ) ,
2016-07-27 21:34:32 +02:00
id = > match self . block_number ( id ) {
None | Some ( 0 ) = > None ,
2016-12-09 23:01:43 +01:00
Some ( n ) = > self . state_at ( BlockId ::Number ( n - 1 ) ) ,
2016-07-27 21:34:32 +02:00
}
}
}
2016-01-26 15:00:22 +01:00
/// Get a copy of the best block's state.
pub fn state(&self) -> State {
	let header = self.best_block_header();
	State::from_existing(
		// Canonical clone keyed on the best hash so the canonical cache is shared.
		self.state_db.lock().boxed_clone_canon(&header.hash()),
		header.state_root(),
		self.engine.account_start_nonce(),
		self.factories.clone())
	.expect("State root of best block header always valid.")
}
2016-01-18 19:23:28 +01:00
/// Get info on the cache.
// Delegates to the blockchain's own cache accounting.
pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
	self.chain.read().cache_size()
}
2016-01-18 23:23:32 +01:00
/// Get the report.
// Snapshot of the accumulated client report, with the state-DB memory usage
// refreshed at call time.
pub fn report(&self) -> ClientReport {
	let mut report = self.report.read().clone();
	report.state_db_mem = self.state_db.lock().mem_used();
	report
}
2016-01-18 19:23:28 +01:00
/// Tick the client.
// TODO: manage by real events.
// Periodic housekeeping: garbage collection plus sleep/wake-up bookkeeping.
pub fn tick(&self) {
	self.check_garbage();
	self.check_snooze();
}
// Run garbage collection on the chain cache, the block verification queue
// and the trace DB.
fn check_garbage(&self) {
	self.chain.read().collect_garbage();
	self.block_queue.collect_garbage();
	self.tracedb.read().collect_garbage();
}
2016-07-07 09:39:32 +02:00
2016-11-13 15:52:33 +01:00
// Apply the sleep/wake policy of the configured mode.
fn check_snooze(&self) {
	let mode = self.mode.lock().clone();
	match mode {
		Mode::Dark(timeout) => {
			// Dark mode: go to sleep `timeout` after the last activity; no auto-wake.
			let mut ss = self.sleep_state.lock();
			if let Some(t) = ss.last_activity {
				if Instant::now() > t + timeout {
					self.sleep();
					ss.last_activity = None;
				}
			}
		}
		Mode::Passive(timeout, wakeup_after) => {
			// Passive mode: sleep `timeout` after the last activity, then wake
			// again `wakeup_after` after the auto-sleep, cycling between the two.
			let mut ss = self.sleep_state.lock();
			let now = Instant::now();
			if let Some(t) = ss.last_activity {
				if now > t + timeout {
					self.sleep();
					ss.last_activity = None;
					ss.last_autosleep = Some(now);
				}
			}
			if let Some(t) = ss.last_autosleep {
				if now > t + wakeup_after {
					self.wake_up();
					ss.last_activity = Some(now);
					ss.last_autosleep = None;
				}
			}
		}
		// Active/Off modes need no periodic snooze handling.
		_ => {}
	}
}
2016-08-08 18:41:30 +02:00
/// Take a snapshot at the given block.
/// If the ID given is "latest", this will default to 1000 blocks behind.
2016-12-09 23:01:43 +01:00
pub fn take_snapshot < W : snapshot_io ::SnapshotWriter + Send > ( & self , writer : W , at : BlockId , p : & snapshot ::Progress ) -> Result < ( ) , EthcoreError > {
2016-09-27 18:02:11 +02:00
let db = self . state_db . lock ( ) . journal_db ( ) . boxed_clone ( ) ;
2016-08-05 17:00:46 +02:00
let best_block_number = self . chain_info ( ) . best_block_number ;
2016-12-27 12:53:56 +01:00
let block_number = self . block_number ( at ) . ok_or ( snapshot ::Error ::InvalidStartingBlock ( at ) ) ? ;
2016-08-08 18:41:30 +02:00
2016-10-14 14:44:56 +02:00
if best_block_number > self . history + block_number & & db . is_pruned ( ) {
2016-08-08 18:41:30 +02:00
return Err ( snapshot ::Error ::OldBlockPrunedDB . into ( ) ) ;
}
2016-10-17 13:05:57 +02:00
let history = ::std ::cmp ::min ( self . history , 1000 ) ;
2016-08-08 18:41:30 +02:00
let start_hash = match at {
2016-12-09 23:01:43 +01:00
BlockId ::Latest = > {
2016-10-17 13:05:57 +02:00
let start_num = match db . earliest_era ( ) {
Some ( era ) = > ::std ::cmp ::max ( era , best_block_number - history ) ,
None = > best_block_number - history ,
2016-08-08 18:41:30 +02:00
} ;
2016-12-09 23:01:43 +01:00
match self . block_hash ( BlockId ::Number ( start_num ) ) {
2016-10-14 14:44:56 +02:00
Some ( h ) = > h ,
None = > return Err ( snapshot ::Error ::InvalidStartingBlock ( at ) . into ( ) ) ,
}
2016-08-08 18:41:30 +02:00
}
_ = > match self . block_hash ( at ) {
Some ( hash ) = > hash ,
None = > return Err ( snapshot ::Error ::InvalidStartingBlock ( at ) . into ( ) ) ,
} ,
2016-08-05 17:00:46 +02:00
} ;
2016-12-27 12:53:56 +01:00
snapshot ::take_snapshot ( & self . chain . read ( ) , start_hash , db . as_hashdb ( ) , writer , p ) ? ;
2016-08-05 17:00:46 +02:00
Ok ( ( ) )
}
2016-10-14 14:44:56 +02:00
/// Ask the client what the history parameter is.
// Number of recent blocks whose state is kept in a pruned DB.
pub fn pruning_history(&self) -> u64 {
	self.history
}
2016-12-09 23:01:43 +01:00
// Resolve a `BlockId` to a concrete block hash against `chain`.
fn block_hash(chain: &BlockChain, id: BlockId) -> Option<H256> {
	match id {
		BlockId::Hash(hash) => Some(hash),
		BlockId::Number(number) => chain.block_hash(number),
		BlockId::Earliest => chain.block_hash(0),
		// Pending is approximated by the best block here: the pending block
		// has no hash of its own yet.
		BlockId::Latest | BlockId::Pending => Some(chain.best_block_hash()),
	}
}
2016-02-13 13:05:28 +01:00
2016-12-09 23:01:43 +01:00
fn transaction_address ( & self , id : TransactionId ) -> Option < TransactionAddress > {
2016-03-20 17:29:39 +01:00
match id {
2016-12-09 23:01:43 +01:00
TransactionId ::Hash ( ref hash ) = > self . chain . read ( ) . transaction_address ( hash ) ,
TransactionId ::Location ( id , index ) = > Self ::block_hash ( & self . chain . read ( ) , id ) . map ( | hash | TransactionAddress {
2016-03-20 17:29:39 +01:00
block_hash : hash ,
2016-05-31 21:03:44 +02:00
index : index ,
2016-03-20 17:29:39 +01:00
} )
}
}
2016-07-05 17:50:46 +02:00
fn wake_up ( & self ) {
if ! self . liveness . load ( AtomicOrdering ::Relaxed ) {
self . liveness . store ( true , AtomicOrdering ::Relaxed ) ;
2016-07-20 12:36:20 +02:00
self . notify ( | n | n . start ( ) ) ;
2016-07-05 17:50:46 +02:00
trace! ( target : " mode " , " wake_up: Waking. " ) ;
}
}
fn sleep ( & self ) {
if self . liveness . load ( AtomicOrdering ::Relaxed ) {
// only sleep if the import queue is mostly empty.
if self . queue_info ( ) . total_queue_size ( ) < = MAX_QUEUE_SIZE_TO_SLEEP_ON {
self . liveness . store ( false , AtomicOrdering ::Relaxed ) ;
2016-07-20 12:36:20 +02:00
self . notify ( | n | n . stop ( ) ) ;
2016-07-05 17:50:46 +02:00
trace! ( target : " mode " , " sleep: Sleeping. " ) ;
} else {
trace! ( target : " mode " , " sleep: Cannot sleep - syncing ongoing. " ) ;
// TODO: Consider uncommenting.
2016-07-13 19:59:59 +02:00
//*self.last_activity.lock() = Some(Instant::now());
2016-07-05 17:50:46 +02:00
}
}
}
2016-03-08 15:46:44 +01:00
}
2016-02-29 14:57:41 +01:00
2016-09-06 15:31:13 +02:00
impl snapshot::DatabaseRestore for Client {
	/// Restart the client with a new backend
	fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> {
		trace!(target: "snapshot", "Replacing client database with {:?}", new_db);

		// NOTE(review): all major locks are taken up-front (import, state_db,
		// chain, tracedb, db) so nothing observes a half-replaced database;
		// presumably this acquisition order matches other call sites — confirm
		// before reordering.
		let _import_lock = self.import_lock.lock();
		let mut state_db = self.state_db.lock();
		let mut chain = self.chain.write();
		let mut tracedb = self.tracedb.write();
		self.miner.clear();
		let db = self.db.write();
		db.restore(new_db)?;

		// Rebuild state/chain/trace views on top of the restored database,
		// carrying over the previously configured state cache size.
		let cache_size = state_db.cache_size();
		*state_db = StateDB::new(journaldb::new(db.clone(), self.pruning, ::db::COL_STATE), cache_size);
		*chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone(), self.engine.clone()));
		*tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone());

		Ok(())
	}
}
2016-06-28 13:23:15 +02:00
impl BlockChainClient for Client {
2016-12-09 23:01:43 +01:00
// Execute `t` against the final state of `block` without committing anything.
// The sender's balance is topped up if insufficient and the nonce check is
// disabled, so this is a pure simulation.
fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result<Executed, CallError> {
	let header = self.block_header(block).ok_or(CallError::StatePruned)?;
	let last_hashes = self.build_last_hashes(header.parent_hash());
	// Environment of the requested block, but with an effectively unlimited
	// block gas allowance so the call itself is not capped.
	let env_info = EnvInfo {
		number: header.number(),
		author: header.author(),
		timestamp: header.timestamp(),
		difficulty: header.difficulty(),
		last_hashes: last_hashes,
		gas_used: U256::zero(),
		gas_limit: U256::max_value(),
	};
	// that's just a copy of the state.
	let mut state = self.state_at(block).ok_or(CallError::StatePruned)?;
	// Keep a pristine copy only when the caller asked for a state diff.
	let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };

	let sender = t.sender().map_err(|e| {
		let message = format!("Transaction malformed: {:?}", e);
		ExecutionError::TransactionMalformed(message)
	})?;
	let balance = state.balance(&sender);
	let needed_balance = t.value + t.gas * t.gas_price;
	if balance < needed_balance {
		// give the sender a sufficient balance
		state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty);
	}
	let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
	let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, options)?;

	// TODO gav move this into Executive.
	ret.state_diff = original_state.map(|original| state.diff_from(original));
	Ok(ret)
}
2016-12-09 23:01:43 +01:00
// Re-execute the transaction identified by `id` in its original block
// context: start from the block's beginning state, replay every preceding
// transaction in the block, then run the target one with the requested
// analytics.
fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result<Executed, CallError> {
	let address = self.transaction_address(id).ok_or(CallError::TransactionNotFound)?;
	let header = self.block_header(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?;
	let body = self.block_body(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?;
	let mut state = self.state_at_beginning(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?;
	let txs = body.transactions();

	if address.index >= txs.len() {
		return Err(CallError::TransactionNotFound);
	}
	let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
	let last_hashes = self.build_last_hashes(header.hash());
	let mut env_info = EnvInfo {
		number: header.number(),
		author: header.author(),
		timestamp: header.timestamp(),
		difficulty: header.difficulty(),
		last_hashes: last_hashes,
		gas_used: U256::default(),
		gas_limit: header.gas_limit(),
	};
	// Replay all earlier transactions of the block, accumulating gas used so
	// the target transaction sees the correct environment.
	for t in txs.iter().take(address.index) {
		match Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, Default::default()) {
			Ok(x) => { env_info.gas_used = env_info.gas_used + x.gas_used; }
			Err(ee) => { return Err(CallError::Execution(ee)) }
		}
	}
	let t = &txs[address.index];

	// Keep a pristine copy only when the caller asked for a state diff.
	let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };
	let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, options)?;
	ret.state_diff = original_state.map(|original| state.diff_from(original));
	Ok(ret)
}
2016-07-05 17:50:46 +02:00
fn keep_alive ( & self ) {
2016-11-28 13:20:49 +01:00
let should_wake = match * self . mode . lock ( ) {
Mode ::Dark ( .. ) | Mode ::Passive ( .. ) = > true ,
2016-11-05 10:38:00 +01:00
_ = > false ,
} ;
if should_wake {
2016-07-05 17:50:46 +02:00
self . wake_up ( ) ;
2016-07-13 19:59:59 +02:00
( * self . sleep_state . lock ( ) ) . last_activity = Some ( Instant ::now ( ) ) ;
2016-07-05 17:50:46 +02:00
}
}
2016-05-19 00:44:49 +02:00
2016-11-05 10:38:00 +01:00
// Report the current mode (converted to the IPC representation).
fn mode(&self) -> IpcMode {
	let mode = self.mode.lock().clone();
	let r: IpcMode = mode.clone().into();
	trace!(target: "mode", "Asked for mode = {:?}. returning {:?}", mode, r);
	r
}
2016-10-31 16:58:35 +01:00
2016-12-11 16:52:41 +01:00
fn disable(&self) {
	// Switch off *before* clearing the enabled flag: set_mode becomes a
	// no-op once `enabled` is false.
	self.set_mode(IpcMode::Off);
	self.enabled.store(false, AtomicOrdering::Relaxed);
	self.clear_queue();
}
2016-11-05 10:38:00 +01:00
fn set_mode(&self, new_mode: IpcMode) {
	trace!(target: "mode", "Client::set_mode({:?})", new_mode);
	// Mode changes are ignored entirely once the client has been disabled.
	if !self.enabled.load(AtomicOrdering::Relaxed) {
		return;
	}
	{
		// Scope the mode lock: the change callback runs while it is held,
		// but wake_up/sleep below must not be.
		let mut mode = self.mode.lock();
		*mode = new_mode.clone().into();
		trace!(target: "mode", "Mode now {:?}", &*mode);
		if let Some(ref mut f) = *self.on_mode_change.lock() {
			trace!(target: "mode", "Making callback...");
			f(&*mode)
		}
	}
	// Apply the side effect of the new mode.
	match new_mode {
		IpcMode::Active => self.wake_up(),
		IpcMode::Off => self.sleep(),
		_ => { (*self.sleep_state.lock()).last_activity = Some(Instant::now()); }
	}
}
2016-12-28 13:44:51 +01:00
// Header of the current best (canonical tip) block.
fn best_block_header(&self) -> encoded::Header {
	self.chain.read().best_block_header()
}
2016-12-28 13:44:51 +01:00
// Header for `id`, if the block is known to the chain.
fn block_header(&self, id: BlockId) -> Option<::encoded::Header> {
	let chain = self.chain.read();
	Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash))
}
2016-12-11 02:02:40 +01:00
// Resolve a `BlockId` to a block number; `Pending` is approximated by the
// best block's number.
fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
	match id {
		BlockId::Number(number) => Some(number),
		BlockId::Hash(ref hash) => self.chain.read().block_number(hash),
		BlockId::Earliest => Some(0),
		BlockId::Latest | BlockId::Pending => Some(self.chain.read().best_block_number()),
	}
}
2016-12-28 13:44:51 +01:00
// Body (transactions + uncles) for `id`, if the block is known to the chain.
fn block_body(&self, id: BlockId) -> Option<encoded::Body> {
	let chain = self.chain.read();
	Self::block_hash(&chain, id).and_then(|hash| chain.block_body(&hash))
}
2016-12-28 13:44:51 +01:00
fn block ( & self , id : BlockId ) -> Option < encoded ::Block > {
2016-12-09 23:01:43 +01:00
if let BlockId ::Pending = id {
2016-07-16 11:31:59 +02:00
if let Some ( block ) = self . miner . pending_block ( ) {
2016-12-28 13:44:51 +01:00
return Some ( encoded ::Block ::new ( block . rlp_bytes ( Seal ::Without ) ) ) ;
2016-07-14 15:24:12 +02:00
}
}
2016-09-06 15:31:13 +02:00
let chain = self . chain . read ( ) ;
Self ::block_hash ( & chain , id ) . and_then ( | hash | {
chain . block ( & hash )
2016-02-10 19:29:27 +01:00
} )
2016-01-07 21:35:06 +01:00
}
2016-12-09 23:01:43 +01:00
fn block_status ( & self , id : BlockId ) -> BlockStatus {
2016-09-06 15:31:13 +02:00
let chain = self . chain . read ( ) ;
match Self ::block_hash ( & chain , id ) {
Some ( ref hash ) if chain . is_known ( hash ) = > BlockStatus ::InChain ,
2016-09-27 16:50:24 +02:00
Some ( hash ) = > self . block_queue . status ( & hash ) . into ( ) ,
2016-02-10 19:29:27 +01:00
None = > BlockStatus ::Unknown
2016-02-02 12:12:32 +01:00
}
2016-01-07 21:35:06 +01:00
}
2016-02-23 18:44:13 +01:00
2016-12-09 23:01:43 +01:00
fn block_total_difficulty(&self, id: BlockId) -> Option<U256> {
	if let BlockId::Pending = id {
		if let Some(block) = self.miner.pending_block() {
			// Pending total = pending header difficulty on top of the best
			// block's total difficulty.
			return Some(*block.header.difficulty() + self.block_total_difficulty(BlockId::Latest).expect("blocks in chain have details; qed"));
		}
	}
	let chain = self.chain.read();
	Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
}
2016-01-07 21:35:06 +01:00
2016-12-09 23:01:43 +01:00
// Account nonce at the given block; `None` when the state is unavailable.
fn nonce(&self, address: &Address, id: BlockId) -> Option<U256> {
	self.state_at(id).map(|s| s.nonce(address))
}
2016-12-09 23:01:43 +01:00
// Storage root of the account at the given block, if state and account exist.
fn storage_root(&self, address: &Address, id: BlockId) -> Option<H256> {
	self.state_at(id).and_then(|s| s.storage_root(address))
}
2016-12-09 23:01:43 +01:00
// Public resolution of a `BlockId` to a hash; thin wrapper over the
// associated helper of the same name.
fn block_hash(&self, id: BlockId) -> Option<H256> {
	let chain = self.chain.read();
	Self::block_hash(&chain, id)
}
2016-12-09 23:01:43 +01:00
// Outer `None`: state unavailable; inner `None`: account has no code.
fn code(&self, address: &Address, id: BlockId) -> Option<Option<Bytes>> {
	self.state_at(id).map(|s| s.code(address).map(|c| (*c).clone()))
}
2016-12-09 23:01:43 +01:00
// Account balance at the given block; `None` when the state is unavailable.
fn balance(&self, address: &Address, id: BlockId) -> Option<U256> {
	self.state_at(id).map(|s| s.balance(address))
}
2016-12-09 23:01:43 +01:00
// Storage value at `position` for the account at the given block.
fn storage_at(&self, address: &Address, position: &H256, id: BlockId) -> Option<H256> {
	self.state_at(id).map(|s| s.storage_at(address, position))
}
2016-12-09 23:01:43 +01:00
// Enumerate up to `count` account addresses at block `id`, optionally
// resuming after `after`. Requires a fat DB; returns `None` when
// unavailable or the state is pruned.
fn list_accounts(&self, id: BlockId, after: Option<&Address>, count: u64) -> Option<Vec<Address>> {
	// Address enumeration is only possible with a fat DB.
	if !self.factories.trie.is_fat() {
		trace!(target: "fatdb", "list_accounts: Not a fat DB");
		return None;
	}
	let state = match self.state_at(id) {
		Some(state) => state,
		_ => return None,
	};
	let (root, db) = state.drop();
	let trie = match self.factories.trie.readonly(db.as_hashdb(), &root) {
		Ok(trie) => trie,
		_ => {
			trace!(target: "fatdb", "list_accounts: Couldn't open the DB");
			return None;
		}
	};
	let mut iter = match trie.iter() {
		Ok(iter) => iter,
		_ => return None,
	};
	// A failed seek is only logged; iteration then starts from the beginning.
	if let Some(after) = after {
		if let Err(e) = iter.seek(after) {
			trace!(target: "fatdb", "list_accounts: Couldn't seek the DB: {:?}", e);
		}
	}
	// Unreadable entries are skipped rather than aborting the listing.
	let accounts = iter.filter_map(|item| {
		item.ok().map(|(addr, _)| Address::from_slice(&addr))
	}).take(count as usize).collect();
	Some(accounts)
}
2016-12-09 23:01:43 +01:00
fn list_storage ( & self , id : BlockId , account : & Address , after : Option < & H256 > , count : u64 ) -> Option < Vec < H256 > > {
2016-11-27 11:11:56 +01:00
if ! self . factories . trie . is_fat ( ) {
trace! ( target : " fatdb " , " list_stroage: Not a fat DB " ) ;
return None ;
}
let state = match self . state_at ( id ) {
Some ( state ) = > state ,
_ = > return None ,
} ;
let root = match state . storage_root ( account ) {
Some ( root ) = > root ,
_ = > return None ,
} ;
let ( _ , db ) = state . drop ( ) ;
let account_db = self . factories . accountdb . readonly ( db . as_hashdb ( ) , account . sha3 ( ) ) ;
let trie = match self . factories . trie . readonly ( account_db . as_hashdb ( ) , & root ) {
Ok ( trie ) = > trie ,
_ = > {
trace! ( target : " fatdb " , " list_storage: Couldn't open the DB " ) ;
return None ;
}
} ;
let mut iter = match trie . iter ( ) {
Ok ( iter ) = > iter ,
_ = > return None ,
} ;
if let Some ( after ) = after {
if let Err ( e ) = iter . seek ( after ) {
trace! ( target : " fatdb " , " list_accounts: Couldn't seek the DB: {:?} " , e ) ;
}
}
let keys = iter . filter_map ( | item | {
item . ok ( ) . map ( | ( key , _ ) | H256 ::from_slice ( & key ) )
} ) . take ( count as usize ) . collect ( ) ;
Some ( keys )
}
2016-12-09 23:01:43 +01:00
// Localized transaction for the given id, if present in the chain.
fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction> {
	self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address))
}
2016-12-09 23:01:43 +01:00
// Hash of the block that contains the given transaction, if any.
fn transaction_block(&self, id: TransactionId) -> Option<H256> {
	self.transaction_address(id).map(|addr| addr.block_hash)
}
2016-12-28 13:44:51 +01:00
// Uncle header at the given position within the given block, if any.
fn uncle(&self, id: UncleId) -> Option<encoded::Header> {
	let index = id.position;
	self.block_body(id.block).and_then(|body| body.view().uncle_rlp_at(index))
		.map(encoded::Header::new)
}
2016-12-09 23:01:43 +01:00
// Localized receipt for the given transaction id.
fn transaction_receipt(&self, id: TransactionId) -> Option<LocalizedReceipt> {
	let chain = self.chain.read();
	self.transaction_address(id)
		.and_then(|address| chain.block_number(&address.block_hash).and_then(|block_number| {
			let transaction = chain.block_body(&address.block_hash)
				.and_then(|body| body.view().localized_transaction_at(&address.block_hash, block_number, address.index));
			// Receipts of all transactions up to and including this one are
			// collected — presumably the `transaction_receipt` helper derives
			// per-transaction figures from the cumulative values; confirm there.
			let previous_receipts = (0..address.index + 1)
				.map(|index| {
					let mut address = address.clone();
					address.index = index;
					chain.transaction_receipt(&address)
				})
				.collect();
			// Any missing receipt makes the whole lookup fail.
			match (transaction, previous_receipts) {
				(Some(transaction), Some(previous_receipts)) => {
					Some(transaction_receipt(transaction, previous_receipts))
				},
				_ => None,
			}
		}))
}
2016-01-10 23:37:09 +01:00
// Route between two blocks; both endpoints must be known to the chain.
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
	let chain = self.chain.read();
	if chain.is_known(from) && chain.is_known(to) {
		Some(chain.tree_route(from.clone(), to.clone()))
	} else {
		None
	}
}
2016-05-24 21:56:17 +02:00
// Candidate uncle hashes for the given block, bounded by the engine's
// maximum uncle age.
fn find_uncles(&self, hash: &H256) -> Option<Vec<H256>> {
	self.chain.read().find_uncle_hashes(hash, self.engine.maximum_uncle_age())
}
2016-03-11 20:09:14 +01:00
// Raw state node data for the given hash from the journal DB, if present.
fn state_data(&self, hash: &H256) -> Option<Bytes> {
	self.state_db.lock().journal_db().state(hash)
}
2016-03-11 23:33:01 +01:00
// RLP-encoded receipts of the given block, if stored.
fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
	self.chain.read().block_receipts(hash).map(|receipts| ::rlp::encode(&receipts).to_vec())
}
2016-07-01 21:13:56 +02:00
// Queue a new block for verification and import. Rejects blocks already in
// the chain and blocks whose parent is unknown.
fn import_block(&self, bytes: Bytes) -> Result<H256, BlockImportError> {
	use verification::queue::kind::BlockLike;
	use verification::queue::kind::blocks::Unverified;

	// create unverified block here so the `sha3` calculation can be cached.
	let unverified = Unverified::new(bytes);

	{
		if self.chain.read().is_known(&unverified.hash()) {
			return Err(BlockImportError::Import(ImportError::AlreadyInChain));
		}
		if self.block_status(BlockId::Hash(unverified.parent_hash())) == BlockStatus::Unknown {
			return Err(BlockImportError::Block(BlockError::UnknownParent(unverified.parent_hash())));
		}
	}
	Ok(self.block_queue.import(unverified)?)
}
2016-10-18 18:16:00 +02:00
// Import an old (ancient) block together with its receipts, bypassing the
// verification queue. Same pre-checks as `import_block`.
fn import_block_with_receipts(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result<H256, BlockImportError> {
	{
		// check block order
		let header = BlockView::new(&block_bytes).header_view();
		if self.chain.read().is_known(&header.hash()) {
			return Err(BlockImportError::Import(ImportError::AlreadyInChain));
		}
		if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown {
			return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash())));
		}
	}
	self.import_old_block(block_bytes, receipts_bytes).map_err(Into::into)
}
2016-01-22 04:54:38 +01:00
// Current statistics of the block verification queue.
fn queue_info(&self) -> BlockQueueInfo {
	self.block_queue.queue_info()
}
2016-01-21 23:33:52 +01:00
// Drop everything currently in the block verification queue.
fn clear_queue(&self) {
	self.block_queue.clear();
}
fn chain_info(&self) -> BlockChainInfo {
	let mut chain_info = self.chain.read().chain_info();
	// Pending total difficulty also counts blocks still awaiting verification.
	chain_info.pending_total_difficulty = chain_info.total_difficulty + self.block_queue.total_difficulty();
	chain_info
}
2016-02-13 13:05:28 +01:00
2016-08-17 19:25:02 +02:00
// Engine-specific key/value parameters, re-collected into a sorted map.
fn additional_params(&self) -> BTreeMap<String, String> {
	self.engine.additional_params().into_iter().collect()
}
2016-12-09 23:01:43 +01:00
// Numbers of blocks in [from_block, to_block] whose bloom matches `bloom`;
// `None` when either endpoint cannot be resolved.
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>> {
	let from = self.block_number(from_block);
	let to = self.block_number(to_block);
	if let (Some(from), Some(to)) = (from, to) {
		Some(self.chain.read().blocks_with_bloom(bloom, from, to))
	} else {
		None
	}
}
2016-02-17 12:35:37 +01:00
2016-09-21 12:51:10 +02:00
// Collect log entries matching `filter`.
fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
	// Candidate block numbers for every bloom possibility, deduplicated via a
	// HashSet (ordering after dedup is unspecified).
	let blocks = filter.bloom_possibilities().iter()
		.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
		.flat_map(|m| m)
		// remove duplicate elements
		.collect::<HashSet<u64>>()
		.into_iter()
		.collect::<Vec<u64>>();

	self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit)
}
2016-04-28 21:47:44 +02:00
2016-05-02 12:17:30 +02:00
fn filter_traces ( & self , filter : TraceFilter ) -> Option < Vec < LocalizedTrace > > {
let start = self . block_number ( filter . range . start ) ;
let end = self . block_number ( filter . range . end ) ;
2016-10-10 17:43:44 +02:00
match ( start , end ) {
( Some ( s ) , Some ( e ) ) = > {
let filter = trace ::Filter {
range : s as usize .. e as usize ,
from_address : From ::from ( filter . from_address ) ,
to_address : From ::from ( filter . to_address ) ,
} ;
2016-05-02 12:17:30 +02:00
2016-10-10 17:43:44 +02:00
let traces = self . tracedb . read ( ) . filter ( & filter ) ;
Some ( traces )
} ,
_ = > None ,
2016-05-02 12:17:30 +02:00
}
}
// A single localized trace, addressed by transaction plus trace address.
fn trace(&self, trace: TraceId) -> Option<LocalizedTrace> {
	let trace_address = trace.address;
	self.transaction_address(trace.transaction)
		.and_then(|tx_address| {
			self.block_number(BlockId::Hash(tx_address.block_hash))
				.and_then(|number| self.tracedb.read().trace(number, tx_address.index, trace_address))
		})
}
2016-12-09 23:01:43 +01:00
fn transaction_traces ( & self , transaction : TransactionId ) -> Option < Vec < LocalizedTrace > > {
2016-05-02 12:17:30 +02:00
self . transaction_address ( transaction )
. and_then ( | tx_address | {
2016-12-09 23:01:43 +01:00
self . block_number ( BlockId ::Hash ( tx_address . block_hash ) )
2016-09-06 15:31:13 +02:00
. and_then ( | number | self . tracedb . read ( ) . transaction_traces ( number , tx_address . index ) )
2016-05-02 12:17:30 +02:00
} )
}
2016-12-09 23:01:43 +01:00
/// All localized traces recorded for the given block.
fn block_traces(&self, block: BlockId) -> Option<Vec<LocalizedTrace>> {
	match self.block_number(block) {
		Some(number) => self.tracedb.read().block_traces(number),
		None => None,
	}
}
2016-04-28 21:47:44 +02:00
/// Recent block hashes leading up to the current best block.
fn last_hashes(&self) -> LastHashes {
	let best = self.chain.read().best_block_hash();
	let hashes = self.build_last_hashes(best);
	(*hashes).clone()
}
2016-05-31 19:52:53 +02:00
2016-12-10 21:22:19 +01:00
fn queue_transactions ( & self , transactions : Vec < Bytes > , peer_id : usize ) {
2016-09-27 12:12:18 +02:00
let queue_size = self . queue_transactions . load ( AtomicOrdering ::Relaxed ) ;
trace! ( target : " external_tx " , " Queue size: {} " , queue_size ) ;
if queue_size > MAX_TX_QUEUE_SIZE {
2016-06-19 14:35:42 +02:00
debug! ( " Ignoring {} transactions: queue is full " , transactions . len ( ) ) ;
} else {
let len = transactions . len ( ) ;
2016-12-10 21:22:19 +01:00
match self . io_channel . lock ( ) . send ( ClientIoMessage ::NewTransactions ( transactions , peer_id ) ) {
2016-06-19 14:35:42 +02:00
Ok ( _ ) = > {
self . queue_transactions . fetch_add ( len , AtomicOrdering ::SeqCst ) ;
}
Err ( e ) = > {
debug! ( " Ignoring {} transactions: error queueing: {} " , len , e ) ;
}
}
}
}
2016-12-16 14:54:26 +01:00
/// Transactions the miner considers ready at the current best block number.
fn ready_transactions(&self) -> Vec<PendingTransaction> {
	let best_number = self.chain.read().best_block_number();
	self.miner.ready_transactions(best_number)
}
2016-11-03 22:22:25 +01:00
2016-11-29 16:54:30 +01:00
fn queue_consensus_message ( & self , message : Bytes ) {
2016-12-11 12:32:01 +01:00
let channel = self . io_channel . lock ( ) . clone ( ) ;
if let Err ( e ) = channel . send ( ClientIoMessage ::NewMessage ( message ) ) {
2016-11-28 16:42:36 +01:00
debug! ( " Ignoring the message, error queueing: {} " , e ) ;
}
2016-08-15 14:25:57 +02:00
}
2016-11-15 11:26:37 +01:00
2016-12-04 19:48:26 +01:00
fn signing_network_id ( & self ) -> Option < u64 > {
2016-11-03 22:22:25 +01:00
self . engine . signing_network_id ( & self . latest_env_info ( ) )
}
2016-11-04 17:35:02 +01:00
2016-12-09 23:01:43 +01:00
/// Engine-specific extra info for the header of block `id`, if it exists.
fn block_extra_info(&self, id: BlockId) -> Option<BTreeMap<String, String>> {
	match self.block_header(id) {
		Some(encoded) => Some(self.engine.extra_info(&encoded.decode())),
		None => None,
	}
}
2016-12-09 23:01:43 +01:00
/// Engine-specific extra info for the uncle header `id`, if it exists.
fn uncle_extra_info(&self, id: UncleId) -> Option<BTreeMap<String, String>> {
	match self.uncle(id) {
		Some(encoded) => Some(self.engine.extra_info(&encoded.decode())),
		None => None,
	}
}
2016-11-09 23:25:54 +01:00
/// Snapshot of pruning-related state: earliest available chain block,
/// earliest journalled state era, and the configured history size.
fn pruning_info(&self) -> PruningInfo {
	let earliest_chain = self.chain.read().first_block_number().unwrap_or(1);
	let earliest_state = self.state_db.lock().journal_db().earliest_era().unwrap_or(0);

	PruningInfo {
		earliest_chain: earliest_chain,
		earliest_state: earliest_state,
		state_history_size: Some(self.history),
	}
}
2016-12-10 23:58:39 +01:00
/// Executes a read-only contract call against the latest state using a
/// fake-signed transaction from the default (zero) address.
fn call_contract(&self, address: Address, data: Bytes) -> Result<Bytes, String> {
	let sender = Address::default();
	// Fake-signed call: large gas allowance, zero price and value.
	let transaction = Transaction {
		nonce: self.latest_nonce(&sender),
		action: Action::Call(address),
		gas: U256::from(50_000_000),
		gas_price: U256::default(),
		value: U256::default(),
		data: data,
	}.fake_sign(sender);

	match self.call(&transaction, BlockId::Latest, Default::default()) {
		Ok(executed) => Ok(executed.output),
		Err(e) => Err(format!("{:?}", e)),
	}
}
fn registrar_address ( & self ) -> Option < Address > {
self . registrar . lock ( ) . as_ref ( ) . map ( | r | r . address . clone ( ) )
}
fn registry_address ( & self , name : String ) -> Option < Address > {
self . registrar . lock ( ) . as_ref ( )
. and_then ( | r | r . get_address ( & ( name . as_bytes ( ) . sha3 ( ) ) , " A " ) . ok ( ) )
. and_then ( | a | if a . is_zero ( ) { None } else { Some ( a ) } )
2016-12-13 21:21:07 +01:00
}
2016-01-07 21:35:06 +01:00
}
2016-02-10 12:50:27 +01:00
2016-06-28 13:23:15 +02:00
impl MiningBlockChainClient for Client {
	/// EVM schedule for the latest environment info.
	fn latest_schedule(&self) -> Schedule {
		self.engine.schedule(&self.latest_env_info())
	}

	/// Opens a new block on top of the current best block, attributed to
	/// `author` with the given gas target range and extra data, and attaches
	/// eligible uncle headers (bounded by the engine's maximum uncle
	/// age and count).
	fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
		let engine = &*self.engine;
		let chain = self.chain.read();
		let h = chain.best_block_hash();

		let mut open_block = OpenBlock::new(
			engine,
			self.factories.clone(),
			false,	// TODO: this will need to be parameterised once we want to do immediate mining insertion.
			// Canonical state cloned at the best block.
			self.state_db.lock().boxed_clone_canon(&h),
			&chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"),
			self.build_last_hashes(h.clone()),
			author,
			gas_range_target,
			extra_data,
		).expect("OpenBlock::new only fails if parent state root invalid; state root of best block's header is never invalid; qed");

		// Add uncles
		chain
			.find_uncle_headers(&h, engine.maximum_uncle_age())
			.unwrap_or_else(Vec::new)
			.into_iter()
			.take(engine.maximum_uncle_count())
			.foreach(|h| {
				open_block.push_uncle(h).expect("pushing maximum_uncle_count;
					open_block was just created;
					push_uncle is not ok only if more than maximum_uncle_count is pushed;
					so all push_uncle are Ok;
					qed");
			});

		open_block
	}

	/// Factory used to instantiate EVM interpreters.
	fn vm_factory(&self) -> &EvmFactory {
		&self.factories.vm
	}

	/// Broadcasts a sealed block to notification listeners without importing
	/// it locally; the block RLP is passed in the final (proposal) slot of
	/// `new_blocks` with all other lists empty.
	fn broadcast_proposal_block(&self, block: SealedBlock) {
		self.notify(|notify| {
			notify.new_blocks(
				vec![],
				vec![],
				vec![],
				vec![],
				vec![],
				vec![block.rlp_bytes()],
				0,
			);
		});
	}

	/// Imports a block sealed locally by the miner: commits it to the chain
	/// under the import lock, syncs the canonical state cache, informs the
	/// miner of the chain change, notifies listeners, and flushes the DB.
	/// Returns the imported block's hash on success.
	fn import_sealed_block(&self, block: SealedBlock) -> ImportResult {
		let h = block.header().hash();
		let start = precise_time_ns();
		let route = {
			// scope for self.import_lock
			let _import_lock = self.import_lock.lock();
			let _timer = PerfTimer::new("import_sealed_block");

			let number = block.header().number();
			let block_data = block.rlp_bytes();
			let route = self.commit_block(block, &h, &block_data);
			trace!(target: "client", "Imported sealed block #{} ({})", number, h);
			// Promote the committed route's state changes into the canonical cache.
			self.state_db.lock().sync_cache(&route.enacted, &route.retracted, false);
			route
		};
		let (enacted, retracted) = self.calculate_enacted_retracted(&[route]);
		self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted);
		self.notify(|notify| {
			notify.new_blocks(
				vec![h.clone()],
				vec![],
				enacted.clone(),
				retracted.clone(),
				vec![h.clone()],
				vec![],
				precise_time_ns() - start,
			);
		});
		self.db.read().flush().expect("DB flush failed.");
		Ok(h)
	}
}
2016-02-10 12:50:27 +01:00
2017-01-10 12:23:59 +01:00
impl EngineClient for Client {
	/// Asks the miner to re-evaluate and possibly reseal the pending block.
	fn update_sealing(&self) {
		self.miner.update_sealing(self)
	}

	/// Submits a seal for `block_hash`; a rejected submission is logged.
	fn submit_seal(&self, block_hash: H256, seal: Vec<Bytes>) {
		let submission = self.miner.submit_seal(self, block_hash, seal);
		if submission.is_err() {
			warn!(target: "poa", "Wrong internal seal submission!")
		}
	}

	/// Broadcasts a consensus message to every registered listener.
	fn broadcast_consensus_message(&self, message: Bytes) {
		self.notify(|notify| notify.broadcast(message.clone()));
	}
}
2016-02-10 15:28:43 +01:00
impl MayPanic for Client {
	/// Registers `closure` with the client's panic handler so it is invoked
	/// when a panic occurs.
	fn on_panic<F>(&self, closure: F) where F: OnPanicListener {
		self.panic_handler.on_panic(closure);
	}
}
2016-10-27 15:26:29 +02:00
2016-12-07 10:50:18 +01:00
impl ::client ::ProvingBlockChainClient for Client {
2016-12-09 23:01:43 +01:00
fn prove_storage ( & self , key1 : H256 , key2 : H256 , from_level : u32 , id : BlockId ) -> Vec < Bytes > {
2016-12-05 16:55:33 +01:00
self . state_at ( id )
. and_then ( move | state | state . prove_storage ( key1 , key2 , from_level ) . ok ( ) )
. unwrap_or_else ( Vec ::new )
2016-11-15 14:53:30 +01:00
}
2016-10-27 15:26:29 +02:00
2016-12-09 23:01:43 +01:00
fn prove_account ( & self , key1 : H256 , from_level : u32 , id : BlockId ) -> Vec < Bytes > {
2016-12-05 16:55:33 +01:00
self . state_at ( id )
. and_then ( move | state | state . prove_account ( key1 , from_level ) . ok ( ) )
. unwrap_or_else ( Vec ::new )
2016-11-15 14:53:30 +01:00
}
2016-10-27 15:26:29 +02:00
2016-12-09 23:01:43 +01:00
fn code_by_hash ( & self , account_key : H256 , id : BlockId ) -> Bytes {
2016-12-05 16:55:33 +01:00
self . state_at ( id )
. and_then ( move | state | state . code_by_address_hash ( account_key ) . ok ( ) )
. and_then ( | x | x )
. unwrap_or_else ( Vec ::new )
2016-11-15 14:53:30 +01:00
}
2016-10-27 15:26:29 +02:00
}
2016-11-15 14:53:30 +01:00
2016-12-09 14:52:08 +01:00
impl Drop for Client {
	fn drop(&mut self) {
		// Stop the consensus engine's background activity before the client
		// is torn down.
		self.engine.stop();
	}
}
2016-12-29 19:48:28 +01:00
/// Returns `LocalizedReceipt` given `LocalizedTransaction`
/// and a vector of receipts from given block up to transaction index.
fn transaction_receipt ( tx : LocalizedTransaction , mut receipts : Vec < Receipt > ) -> LocalizedReceipt {
assert_eq! ( receipts . len ( ) , tx . transaction_index + 1 , " All previous receipts are provided. " ) ;
let sender = tx . sender ( )
. expect ( " LocalizedTransaction is part of the blockchain; We have only valid transactions in chain; qed " ) ;
let receipt = receipts . pop ( ) . expect ( " Current receipt is provided; qed " ) ;
let prior_gas_used = match tx . transaction_index {
0 = > 0. into ( ) ,
i = > receipts . get ( i - 1 ) . expect ( " All previous receipts are provided; qed " ) . gas_used ,
} ;
let no_of_logs = receipts . into_iter ( ) . map ( | receipt | receipt . logs . len ( ) ) . sum ::< usize > ( ) ;
let transaction_hash = tx . hash ( ) ;
let block_hash = tx . block_hash ;
let block_number = tx . block_number ;
let transaction_index = tx . transaction_index ;
LocalizedReceipt {
transaction_hash : transaction_hash ,
transaction_index : transaction_index ,
block_hash : block_hash ,
block_number :block_number ,
cumulative_gas_used : receipt . gas_used ,
gas_used : receipt . gas_used - prior_gas_used ,
contract_address : match tx . action {
Action ::Call ( _ ) = > None ,
Action ::Create = > Some ( contract_address ( & sender , & tx . nonce ) )
} ,
logs : receipt . logs . into_iter ( ) . enumerate ( ) . map ( | ( i , log ) | LocalizedLogEntry {
entry : log ,
block_hash : block_hash ,
block_number : block_number ,
transaction_hash : transaction_hash ,
transaction_index : transaction_index ,
transaction_log_index : i ,
log_index : no_of_logs + i ,
} ) . collect ( ) ,
log_bloom : receipt . log_bloom ,
state_root : receipt . state_root ,
}
}
2016-11-15 14:53:30 +01:00
#[cfg(test)]
mod tests {
	#[test]
	fn should_not_cache_details_before_commit() {
		// A block inserted into the chain's batch by another thread but not
		// yet committed must not be visible through `tree_route`.
		use client::BlockChainClient;
		use tests::helpers::*;
		use std::thread;
		use std::time::Duration;
		use std::sync::Arc;
		use std::sync::atomic::{AtomicBool, Ordering};
		use util::kvdb::DBTransaction;

		let client = generate_dummy_client(0);
		let genesis = client.chain_info().best_block_hash;
		let (new_hash, new_block) = get_good_dummy_block_hash();

		let go = {
			// Separate thread uncommited transaction
			let go = Arc::new(AtomicBool::new(false));
			let go_thread = go.clone();
			let another_client = client.reference().clone();
			thread::spawn(move || {
				let mut batch = DBTransaction::new(&*another_client.chain.read().db().clone());
				// Inserted into the batch only — never written to the DB.
				another_client.chain.read().insert_block(&mut batch, &new_block, Vec::new());
				go_thread.store(true, Ordering::SeqCst);
			});
			go
		};
		// Wait until the spawned thread has performed the (uncommitted) insert.
		while !go.load(Ordering::SeqCst) { thread::park_timeout(Duration::from_millis(5)); }

		assert!(client.tree_route(&genesis, &new_hash).is_none());
	}

	#[test]
	fn should_return_correct_log_index() {
		// Checks that `transaction_receipt` offsets `log_index` by the log
		// count of all earlier receipts in the block, while
		// `transaction_log_index` stays local to the transaction.
		use super::transaction_receipt;
		use ethkey::KeyPair;
		use log_entry::{LogEntry, LocalizedLogEntry};
		use receipt::{Receipt, LocalizedReceipt};
		use transaction::{Transaction, LocalizedTransaction, Action};
		use util::Hashable;

		// given
		let key = KeyPair::from_secret("test".sha3()).unwrap();
		let secret = key.secret();

		let block_number = 1;
		let block_hash = 5.into();
		let state_root = 99.into();
		let gas_used = 10.into();
		let raw_tx = Transaction {
			nonce: 0.into(),
			gas_price: 0.into(),
			gas: 21000.into(),
			action: Action::Call(10.into()),
			value: 0.into(),
			data: vec![],
		};
		let tx1 = raw_tx.clone().sign(secret, None);
		// Second transaction in the block (index 1); the first transaction's
		// receipt carries one log, so log indices here start at 1.
		let transaction = LocalizedTransaction {
			signed: tx1.clone(),
			block_number: block_number,
			block_hash: block_hash,
			transaction_index: 1,
		};
		let logs = vec![LogEntry {
			address: 5.into(),
			topics: vec![],
			data: vec![],
		}, LogEntry {
			address: 15.into(),
			topics: vec![],
			data: vec![],
		}];
		let receipts = vec![Receipt {
			state_root: state_root,
			gas_used: 5.into(),
			log_bloom: Default::default(),
			logs: vec![logs[0].clone()],
		}, Receipt {
			state_root: state_root,
			gas_used: gas_used,
			log_bloom: Default::default(),
			logs: logs.clone(),
		}];

		// when
		let receipt = transaction_receipt(transaction, receipts);

		// then
		assert_eq!(receipt, LocalizedReceipt {
			transaction_hash: tx1.hash(),
			transaction_index: 1,
			block_hash: block_hash,
			block_number: block_number,
			cumulative_gas_used: gas_used,
			gas_used: gas_used - 5.into(),
			contract_address: None,
			logs: vec![LocalizedLogEntry {
				entry: logs[0].clone(),
				block_hash: block_hash,
				block_number: block_number,
				transaction_hash: tx1.hash(),
				transaction_index: 1,
				transaction_log_index: 0,
				log_index: 1,
			}, LocalizedLogEntry {
				entry: logs[1].clone(),
				block_hash: block_hash,
				block_number: block_number,
				transaction_hash: tx1.hash(),
				transaction_index: 1,
				transaction_log_index: 1,
				log_index: 2,
			}],
			log_bloom: Default::default(),
			state_root: state_root,
		});
	}
}