2020-09-22 14:53:52 +02:00
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of OpenEthereum.
2016-02-05 13:40:41 +01:00
2020-09-22 14:53:52 +02:00
// OpenEthereum is free software: you can redistribute it and/or modify
2016-02-05 13:40:41 +01:00
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
2020-09-22 14:53:52 +02:00
// OpenEthereum is distributed in the hope that it will be useful,
2016-02-05 13:40:41 +01:00
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
2020-09-22 14:53:52 +02:00
// along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
2016-11-18 12:14:52 +01:00
2018-08-02 12:58:02 +02:00
use std ::{
cmp ,
2019-01-04 14:05:46 +01:00
collections ::{ BTreeMap , HashSet , VecDeque } ,
2019-11-11 21:57:38 +01:00
convert ::TryFrom ,
io ::{ BufRead , BufReader } ,
str ::{ from_utf8 , FromStr } ,
sync ::{
2020-07-29 10:57:15 +02:00
atomic ::{ AtomicBool , AtomicI64 , Ordering as AtomicOrdering } ,
2018-05-09 08:49:34 +02:00
Arc , Weak ,
2019-11-11 21:57:38 +01:00
} ,
time ::{ Duration , Instant } ,
} ;
2020-08-05 06:08:03 +02:00
2019-01-16 16:37:26 +01:00
use blockchain ::{
BlockChain , BlockChainDB , BlockNumberKey , BlockProvider , BlockReceipts , ExtrasInsert ,
ImportRoute , TransactionAddress , TreeRoute ,
} ;
2019-11-11 21:57:38 +01:00
use bytes ::{ Bytes , ToPretty } ;
use call_contract ::CallContract ;
use error ::Error ;
use ethcore_miner ::pool ::VerifiedTransaction ;
use ethereum_types ::{ Address , H256 , H264 , U256 } ;
2019-01-04 14:05:46 +01:00
use hash ::keccak ;
2018-05-09 08:49:34 +02:00
use itertools ::Itertools ;
2019-11-11 21:57:38 +01:00
use kvdb ::{ DBTransaction , DBValue , KeyValueDB } ;
2019-01-04 14:05:46 +01:00
use parking_lot ::{ Mutex , RwLock } ;
use rand ::OsRng ;
2020-12-10 16:42:05 +01:00
use rlp ::{ PayloadInfo , Rlp } ;
2019-11-11 21:57:38 +01:00
use rustc_hex ::FromHex ;
2019-01-04 14:05:46 +01:00
use trie ::{ Trie , TrieFactory , TrieSpec } ;
use types ::{
ancestry_action ::AncestryAction ,
2019-11-11 21:57:38 +01:00
data_format ::DataFormat ,
2020-08-05 06:08:03 +02:00
encoded ,
2019-01-04 14:05:46 +01:00
filter ::Filter ,
header ::{ ExtendedHeader , Header } ,
log_entry ::LocalizedLogEntry ,
2020-12-10 16:42:05 +01:00
receipt ::{ LocalizedReceipt , TypedReceipt } ,
transaction ::{
self , Action , LocalizedTransaction , SignedTransaction , TypedTransaction ,
UnverifiedTransaction ,
} ,
2019-01-04 14:05:46 +01:00
BlockNumber ,
} ;
use vm ::{ EnvInfo , LastHashes } ;
2016-07-07 09:39:32 +02:00
2019-01-16 16:37:26 +01:00
use ansi_term ::Colour ;
2019-03-15 13:22:47 +01:00
use block ::{ enact_verified , ClosedBlock , Drain , LockedBlock , OpenBlock , SealedBlock } ;
2017-05-17 12:41:33 +02:00
use call_contract ::RegistryInfo ;
2018-03-03 18:42:13 +01:00
use client ::{
2018-03-13 11:49:57 +01:00
ancient_import ::AncientVerifier , bad_blocks , traits ::ForceUpdateSealing , AccountData ,
BadBlocks , Balance , BlockChain as BlockChainTrait , BlockChainClient , BlockChainReset , BlockId ,
BlockInfo , BlockProducer , BroadcastProposalBlock , Call , CallAnalytics , ChainInfo ,
2019-11-11 21:57:38 +01:00
ChainMessageType , ChainNotify , ChainRoute , ClientConfig , ClientIoMessage , EngineInfo ,
ImportBlock , ImportExportBlocks , ImportSealedBlock , IoClient , Mode , NewBlocks , Nonce ,
PrepareOpenBlock , ProvingBlockChainClient , PruningInfo , ReopenBlock , ScheduleInfo ,
2016-12-09 23:01:43 +01:00
SealedBlockImporter , StateClient , StateInfo , StateOrBlock , TraceFilter , TraceId , TransactionId ,
TransactionInfo , UncleId ,
2018-03-03 18:42:13 +01:00
} ;
2016-08-17 19:25:02 +02:00
use engines ::{
2018-12-19 10:24:14 +01:00
epoch ::PendingTransition , EngineError , EpochTransition , EthEngine , ForkChoice , MAX_UNCLE_AGE ,
2016-08-17 19:25:02 +02:00
} ;
2018-08-24 10:42:24 +02:00
use error ::{
2018-09-24 12:28:54 +02:00
BlockError , CallError , Error as EthcoreError , ErrorKind as EthcoreErrorKind , EthcoreResult ,
2020-07-29 10:57:15 +02:00
ExecutionError , ImportErrorKind ,
2018-08-24 10:42:24 +02:00
} ;
2017-04-03 09:40:18 +02:00
use executive ::{ contract_address , Executed , Executive , TransactOptions } ;
2018-02-19 12:27:42 +01:00
use factory ::{ Factories , VmFactory } ;
2019-11-11 21:57:38 +01:00
use io ::IoChannel ;
2018-01-11 17:49:10 +01:00
use miner ::{ Miner , MinerService } ;
2018-11-18 00:06:34 +01:00
use snapshot ::{ self , io as snapshot_io , SnapshotClient } ;
2017-04-03 09:40:18 +02:00
use spec ::Spec ;
2017-05-17 12:41:33 +02:00
use state ::{ self , State } ;
2019-01-04 14:05:46 +01:00
use state_db ::StateDB ;
2020-09-14 16:08:57 +02:00
use stats ::{ prometheus , prometheus_counter , prometheus_gauge , PrometheusMetrics } ;
2019-01-04 14:05:46 +01:00
use trace ::{
self , Database as TraceDatabase , ImportRequest as TraceImportRequest , LocalizedTrace , TraceDB ,
} ;
use transaction_ext ::Transaction ;
2018-08-02 11:20:46 +02:00
use verification ::{
2019-01-04 14:05:46 +01:00
self ,
queue ::kind ::{ blocks ::Unverified , BlockLike } ,
BlockQueue , PreverifiedBlock , Verifier ,
} ;
2019-11-11 21:57:38 +01:00
use vm ::Schedule ;
2016-07-07 09:39:32 +02:00
// re-export
pub use blockchain ::CacheSize as BlockChainCacheSize ;
2019-06-11 20:56:03 +02:00
use db ::{ keys ::BlockDetails , Readable , Writable } ;
2016-07-07 09:39:32 +02:00
pub use types ::{ block_status ::BlockStatus , blockchain_info ::BlockChainInfo } ;
2018-07-25 14:36:46 +02:00
pub use verification ::QueueInfo as BlockQueueInfo ;
2016-01-07 21:35:06 +01:00
2018-09-13 11:04:39 +02:00
use_contract! ( registry , " res/contracts/registrar.json " ) ;
2018-02-09 09:32:06 +01:00
2018-05-09 08:49:34 +02:00
// Upper bound on the in-memory ancient-block import queue
// (NOTE(review): enforced where `queue_ancient_blocks` is filled, outside this view — confirm).
const MAX_ANCIENT_BLOCKS_QUEUE_SIZE: usize = 4096;
// Max number of blocks imported at once.
const MAX_ANCIENT_BLOCKS_TO_IMPORT: usize = 4;
// NOTE(review): presumably the queue length below which the client is allowed to
// sleep — confirm at the use site (not visible in this chunk).
const MAX_QUEUE_SIZE_TO_SLEEP_ON: usize = 2;
// NOTE(review): presumably a floor for the configurable state-history length
// (`Client::history`) — confirm where the config is validated.
const MIN_HISTORY_SIZE: u64 = 8;
2016-06-19 14:35:42 +02:00
2016-02-02 23:43:29 +01:00
/// Report on the status of a client.
///
/// Accumulated totals since client start; see `accrue_block` for how a
/// processed block is folded in.
#[derive(Default, Clone, Debug, Eq, PartialEq)]
pub struct ClientReport {
    /// How many blocks have been imported so far.
    pub blocks_imported: usize,
    /// How many transactions have been applied so far.
    pub transactions_applied: usize,
    /// How much gas has been processed so far.
    pub gas_processed: U256,
    /// Internal structure item sizes, keyed by item name
    /// (NOTE(review): populated outside this view — confirm units).
    pub item_sizes: BTreeMap<String, usize>,
}
impl ClientReport {
2016-02-02 23:43:29 +01:00
/// Alter internal reporting to reflect the additional `block` has been processed.
2018-04-13 17:34:27 +02:00
pub fn accrue_block ( & mut self , header : & Header , transactions : usize ) {
2016-01-18 23:23:32 +01:00
self . blocks_imported + = 1 ;
2018-04-13 17:34:27 +02:00
self . transactions_applied + = transactions ;
self . gas_processed = self . gas_processed + * header . gas_used ( ) ;
2016-01-18 23:23:32 +01:00
}
}
2017-07-10 13:21:11 +02:00
impl < ' a > ::std ::ops ::Sub < & ' a ClientReport > for ClientReport {
type Output = Self ;
fn sub ( mut self , other : & ' a ClientReport ) -> Self {
self . blocks_imported - = other . blocks_imported ;
self . transactions_applied - = other . transactions_applied ;
self . gas_processed = self . gas_processed - other . gas_processed ;
self
}
}
2016-07-05 17:50:46 +02:00
/// Bookkeeping for the client's sleep/wake mechanism.
struct SleepState {
    // Timestamp of the last observed activity; `Some` only while awake.
    last_activity: Option<Instant>,
    // Timestamp of the last automatic sleep; `Some` only while asleep.
    last_autosleep: Option<Instant>,
}

impl SleepState {
    /// Create a fresh state; exactly one of the two timestamps is set,
    /// depending on whether we start `awake` or asleep.
    fn new(awake: bool) -> Self {
        let now = Instant::now();
        if awake {
            SleepState {
                last_activity: Some(now),
                last_autosleep: None,
            }
        } else {
            SleepState {
                last_activity: None,
                last_autosleep: Some(now),
            }
        }
    }
}
2018-03-03 18:42:13 +01:00
/// Block-import machinery owned by `Client`: verification queue, verifiers,
/// miner handle and the consensus engine used while importing.
struct Importer {
    /// Lock used during block import
    pub import_lock: Mutex<()>, // FIXME Maybe wrap the whole `Importer` instead?
    /// Used to verify blocks
    pub verifier: Box<dyn Verifier<Client>>,
    /// Queue containing pending blocks
    pub block_queue: BlockQueue,
    /// Handles block sealing
    pub miner: Arc<Miner>,
    /// Ancient block verifier: import an ancient sequence of blocks in order from a starting epoch
    pub ancient_verifier: AncientVerifier,
    /// Ethereum engine to be used during import
    pub engine: Arc<dyn EthEngine>,
    /// A lru cache of recently detected bad blocks
    pub bad_blocks: bad_blocks::BadBlocks,
}
2016-01-07 21:35:06 +01:00
/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
pub struct Client {
    /// Flag used to disable the client forever. Not to be confused with `liveness`.
    enabled: AtomicBool,

    /// Operating mode for the client
    mode: Mutex<Mode>,

    /// The blockchain itself (blocks and extras), swappable behind a lock.
    chain: RwLock<Arc<BlockChain>>,
    /// Execution-trace database layered over the blockchain.
    tracedb: RwLock<TraceDB<BlockChain>>,
    /// Consensus engine used by this client.
    engine: Arc<dyn EthEngine>,

    /// Client configuration
    config: ClientConfig,

    /// Database pruning strategy to use for StateDB
    pruning: journaldb::Algorithm,

    /// Client uses this to store blocks, traces, etc.
    db: RwLock<Arc<dyn BlockChainDB>>,

    /// Journalled state database.
    state_db: RwLock<StateDB>,

    /// Report on the status of client
    report: RwLock<ClientReport>,

    /// Activity bookkeeping backing the sleep/wake mechanism.
    sleep_state: Mutex<SleepState>,

    /// Flag changed by `sleep` and `wake_up` methods. Not to be confused with `enabled`.
    liveness: AtomicBool,
    /// Channel for sending messages to the client's IO service.
    io_channel: RwLock<IoChannel<ClientIoMessage>>,

    /// List of actors to be notified on certain chain events
    notify: RwLock<Vec<Weak<dyn ChainNotify>>>,

    /// Queued transactions from IO
    queue_transactions: IoChannelQueue,
    /// Ancient blocks import queue
    queue_ancient_blocks: IoChannelQueue,

    /// Queued ancient blocks, make sure they are imported in order.
    queued_ancient_blocks: Arc<RwLock<(HashSet<H256>, VecDeque<(Unverified, Bytes)>)>>,
    // Serializes ancient-block import rounds.
    ancient_blocks_import_lock: Arc<Mutex<()>>,

    /// Consensus messages import queue
    queue_consensus_message: IoChannelQueue,

    /// Cache of recent block hashes (see `build_last_hashes` / `update_last_hashes`).
    last_hashes: RwLock<VecDeque<H256>>,

    /// Factories for tries, VMs, etc. (passed to block enactment).
    factories: Factories,

    /// Number of eras kept in a journal before they are pruned
    history: u64,

    /// An action to be done if a mode/spec_name change happens
    on_user_defaults_change: Mutex<Option<Box<dyn FnMut(Option<Mode>) + 'static + Send>>>,

    // NOTE(review): presumably the on-chain registrar contract address, set at
    // construction — confirm where it is initialised (outside this view).
    registrar_address: Option<Address>,

    /// A closure to call when we want to restart the client
    exit_handler: Mutex<Option<Box<dyn Fn(String) + 'static + Send>>>,

    /// Block-import machinery (queue, verifiers, miner, engine).
    importer: Importer,
}
2018-03-03 18:42:13 +01:00
impl Importer {
2016-06-28 13:23:15 +02:00
pub fn new (
2018-03-03 18:42:13 +01:00
config : & ClientConfig ,
2020-07-29 10:36:15 +02:00
engine : Arc < dyn EthEngine > ,
2016-07-11 17:02:42 +02:00
message_channel : IoChannel < ClientIoMessage > ,
2018-03-03 18:42:13 +01:00
miner : Arc < Miner > ,
2019-11-11 21:57:38 +01:00
) -> Result < Importer , EthcoreError > {
let block_queue = BlockQueue ::new (
config . queue . clone ( ) ,
engine . clone ( ) ,
message_channel . clone ( ) ,
config . verifier_type . verifying_seal ( ) ,
) ;
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
Ok ( Importer {
2016-01-21 23:33:52 +01:00
import_lock : Mutex ::new ( ( ) ) ,
2018-03-03 18:42:13 +01:00
verifier : verification ::new ( config . verifier_type . clone ( ) ) ,
block_queue ,
miner ,
2018-05-09 08:49:34 +02:00
ancient_verifier : AncientVerifier ::new ( engine . clone ( ) ) ,
2017-09-26 14:19:08 +02:00
engine ,
2018-09-08 04:04:28 +02:00
bad_blocks : Default ::default ( ) ,
2018-03-03 18:42:13 +01:00
} )
2016-02-24 10:55:34 +01:00
}
2020-08-05 06:08:03 +02:00
2020-12-02 11:31:11 +01:00
// t_nb 6.0 This is triggered by a message coming from a block queue when the block is ready for insertion
/// Drain the verification queue and import as many verified blocks as possible
/// (bounded by `max_round_blocks_to_import`). Returns the number of blocks
/// actually imported. Children of a block that failed in this round are marked
/// invalid without being processed.
pub fn import_verified_blocks(&self, client: &Client) -> usize {
    // Shortcut out if we know we're incapable of syncing the chain.
    if !client.enabled.load(AtomicOrdering::Relaxed) {
        return 0;
    }

    let max_blocks_to_import = client.config.max_round_blocks_to_import;
    let (
        imported_blocks,
        import_results,
        invalid_blocks,
        imported,
        proposed_blocks,
        duration,
        has_more_blocks_to_import,
    ) = {
        let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
        let mut invalid_blocks = HashSet::new();
        let proposed_blocks = Vec::with_capacity(max_blocks_to_import);
        let mut import_results = Vec::with_capacity(max_blocks_to_import);

        // Held for the whole round so imports are serialized.
        let _import_lock = self.import_lock.lock();
        let blocks = self.block_queue.drain(max_blocks_to_import);
        if blocks.is_empty() {
            return 0;
        }
        trace_time!("import_verified_blocks");
        let start = Instant::now();

        for block in blocks {
            let header = block.header.clone();
            let bytes = block.bytes.clone();
            let hash = header.hash();

            // A child of a block invalidated earlier in this round is invalid too.
            let is_invalid = invalid_blocks.contains(header.parent_hash());
            if is_invalid {
                invalid_blocks.insert(hash);
                continue;
            }
            // t_nb 7.0 check and lock block
            match self.check_and_lock_block(&bytes, block, client) {
                Ok((closed_block, pending)) => {
                    imported_blocks.push(hash);
                    let transactions_len = closed_block.transactions.len();
                    // t_nb 8.0 commit block to db
                    let route = self.commit_block(
                        closed_block,
                        &header,
                        encoded::Block::new(bytes),
                        pending,
                        client,
                    );
                    import_results.push(route);
                    client
                        .report
                        .write()
                        .accrue_block(&header, transactions_len);
                }
                Err(err) => {
                    // Remember the block as bad so peers serving it can be penalised.
                    self.bad_blocks.report(bytes, format!("{:?}", err));
                    invalid_blocks.insert(hash);
                }
            }
        }

        let imported = imported_blocks.len();
        let invalid_blocks = invalid_blocks.into_iter().collect::<Vec<H256>>();

        if !invalid_blocks.is_empty() {
            self.block_queue.mark_as_bad(&invalid_blocks);
        }
        let has_more_blocks_to_import = !self.block_queue.mark_as_good(&imported_blocks);
        (
            imported_blocks,
            import_results,
            invalid_blocks,
            imported,
            proposed_blocks,
            start.elapsed(),
            has_more_blocks_to_import,
        )
    };

    {
        if !imported_blocks.is_empty() {
            let route = ChainRoute::from(import_results.as_ref());

            // t_nb 10 Notify miner about new included block.
            // Only once the queue is fully drained, to avoid redundant re-seals.
            if !has_more_blocks_to_import {
                self.miner.chain_new_blocks(
                    client,
                    &imported_blocks,
                    &invalid_blocks,
                    route.enacted(),
                    route.retracted(),
                    false,
                );
            }

            // t_nb 11 notify rest of system about new block inclusion
            client.notify(|notify| {
                notify.new_blocks(NewBlocks::new(
                    imported_blocks.clone(),
                    invalid_blocks.clone(),
                    route.clone(),
                    Vec::new(),
                    proposed_blocks.clone(),
                    duration,
                    has_more_blocks_to_import,
                ));
            });
        }
    }

    let db = client.db.read();
    db.key_value().flush().expect("DB flush failed.");
    self.block_queue.resignal_verification();
    imported
}
2020-08-05 06:08:03 +02:00
2020-12-02 11:31:11 +01:00
// t_nb 6.0.1 check and lock block,
//
// Runs verification stages 3-5 on a preverified block, enacts it on top of its
// parent's state, and returns the resulting `LockedBlock` together with any
// pending epoch transition signalled by the block.
fn check_and_lock_block(
    &self,
    bytes: &[u8],
    block: PreverifiedBlock,
    client: &Client,
) -> EthcoreResult<(LockedBlock, Option<PendingTransition>)> {
    let engine = &*self.engine;
    let header = block.header.clone();

    // Check the block isn't so old we won't be able to enact it.
    // t_nb 7.1 check if block is older than last pruned block
    let best_block_number = client.chain.read().best_block_number();
    if client.pruning_info().earliest_state > header.number() {
        warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
        bail!("Block is ancient");
    }

    // t_nb 7.2 Check if parent is in chain
    let parent = match client.block_header_decoded(BlockId::Hash(*header.parent_hash())) {
        Some(h) => h,
        None => {
            warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({})", header.number(), header.hash(), header.parent_hash());
            bail!("Parent not found");
        }
    };

    let chain = client.chain.read();
    // t_nb 7.3 verify block family
    let verify_family_result = self.verifier.verify_block_family(
        &header,
        &parent,
        engine,
        Some(verification::FullFamilyParams {
            block: &block,
            block_provider: &**chain,
            client,
        }),
    );

    if let Err(e) = verify_family_result {
        warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
        bail!(e);
    };

    // t_nb 7.4 verify block external
    let verify_external_result = self.verifier.verify_block_external(&header, engine);
    if let Err(e) = verify_external_result {
        warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
        bail!(e);
    };

    // Enact Verified Block
    // t_nb 7.5 Get build last hashes. Get parent state db. Get epoch_transition
    let last_hashes = client.build_last_hashes(header.parent_hash());

    // State database as of the parent block (canonical cache clone).
    let db = client
        .state_db
        .read()
        .boxed_clone_canon(header.parent_hash());

    let is_epoch_begin = chain
        .epoch_transition(parent.number(), *header.parent_hash())
        .is_some();

    // t_nb 8.0 Block enacting. Execution of transactions.
    let enact_result = enact_verified(
        block,
        engine,
        client.tracedb.read().tracing_enabled(),
        db,
        &parent,
        last_hashes,
        client.factories.clone(),
        is_epoch_begin,
        &mut chain.ancestry_with_metadata_iter(*header.parent_hash()),
    );

    let mut locked_block = match enact_result {
        Ok(b) => b,
        Err(e) => {
            warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
            bail!(e);
        }
    };

    // t_nb 7.6 Strip receipts for blocks before validate_receipts_transition,
    // if the expected receipts root header does not match.
    // (i.e. allow inconsistency in receipts outcome before the transition block)
    if header.number() < engine.params().validate_receipts_transition
        && header.receipts_root() != locked_block.header.receipts_root()
    {
        locked_block.strip_receipts_outcomes();
    }

    // t_nb 7.7 Final Verification. See if block that we created (executed) matches exactly with block that we received.
    if let Err(e) = self
        .verifier
        .verify_block_final(&header, &locked_block.header)
    {
        warn!(target: "client", "Stage 5 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
        bail!(e);
    }

    // Detect an epoch-end signal in the block; the pending transition (if any)
    // is persisted later by `commit_block`.
    let pending = self.check_epoch_end_signal(
        &header,
        bytes,
        &locked_block.receipts,
        locked_block.state.db(),
        client,
    )?;

    Ok((locked_block, pending))
}
2020-08-05 06:08:03 +02:00
2016-10-18 18:16:00 +02:00
/// Import a block with transaction receipts.
///
/// The block is guaranteed to be the next best blocks in the
/// first block sequence. Does no sealing or transaction validation.
///
/// Panics if the receipt bytes are malformed or if the block is not strictly
/// older than the current best block (NOTE(review): callers presumably supply
/// receipts from trusted storage/warp sync — confirm).
fn import_old_block(
    &self,
    unverified: Unverified,
    receipts_bytes: &[u8],
    db: &dyn KeyValueDB,
    chain: &BlockChain,
) -> EthcoreResult<()> {
    let receipts = TypedReceipt::decode_rlp_list(&Rlp::new(receipts_bytes))
        .unwrap_or_else(|e| panic!("Receipt bytes should be valid: {:?}", e));
    let _import_lock = self.import_lock.lock();

    if unverified.header.number() >= chain.best_block_header().number() {
        panic!("Ancient block number is higher then best block number");
    }
    {
        trace_time!("import_old_block");
        // verify the block, passing the chain for updating the epoch verifier.
        let mut rng = OsRng::new()?;
        self.ancient_verifier
            .verify(&mut rng, &unverified.header, &chain)?;

        // Commit results
        let mut batch = DBTransaction::new();
        // Insert out of chronological order; no extras updates, marked as ancient.
        chain.insert_unordered_block(
            &mut batch,
            encoded::Block::new(unverified.bytes),
            receipts,
            None,
            false,
            true,
        );
        // Final commit to the DB
        db.write_buffered(batch);
        chain.commit();
    }
    db.flush().expect("DB flush failed.");
    Ok(())
}
2020-08-05 06:08:03 +02:00
2017-07-13 09:48:00 +02:00
// NOTE: the header of the block passed here is not necessarily sealed, as
// it is for reconstructing the state transition.
//
// The header passed is from the original block data and is sealed.
// TODO: should return an error if ImportRoute is none, issue #9910
//
// Writes the enacted block, its state journal, receipts and traces to the
// database in a single batch, updates the canonical chain and notifies the
// engine of finality/epoch changes. Returns the resulting `ImportRoute`.
fn commit_block<B>(
    &self,
    block: B,
    header: &Header,
    block_data: encoded::Block,
    pending: Option<PendingTransition>,
    client: &Client,
) -> ImportRoute
where
    B: Drain,
{
    let hash = &header.hash();
    let number = header.number();
    let parent = header.parent_hash();
    let chain = client.chain.read();
    let mut is_finalized = false;

    // Commit results
    let block = block.drain();
    debug_assert_eq!(header.hash(), block_data.header_view().hash());

    let mut batch = DBTransaction::new();

    // t_nb 9.1 Gather all ancestry actions. (Used only by AuRa)
    let ancestry_actions = self
        .engine
        .ancestry_actions(&header, &mut chain.ancestry_with_metadata_iter(*parent));

    let receipts = block.receipts;
    let traces = block.traces.drain();
    let best_hash = chain.best_block_hash();

    // Extended header for the incoming block (fork-choice input).
    let new = ExtendedHeader {
        header: header.clone(),
        is_finalized,
        parent_total_difficulty: chain
            .block_details(&parent)
            .expect("Parent block is in the database; qed")
            .total_difficulty,
    };

    // Extended header for the current best block (fork-choice input).
    let best = {
        let hash = best_hash;
        let header = chain
            .block_header_data(&hash)
            .expect("Best block is in the database; qed")
            .decode()
            .expect("Stored block header is valid RLP; qed");
        let details = chain
            .block_details(&hash)
            .expect("Best block is in the database; qed");

        ExtendedHeader {
            parent_total_difficulty: details.total_difficulty - *header.difficulty(),
            is_finalized: details.is_finalized,
            header: header,
        }
    };

    // t_nb 9.2 calculate route between current and latest block.
    let route = chain.tree_route(best_hash, *parent).expect("forks are only kept when it has common ancestors; tree route from best to prospective's parent always exists; qed");

    // t_nb 9.3 Check block total difficulty
    let fork_choice = if route.is_from_route_finalized {
        // Never reorganise away from a finalized chain.
        ForkChoice::Old
    } else {
        self.engine.fork_choice(&new, &best)
    };

    // t_nb 9.4 CHECK! I *think* this is fine, even if the state_root is equal to another
    // already-imported block of the same number.
    // TODO: Prove it with a test.
    let mut state = block.state.drop().1;

    // t_nb 9.5 check epoch end signal, potentially generating a proof on the current
    // state. Write transition into db.
    if let Some(pending) = pending {
        chain.insert_pending_transition(&mut batch, header.hash(), pending);
    }

    // t_nb 9.6 push state to database Transaction. (It calls journal_under from JournalDB)
    state
        .journal_under(&mut batch, number, hash)
        .expect("DB commit failed");

    let finalized: Vec<_> = ancestry_actions
        .into_iter()
        .map(|ancestry_action| {
            let AncestryAction::MarkFinalized(a) = ancestry_action;

            if a != header.hash() {
                // t_nb 9.7 if there are finalized ancestors, mark that change in block in db. (Used by AuRa)
                chain
                    .mark_finalized(&mut batch, a)
                    .expect("Engine's ancestry action must be known blocks; qed");
            } else {
                // we're finalizing the current block
                is_finalized = true;
            }

            a
        })
        .collect();

    // t_nb 9.8 insert block
    let route = chain.insert_block(
        &mut batch,
        block_data,
        receipts.clone(),
        ExtrasInsert {
            fork_choice: fork_choice,
            is_finalized,
        },
    );

    // t_nb 9.9 insert traces (if they are enabled)
    client.tracedb.read().import(
        &mut batch,
        TraceImportRequest {
            traces: traces.into(),
            block_hash: hash.clone(),
            block_number: number,
            enacted: route.enacted.clone(),
            retracted: route.retracted.len(),
        },
    );

    let is_canon = route.enacted.last().map_or(false, |h| h == hash);
    // t_nb 9.10 sync cache
    state.sync_cache(&route.enacted, &route.retracted, is_canon);
    // Final commit to the DB
    // t_nb 9.11 Write Transaction to database (cached)
    client.db.read().key_value().write_buffered(batch);
    // t_nb 9.12 commit changes to become current greatest by applying pending insertion updates (Sync point)
    chain.commit();

    // t_nb 9.13 check epoch end. Related only to AuRa and it seems light engine
    self.check_epoch_end(&header, &finalized, &chain, client);

    // t_nb 9.14 update last hashes. They are built in step 7.5
    client.update_last_hashes(&parent, hash);

    // t_nb 9.15 prune ancient states
    if let Err(e) = client.prune_ancient(state, &chain) {
        warn!("Failed to prune ancient state data: {}", e);
    }

    route
}
2020-08-05 06:08:03 +02:00
2017-06-28 13:17:36 +02:00
// check for epoch end signal and write pending transition if it occurs.
// state for the given block must be available (the block was just imported).
//
// Returns `Ok(Some(..))` with the pending transition when the engine signals
// an epoch end, `Ok(None)` when it does not, and an error when proof
// generation fails or the engine misbehaves.
fn check_epoch_end_signal(
    &self,
    header: &Header,
    block_bytes: &[u8],
    receipts: &[TypedReceipt],
    state_db: &StateDB,
    client: &Client,
) -> EthcoreResult<Option<PendingTransition>> {
    use engines::EpochChange;

    let hash = header.hash();
    // Hand the engine the raw block and receipts as auxiliary data.
    let auxiliary = ::machine::AuxiliaryData {
        bytes: Some(block_bytes),
        receipts: Some(&receipts),
    };

    match self.engine.signals_epoch_end(header, auxiliary) {
        EpochChange::Yes(proof) => {
            use engines::Proof;

            let proof = match proof {
                // Engine already has the proof bytes.
                Proof::Known(proof) => proof,
                // Engine needs a proved contract call against this block's
                // state to build the proof.
                Proof::WithState(with_state) => {
                    let env_info = EnvInfo {
                        number: header.number(),
                        author: header.author().clone(),
                        timestamp: header.timestamp(),
                        difficulty: header.difficulty().clone(),
                        last_hashes: client.build_last_hashes(header.parent_hash()),
                        gas_used: U256::default(),
                        gas_limit: u64::max_value().into(),
                    };

                    // Closure executing a contract call on a proving backend:
                    // every state item read is recorded so the resulting
                    // proof can be verified independently.
                    let call = move |addr, data| {
                        let mut state_db = state_db.boxed_clone();
                        let backend = ::state::backend::Proving::new(state_db.as_hash_db_mut());

                        let transaction = client.contract_call_tx(
                            BlockId::Hash(*header.parent_hash()),
                            addr,
                            data,
                        );

                        let mut state = State::from_existing(
                            backend,
                            header.state_root().clone(),
                            self.engine.account_start_nonce(header.number()),
                            client.factories.clone(),
                        )
                        .expect("state known to be available for just-imported block; qed");

                        let options = TransactOptions::with_no_tracing().dont_check_nonce();
                        let machine = self.engine.machine();
                        let schedule = machine.schedule(env_info.number);
                        let res = Executive::new(&mut state, &env_info, &machine, &schedule)
                            .transact(&transaction, options);

                        let res = match res {
                            Err(e) => {
                                trace!(target: "client", "Proved call failed: {}", e);
                                Err(e.to_string())
                            }
                            // Keep the call output plus the recorded proof items.
                            Ok(res) => Ok((res.output, state.drop().1.extract_proof())),
                        };

                        res.map(|(output, proof)| {
                            (output, proof.into_iter().map(|x| x.into_vec()).collect())
                        })
                    };

                    match with_state.generate_proof(&call) {
                        Ok(proof) => proof,
                        Err(e) => {
                            warn!(target: "client", "Failed to generate transition proof for block {}: {}", hash, e);
                            warn!(target: "client", "Snapshots produced by this client may be incomplete");
                            return Err(EngineError::FailedSystemCall(e).into());
                        }
                    }
                }
            };

            debug!(target: "client", "Block {} signals epoch end.", hash);

            Ok(Some(PendingTransition { proof: proof }))
        }
        EpochChange::No => Ok(None),
        // The engine asked for more data even though the full block,
        // receipts and state were all supplied — engine bug.
        EpochChange::Unsure(_) => {
            warn!(target: "client", "Detected invalid engine implementation.");
            warn!(target: "client", "Engine claims to require more block data, but everything provided.");
            Err(EngineError::InvalidEngine.into())
        }
    }
}
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
// check for ending of epoch and write transition if it occurs.
// `finalized` are the hashes the engine marked finalized while importing
// `header` (collected from the engine's ancestry actions at import time).
fn check_epoch_end<'a>(
    &self,
    header: &'a Header,
    finalized: &'a [H256],
    chain: &BlockChain,
    client: &Client,
) {
    let is_epoch_end = self.engine.is_epoch_end(
        header,
        finalized,
        &(|hash| client.block_header_decoded(BlockId::Hash(hash))),
        &(|hash| chain.get_pending_transition(hash)), // TODO: limit to current epoch.
    );

    if let Some(proof) = is_epoch_end {
        debug!(target: "client", "Epoch transition at block {}", header.hash());

        let mut batch = DBTransaction::new();
        chain.insert_epoch_transition(
            &mut batch,
            header.number(),
            EpochTransition {
                block_hash: header.hash(),
                block_number: header.number(),
                proof: proof,
            },
        );

        // always write the batch directly since epoch transition proofs are
        // fetched from a DB iterator and DB iterators are only available on
        // flushed data.
        client
            .db
            .read()
            .key_value()
            .write(batch)
            .expect("DB flush failed");
    }
}
}
impl Client {
/// Create a new client with given parameters.
/// The database is assumed to have been initialized with the correct columns.
pub fn new(
    config: ClientConfig,
    spec: &Spec,
    db: Arc<dyn BlockChainDB>,
    miner: Arc<Miner>,
    message_channel: IoChannel<ClientIoMessage>,
) -> Result<Arc<Client>, ::error::Error> {
    // Choose the trie flavour: fat DB additionally stores plain keys.
    let trie_spec = match config.fat_db {
        true => TrieSpec::Fat,
        false => TrieSpec::Secure,
    };

    let trie_factory = TrieFactory::new(trie_spec);
    let factories = Factories {
        vm: VmFactory::new(config.vm_type.clone(), config.jump_table_size),
        trie: trie_factory,
        accountdb: Default::default(),
    };

    // State journal on top of the key-value store, per the configured
    // pruning algorithm.
    let journal_db = journaldb::new(db.key_value().clone(), config.pruning, ::db::COL_STATE);
    let mut state_db = StateDB::new(journal_db, config.state_cache_size);
    if state_db.journal_db().is_empty() {
        // Sets the correct state root.
        state_db = spec.ensure_db_good(state_db, &factories)?;
        let mut batch = DBTransaction::new();
        state_db.journal_under(&mut batch, 0, &spec.genesis_header().hash())?;
        db.key_value().write(batch)?;
    }

    let gb = spec.genesis_block();
    let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
    let tracedb = RwLock::new(TraceDB::new(
        config.tracing.clone(),
        db.clone(),
        chain.clone(),
    ));

    trace!(
        "Cleanup journal: DB Earliest = {:?}, Latest = {:?}",
        state_db.journal_db().earliest_era(),
        state_db.journal_db().latest_era()
    );

    // Clamp the pruning history to the supported minimum.
    let history = if config.history < MIN_HISTORY_SIZE {
        info!(target: "client", "Ignoring pruning history parameter of {}\
            , falling back to minimum of {}",
            config.history, MIN_HISTORY_SIZE);
        MIN_HISTORY_SIZE
    } else {
        config.history
    };

    // Sanity check: the best block's state root should be present in the
    // journal; warn (but continue) if it is not.
    if !chain
        .block_header_data(&chain.best_block_hash())
        .map_or(true, |h| state_db.journal_db().contains(&h.state_root()))
    {
        warn!(
            "State root not found for block #{} ({:x})",
            chain.best_block_number(),
            chain.best_block_hash()
        );
    }

    let engine = spec.engine.clone();

    // Dark/Off modes start asleep; all others start awake.
    let awake = match config.mode {
        Mode::Dark(..) | Mode::Off => false,
        _ => true,
    };

    let importer = Importer::new(&config, engine.clone(), message_channel.clone(), miner)?;

    // Optional registrar contract address from the engine's extra params.
    let registrar_address = engine
        .additional_params()
        .get("registrar")
        .and_then(|s| Address::from_str(s).ok());
    if let Some(ref addr) = registrar_address {
        trace!(target: "client", "Found registrar at {}", addr);
    }

    let client = Arc::new(Client {
        enabled: AtomicBool::new(true),
        sleep_state: Mutex::new(SleepState::new(awake)),
        liveness: AtomicBool::new(awake),
        mode: Mutex::new(config.mode.clone()),
        chain: RwLock::new(chain),
        tracedb,
        engine,
        pruning: config.pruning.clone(),
        db: RwLock::new(db.clone()),
        state_db: RwLock::new(state_db),
        report: RwLock::new(Default::default()),
        io_channel: RwLock::new(message_channel),
        notify: RwLock::new(Vec::new()),
        queue_transactions: IoChannelQueue::new(config.transaction_verification_queue_size),
        queue_ancient_blocks: IoChannelQueue::new(MAX_ANCIENT_BLOCKS_QUEUE_SIZE),
        queued_ancient_blocks: Default::default(),
        ancient_blocks_import_lock: Default::default(),
        queue_consensus_message: IoChannelQueue::new(usize::max_value()),
        last_hashes: RwLock::new(VecDeque::new()),
        factories,
        history,
        on_user_defaults_change: Mutex::new(None),
        registrar_address,
        exit_handler: Mutex::new(None),
        importer,
        config,
    });

    // prune old states.
    {
        let state_db = client.state_db.read().boxed_clone();
        let chain = client.chain.read();
        client.prune_ancient(state_db, &chain)?;
    }

    // ensure genesis epoch proof in the DB.
    {
        let chain = client.chain.read();
        let gh = spec.genesis_header();
        if chain.epoch_transition(0, gh.hash()).is_none() {
            trace!(target: "client", "No genesis transition found.");

            let proof = client.with_proving_caller(BlockId::Number(0), |call| {
                client.engine.genesis_epoch_data(&gh, call)
            });
            // Proof generation failure is non-fatal: fall back to an empty
            // proof (snapshots may then be incomplete).
            let proof = match proof {
                Ok(proof) => proof,
                Err(e) => {
                    warn!(target: "client", "Error generating genesis epoch data: {}. Snapshots generated may not be complete.", e);
                    Vec::new()
                }
            };

            debug!(target: "client", "Obtained genesis transition proof: {:?}", proof);

            let mut batch = DBTransaction::new();
            chain.insert_epoch_transition(
                &mut batch,
                0,
                EpochTransition {
                    block_hash: gh.hash(),
                    block_number: 0,
                    proof: proof,
                },
            );

            client.db.read().key_value().write_buffered(batch);
        }
    }

    // ensure buffered changes are flushed.
    client.db.read().key_value().flush()?;
    Ok(client)
}
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
/// Wakes up client if it's a sleep.
pub fn keep_alive ( & self ) {
let should_wake = match * self . mode . lock ( ) {
Mode ::Dark ( .. ) | Mode ::Passive ( .. ) = > true ,
_ = > false ,
} ;
if should_wake {
self . wake_up ( ) ;
( * self . sleep_state . lock ( ) ) . last_activity = Some ( Instant ::now ( ) ) ;
}
2020-08-05 06:08:03 +02:00
}
2018-03-03 18:42:13 +01:00
/// Adds an actor to be notified on certain events
2020-07-29 10:36:15 +02:00
pub fn add_notify ( & self , target : Arc < dyn ChainNotify > ) {
2018-03-03 18:42:13 +01:00
self . notify . write ( ) . push ( Arc ::downgrade ( & target ) ) ;
}
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
/// Returns engine reference.
2020-07-29 10:36:15 +02:00
pub fn engine ( & self ) -> & dyn EthEngine {
2018-03-03 18:42:13 +01:00
& * self . engine
}
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
fn notify < F > ( & self , f : F )
where
2020-07-29 10:36:15 +02:00
F : Fn ( & dyn ChainNotify ) ,
2018-03-03 18:42:13 +01:00
{
2018-05-09 08:49:34 +02:00
for np in & * self . notify . read ( ) {
2018-03-03 18:42:13 +01:00
if let Some ( n ) = np . upgrade ( ) {
f ( & * n ) ;
2017-04-19 15:35:12 +02:00
}
2020-08-05 06:08:03 +02:00
}
}
2018-03-03 18:42:13 +01:00
/// Register an action to be done if a mode/spec_name change happens.
pub fn on_user_defaults_change<F>(&self, f: F)
where
    F: 'static + FnMut(Option<Mode>) + Send,
{
    let mut slot = self.on_user_defaults_change.lock();
    *slot = Some(Box::new(f));
}
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
/// Flush the block import queue.
pub fn flush_queue ( & self ) {
self . importer . block_queue . flush ( ) ;
2018-11-28 11:30:05 +01:00
while ! self . importer . block_queue . is_empty ( ) {
2018-03-03 18:42:13 +01:00
self . import_verified_blocks ( ) ;
}
2020-08-05 06:08:03 +02:00
}
2018-03-03 18:42:13 +01:00
/// The env info as of the best block.
pub fn latest_env_info(&self) -> EnvInfo {
    // The best block header is always stored, so `env_info` cannot be `None` here.
    self.env_info(BlockId::Latest)
        .expect("Best block header always stored; qed")
}
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
/// The env info as of a given block.
/// returns `None` if the block unknown.
pub fn env_info(&self, id: BlockId) -> Option<EnvInfo> {
    let header = self.block_header(id)?;
    Some(EnvInfo {
        number: header.number(),
        author: header.author(),
        timestamp: header.timestamp(),
        difficulty: header.difficulty(),
        last_hashes: self.build_last_hashes(&header.parent_hash()),
        gas_used: U256::default(),
        gas_limit: header.gas_limit(),
    })
}
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
// Build the list of 256 most recent block hashes ending at `parent_hash`
// (index 0 = parent, index i = i-th ancestor). Uses the cached deque when
// its front already matches `parent_hash`; otherwise walks parent links in
// the chain and refreshes the cache. Slots past the known ancestry remain
// `H256::default()`.
fn build_last_hashes(&self, parent_hash: &H256) -> Arc<LastHashes> {
    {
        // Fast path: cache still valid for this parent.
        let hashes = self.last_hashes.read();
        if hashes.front().map_or(false, |h| h == parent_hash) {
            let mut res = Vec::from(hashes.clone());
            res.resize(256, H256::default());
            return Arc::new(res);
        }
        // read lock is released here, before the write lock is taken below.
    }
    let mut last_hashes = LastHashes::new();
    last_hashes.resize(256, H256::default());
    last_hashes[0] = parent_hash.clone();
    let chain = self.chain.read();
    // Walk up to 255 parent links; stop early at an unknown block.
    for i in 0..255 {
        match chain.block_details(&last_hashes[i]) {
            Some(details) => {
                last_hashes[i + 1] = details.parent.clone();
            }
            None => break,
        }
    }
    // Refresh the cache for subsequent calls.
    let mut cached_hashes = self.last_hashes.write();
    *cached_hashes = VecDeque::from(last_hashes.clone());
    Arc::new(last_hashes)
}
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
/// This is triggered by a message coming from a block queue when the block is ready for insertion
pub fn import_verified_blocks(&self) -> usize {
    // Delegated to the importer, which owns the block queue and import pipeline.
    self.importer.import_verified_blocks(self)
}
2020-08-05 06:08:03 +02:00
2017-06-28 13:17:36 +02:00
// use a state-proving closure for the given block.
//
// Builds a `::machine::Call` that executes contract calls against the state
// at `id` via `prove_transaction`, returning the call output together with
// the collected proof items. The closure yields `Err(String)` when the
// state for `id` is unavailable (e.g. pruned).
fn with_proving_caller<F, T>(&self, id: BlockId, with_call: F) -> T
where
    F: FnOnce(&::machine::Call) -> T,
{
    let call = |a, d| {
        let tx = self.contract_call_tx(id, a, d);
        let (result, items) = self
            .prove_transaction(tx, id)
            .ok_or_else(|| format!("Unable to make call. State unavailable?"))?;

        let items = items.into_iter().map(|x| x.to_vec()).collect();
        Ok((result, items))
    };

    with_call(&call)
}
2020-08-05 06:08:03 +02:00
2020-12-02 11:31:11 +01:00
// t_nb 9.15 prune ancient states until below the memory limit or only the minimum amount remain.
fn prune_ancient(
    &self,
    mut state_db: StateDB,
    chain: &BlockChain,
) -> Result<(), ::error::Error> {
    // Nothing journalled yet => nothing to prune.
    let number = match state_db.journal_db().latest_era() {
        Some(n) => n,
        None => return Ok(()),
    };

    // prune all ancient eras until we're below the memory target,
    // but have at least the minimum number of states.
    loop {
        let needs_pruning = state_db.journal_db().is_pruned()
            && state_db.journal_db().journal_size() >= self.config.history_mem;

        if !needs_pruning {
            break;
        }
        match state_db.journal_db().earliest_era() {
            // Only prune eras older than the configured history window.
            Some(era) if era + self.history <= number => {
                trace!(target: "client", "Pruning state for ancient era {}", era);
                match chain.block_hash(era) {
                    Some(ancient_hash) => {
                        let mut batch = DBTransaction::new();
                        state_db.mark_canonical(&mut batch, era, &ancient_hash)?;
                        self.db.read().key_value().write_buffered(batch);
                        state_db.journal_db().flush();
                    }
                    None => debug!(target: "client", "Missing expected hash for block {}", era),
                }
            }
            _ => break, // means that every era is kept, no pruning necessary.
        }
    }

    Ok(())
}
2020-08-05 06:08:03 +02:00
2020-12-02 11:31:11 +01:00
// t_nb 9.14 update last hashes. They are build in step 7.5
2016-07-17 09:18:15 +02:00
fn update_last_hashes ( & self , parent : & H256 , hash : & H256 ) {
let mut hashes = self . last_hashes . write ( ) ;
if hashes . front ( ) . map_or ( false , | h | h = = parent ) {
if hashes . len ( ) > 255 {
hashes . pop_back ( ) ;
}
hashes . push_front ( hash . clone ( ) ) ;
2020-08-05 06:08:03 +02:00
}
2016-07-17 09:18:15 +02:00
}
2020-08-05 06:08:03 +02:00
2016-12-06 19:23:15 +01:00
/// Get shared miner reference.
#[cfg(test)]
pub fn miner(&self) -> Arc<Miner> {
    Arc::clone(&self.importer.miner)
}
2020-08-05 06:08:03 +02:00
2018-11-18 00:06:34 +01:00
#[ cfg(test) ]
pub fn state_db ( & self ) -> ::parking_lot ::RwLockReadGuard < StateDB > {
self . state_db . read ( )
}
2020-08-05 06:08:03 +02:00
2018-11-18 00:06:34 +01:00
#[ cfg(test) ]
pub fn chain ( & self ) -> Arc < BlockChain > {
self . chain . read ( ) . clone ( )
}
2020-08-05 06:08:03 +02:00
2016-12-11 12:32:01 +01:00
/// Replace io channel. Useful for testing.
pub fn set_io_channel(&self, io_channel: IoChannel<ClientIoMessage>) {
    *self.io_channel.write() = io_channel;
}
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
/// Get a copy of the best block's state, together with the header it
/// corresponds to.
pub fn latest_state_and_header(&self) -> (State<StateDB>, Header) {
    let mut nb_tries = 5;
    // Here, we are taking latest block and then latest state. If in between those two calls the `best` block got pruned the app will panic.
    // This is something that should not happen often and it is an edge case.
    // Locking the best_block read lock would be more straightforward, but can introduce overlapping locks;
    // because of this we are just taking 5 tries to get the best state — in most cases it will work on the first try.
    while nb_tries != 0 {
        let header = self.best_block_header();
        match State::from_existing(
            self.state_db.read().boxed_clone_canon(&header.hash()),
            *header.state_root(),
            self.engine.account_start_nonce(header.number()),
            self.factories.clone(),
        ) {
            Ok(ret) => return (ret, header),
            Err(_) => {
                // Best block changed (or state pruned) between the two reads; retry.
                warn!("Couldn't fetch state of best block header: {:?}", header);
                nb_tries -= 1;
            }
        }
    }
    panic!("Couldn't get latest state in 5 tries");
}
2020-08-05 06:08:03 +02:00
2016-07-27 21:34:32 +02:00
/// Attempt to get a copy of a specific block's final state.
2020-08-05 06:08:03 +02:00
///
2016-05-25 17:35:15 +02:00
/// This will not fail if given BlockId::Latest.
2016-12-09 23:01:43 +01:00
/// Otherwise, this can fail (but may not) if the DB prunes state or the block
2016-12-23 18:46:17 +01:00
/// is unknown.
pub fn state_at ( & self , id : BlockId ) -> Option < State < StateDB > > {
// fast path for latest state.
2017-02-21 12:35:21 +01:00
if let BlockId ::Latest = id {
let ( state , _ ) = self . latest_state_and_header ( ) ;
2016-05-26 11:46:45 +02:00
return Some ( state ) ;
2020-08-05 06:08:03 +02:00
}
2019-11-11 21:57:38 +01:00
let block_number = match self . block_number ( id ) {
Some ( num ) = > num ,
None = > return None ,
2020-08-05 06:08:03 +02:00
} ;
2019-11-11 21:57:38 +01:00
self . block_header ( id ) . and_then ( | header | {
let db = self . state_db . read ( ) . boxed_clone ( ) ;
2020-08-05 06:08:03 +02:00
2017-09-10 18:03:35 +02:00
// early exit for pruned blocks
if db . is_pruned ( ) & & self . pruning_info ( ) . earliest_state > block_number {
2016-06-02 21:01:47 +02:00
return None ;
}
2020-08-05 06:08:03 +02:00
2018-01-02 09:43:08 +01:00
let root = header . state_root ( ) ;
2017-05-05 16:01:19 +02:00
State ::from_existing (
db ,
2020-08-05 06:08:03 +02:00
root ,
2017-05-05 16:01:19 +02:00
self . engine . account_start_nonce ( block_number ) ,
2017-06-28 09:10:57 +02:00
self . factories . clone ( ) ,
2020-08-05 06:08:03 +02:00
)
2017-06-28 09:10:57 +02:00
. ok ( )
2016-05-25 17:35:15 +02:00
} )
}
2020-08-05 06:08:03 +02:00
2016-07-27 21:34:32 +02:00
/// Attempt to get a copy of a specific block's beginning state.
///
2016-12-09 23:01:43 +01:00
/// This will not fail if given BlockId::Latest.
2016-07-27 21:34:32 +02:00
/// Otherwise, this can fail (but may not) if the DB prunes state.
2017-02-21 12:35:21 +01:00
pub fn state_at_beginning ( & self , id : BlockId ) -> Option < State < StateDB > > {
2018-03-03 18:42:13 +01:00
match self . block_number ( id ) {
2018-06-14 20:56:27 +02:00
None = > None ,
Some ( 0 ) = > self . state_at ( id ) ,
2018-03-03 18:42:13 +01:00
Some ( n ) = > self . state_at ( BlockId ::Number ( n - 1 ) ) ,
2016-07-27 21:34:32 +02:00
}
}
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
/// Get a copy of the best block's state.
2018-03-03 18:42:13 +01:00
pub fn state ( & self ) -> impl StateInfo {
2016-01-26 15:00:22 +01:00
let ( state , _ ) = self . latest_state_and_header ( ) ;
2020-08-05 06:08:03 +02:00
state
}
2016-01-26 15:00:22 +01:00
/// Get info on the cache.
pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
    self.chain.read().cache_size()
}
2020-08-05 06:08:03 +02:00
2016-01-18 19:23:28 +01:00
/// Get the report.
pub fn report(&self) -> ClientReport {
    // Start from the cached report, then fold in the current state-DB sizes.
    let mut report = self.report.read().clone();
    self.state_db.read().get_sizes(&mut report.item_sizes);
    report
}
2016-01-18 23:23:32 +01:00
/// Tick the client: collect cache garbage and, unless `prevent_sleep`,
/// re-evaluate whether the client should snooze per its mode.
// TODO: manage by real events.
pub fn tick(&self, prevent_sleep: bool) {
    self.check_garbage();
    if !prevent_sleep {
        self.check_snooze();
    }
}
2016-01-18 23:23:32 +01:00
// Collect garbage from the chain cache, block queue and trace DB.
fn check_garbage(&self) {
    self.chain.read().collect_garbage();
    self.importer.block_queue.collect_garbage();
    self.tracedb.read().collect_garbage();
}
2016-11-13 15:52:33 +01:00
// Put the client to sleep (or wake it back up) depending on the configured
// mode and how long it has been idle.
fn check_snooze(&self) {
    let mode = self.mode.lock().clone();
    match mode {
        // Dark: sleep after `timeout` of inactivity; never auto-wakes.
        Mode::Dark(timeout) => {
            let mut ss = self.sleep_state.lock();
            if let Some(t) = ss.last_activity {
                if Instant::now() > t + timeout {
                    self.sleep(false);
                    ss.last_activity = None;
                }
            }
        }
        // Passive: sleep after `timeout` of inactivity, then wake again
        // `wakeup_after` after the automatic sleep.
        Mode::Passive(timeout, wakeup_after) => {
            let mut ss = self.sleep_state.lock();
            let now = Instant::now();
            if let Some(t) = ss.last_activity {
                if now > t + timeout {
                    self.sleep(false);
                    ss.last_activity = None;
                    // Record when we auto-slept so we know when to wake.
                    ss.last_autosleep = Some(now);
                }
            }
            if let Some(t) = ss.last_autosleep {
                if now > t + wakeup_after {
                    self.wake_up();
                    ss.last_activity = Some(now);
                    ss.last_autosleep = None;
                }
            }
        }
        // Other modes never snooze.
        _ => {}
    }
}
2016-07-05 17:50:46 +02:00
/// Take a snapshot at the given block.
/// If the ID given is "latest", this will default to 1000 blocks behind.
pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(
    &self,
    writer: W,
    at: BlockId,
    p: &snapshot::Progress,
) -> Result<(), EthcoreError> {
    let db = self.state_db.read().journal_db().boxed_clone();
    let best_block_number = self.chain_info().best_block_number;
    let block_number = self
        .block_number(at)
        .ok_or_else(|| snapshot::Error::InvalidStartingBlock(at))?;

    // Cannot snapshot a state that pruning has already discarded.
    if db.is_pruned() && self.pruning_info().earliest_state > block_number {
        return Err(snapshot::Error::OldBlockPrunedDB.into());
    }

    // Cap the look-back for `Latest` at 1000 blocks.
    let history = ::std::cmp::min(self.history, 1000);

    let start_hash = match at {
        BlockId::Latest => {
            // Bounded below by the earliest era still in the journal.
            let start_num = match db.earliest_era() {
                Some(era) => ::std::cmp::max(era, best_block_number.saturating_sub(history)),
                None => best_block_number.saturating_sub(history),
            };

            match self.block_hash(BlockId::Number(start_num)) {
                Some(h) => h,
                None => return Err(snapshot::Error::InvalidStartingBlock(at).into()),
            }
        }
        _ => match self.block_hash(at) {
            Some(hash) => hash,
            None => return Err(snapshot::Error::InvalidStartingBlock(at).into()),
        },
    };

    let processing_threads = self.config.snapshot.processing_threads;
    // Engines without snapshot support cannot provide a chunker.
    let chunker = self
        .engine
        .snapshot_components()
        .ok_or(snapshot::Error::SnapshotsUnsupported)?;
    snapshot::take_snapshot(
        chunker,
        &self.chain.read(),
        start_hash,
        db.as_hash_db(),
        writer,
        p,
        processing_threads,
    )?;
    Ok(())
}
2016-10-17 13:05:57 +02:00
/// Ask the client what the history parameter is.
pub fn pruning_history(&self) -> u64 {
    self.history
}
2016-12-09 23:01:43 +01:00
fn block_hash ( chain : & BlockChain , id : BlockId ) -> Option < H256 > {
2016-08-08 18:41:30 +02:00
match id {
BlockId ::Hash ( hash ) = > Some ( hash ) ,
BlockId ::Number ( number ) = > chain . block_hash ( number ) ,
BlockId ::Earliest = > chain . block_hash ( 0 ) ,
BlockId ::Latest = > Some ( chain . best_block_hash ( ) ) ,
2020-08-05 06:08:03 +02:00
}
}
2016-08-08 18:41:30 +02:00
fn transaction_address ( & self , id : TransactionId ) -> Option < TransactionAddress > {
2016-03-20 17:29:39 +01:00
match id {
2018-09-13 12:58:49 +02:00
TransactionId ::Hash ( ref hash ) = > self . chain . read ( ) . transaction_address ( hash ) ,
TransactionId ::Location ( id , index ) = > {
Self ::block_hash ( & self . chain . read ( ) , id ) . map ( | hash | TransactionAddress {
2018-03-03 18:42:13 +01:00
block_hash : hash ,
index : index ,
2020-08-05 06:08:03 +02:00
} )
}
}
}
2018-03-03 18:42:13 +01:00
fn wake_up ( & self ) {
2016-12-09 23:01:43 +01:00
if ! self . liveness . load ( AtomicOrdering ::Relaxed ) {
2017-08-04 15:58:14 +02:00
self . liveness . store ( true , AtomicOrdering ::Relaxed ) ;
self . notify ( | n | n . start ( ) ) ;
info! ( target : " mode " , " wake_up: Waking. " ) ;
2020-08-05 06:08:03 +02:00
}
2016-02-10 19:29:27 +01:00
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
// Put the client to sleep, notifying listeners. Unless `force`d, sleeping
// is refused while the import queue is still busy.
fn sleep(&self, force: bool) {
    if self.liveness.load(AtomicOrdering::Relaxed) {
        // only sleep if the import queue is mostly empty.
        if force || (self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON) {
            self.liveness.store(false, AtomicOrdering::Relaxed);
            self.notify(|n| n.stop());
            info!(target: "mode", "sleep: Sleeping.");
        } else {
            info!(target: "mode", "sleep: Cannot sleep - syncing ongoing.");
            // TODO: Consider uncommenting.
            //(*self.sleep_state.lock()).last_activity = Some(Instant::now());
        }
    }
}
2018-03-03 18:42:13 +01:00
// transaction for calling contracts from services like engine.
2016-07-05 17:50:46 +02:00
// from the null sender, with 50M gas.
2019-08-12 18:55:11 +02:00
fn contract_call_tx (
& self ,
block_id : BlockId ,
2016-07-05 17:50:46 +02:00
address : Address ,
data : Bytes ,
) -> SignedTransaction {
let from = Address ::default ( ) ;
2020-12-10 16:42:05 +01:00
TypedTransaction ::Legacy ( transaction ::Transaction {
2018-04-13 17:34:27 +02:00
nonce : self
2016-07-05 17:50:46 +02:00
. nonce ( & from , block_id )
. unwrap_or_else ( | | self . engine . account_start_nonce ( 0 ) ) ,
2017-10-10 20:30:11 +02:00
action : Action ::Call ( address ) ,
2017-04-19 14:58:19 +02:00
gas : U256 ::from ( 50_000_000 ) ,
2017-10-10 20:30:11 +02:00
gas_price : U256 ::default ( ) ,
2016-07-05 17:50:46 +02:00
value : U256 ::default ( ) ,
2017-10-10 20:30:11 +02:00
data : data ,
2020-12-10 16:42:05 +01:00
} )
2017-04-19 14:58:19 +02:00
. fake_sign ( from )
2020-08-05 06:08:03 +02:00
}
2017-04-19 14:58:19 +02:00
/// Execute `t` virtually (without committing state) against `state`,
/// honouring the tracing flags in `analytics`.
///
/// Dispatches to one of four `TransactOptions` combinations depending on
/// whether transaction tracing and/or VM tracing were requested.
fn do_virtual_call(
    machine: &::machine::EthereumMachine,
    env_info: &EnvInfo,
    state: &mut State<StateDB>,
    t: &SignedTransaction,
    analytics: CallAnalytics,
) -> Result<Executed, CallError> {
    // Monomorphised worker: runs the transaction with the concrete
    // tracer/vm-tracer pair and optionally attaches a state diff.
    fn call<V, T>(
        state: &mut State<StateDB>,
        env_info: &EnvInfo,
        machine: &::machine::EthereumMachine,
        state_diff: bool,
        transaction: &SignedTransaction,
        options: TransactOptions<T, V>,
    ) -> Result<Executed<T::Output, V::Output>, CallError>
    where
        T: trace::Tracer,
        V: trace::VMTracer,
    {
        let options = options.dont_check_nonce().save_output_from_contract();
        // Snapshot the pre-call state only when a diff was requested.
        let original_state = if state_diff {
            Some(state.clone())
        } else {
            None
        };
        let schedule = machine.schedule(env_info.number);

        let mut ret = Executive::new(state, env_info, &machine, &schedule)
            .transact_virtual(transaction, options)?;

        if let Some(original) = original_state {
            ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?);
        }
        Ok(ret)
    }

    let state_diff = analytics.state_diffing;

    match (analytics.transaction_tracing, analytics.vm_tracing) {
        (true, true) => call(
            state,
            env_info,
            machine,
            state_diff,
            t,
            TransactOptions::with_tracing_and_vm_tracing(),
        ),
        (true, false) => call(
            state,
            env_info,
            machine,
            state_diff,
            t,
            TransactOptions::with_tracing(),
        ),
        (false, true) => call(
            state,
            env_info,
            machine,
            state_diff,
            t,
            TransactOptions::with_vm_tracing(),
        ),
        (false, false) => call(
            state,
            env_info,
            machine,
            state_diff,
            t,
            TransactOptions::with_no_tracing(),
        ),
    }
}
2018-03-03 18:42:13 +01:00
fn block_number_ref ( & self , id : & BlockId ) -> Option < BlockNumber > {
match * id {
BlockId ::Number ( number ) = > Some ( number ) ,
BlockId ::Hash ( ref hash ) = > self . chain . read ( ) . block_number ( hash ) ,
BlockId ::Earliest = > Some ( 0 ) ,
BlockId ::Latest = > Some ( self . chain . read ( ) . best_block_number ( ) ) ,
}
2020-08-05 06:08:03 +02:00
}
2018-04-03 10:01:28 +02:00
/// Retrieve a decoded header given `BlockId`
///
/// This method optimizes access patterns for latest block header
/// to avoid excessive RLP encoding, decoding and hashing.
fn block_header_decoded(&self, id: BlockId) -> Option<Header> {
    match id {
        BlockId::Latest => Some(self.chain.read().best_block_header()),
        // Requests that identify the current best block (by hash or by
        // number) are served from the cached decoded best header.
        BlockId::Hash(ref hash) if hash == &self.chain.read().best_block_hash() => {
            Some(self.chain.read().best_block_header())
        }
        BlockId::Number(number) if number == self.chain.read().best_block_number() => {
            Some(self.chain.read().best_block_header())
        }
        // Everything else: fetch the encoded header and decode it.
        _ => self.block_header(id).and_then(|h| h.decode().ok()),
    }
}
2018-03-03 18:42:13 +01:00
}
impl snapshot::DatabaseRestore for Client {
    /// Restart the client with a new backend
    fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> {
        trace!(target: "snapshot", "Replacing client database with {:?}", new_db);

        // Hold the import lock and all client-side write locks while the
        // database is swapped, so no import can observe a half-replaced
        // backend.
        let _import_lock = self.importer.import_lock.lock();
        let mut state_db = self.state_db.write();
        let mut chain = self.chain.write();
        let mut tracedb = self.tracedb.write();
        self.importer.miner.clear();
        let db = self.db.write();
        db.restore(new_db)?;

        // Rebuild the state, chain and trace views on top of the restored
        // database, preserving the old cache size and pruning settings.
        let cache_size = state_db.cache_size();
        *state_db = StateDB::new(
            journaldb::new(db.key_value().clone(), self.pruning, ::db::COL_STATE),
            cache_size,
        );
        *chain = Arc::new(BlockChain::new(
            self.config.blockchain.clone(),
            &[],
            db.clone(),
        ));
        *tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone());
        Ok(())
    }
}
2019-01-16 16:37:26 +01:00
impl BlockChainReset for Client {
fn reset ( & self , num : u32 ) -> Result < ( ) , String > {
if num as u64 > self . pruning_history ( ) {
return Err ( " Attempting to reset to block with pruned state " . into ( ) ) ;
2019-06-11 20:56:03 +02:00
} else if num = = 0 {
return Err ( " invalid number of blocks to reset " . into ( ) ) ;
2019-01-16 16:37:26 +01:00
}
2020-08-05 06:08:03 +02:00
2019-06-11 20:56:03 +02:00
let mut blocks_to_delete = Vec ::with_capacity ( num as usize ) ;
let mut best_block_hash = self . chain . read ( ) . best_block_hash ( ) ;
let mut batch = DBTransaction ::with_capacity ( blocks_to_delete . len ( ) ) ;
2020-08-05 06:08:03 +02:00
2019-06-11 20:56:03 +02:00
for _ in 0 .. num {
let current_header = self
. chain
. read ( )
. block_header_data ( & best_block_hash )
. expect (
" best_block_hash was fetched from db; block_header_data should exist in db; qed " ,
) ;
best_block_hash = current_header . parent_hash ( ) ;
2020-08-05 06:08:03 +02:00
2019-06-11 20:56:03 +02:00
let ( number , hash ) = ( current_header . number ( ) , current_header . hash ( ) ) ;
batch . delete ( ::db ::COL_HEADERS , & hash ) ;
batch . delete ( ::db ::COL_BODIES , & hash ) ;
Writable ::delete ::< BlockDetails , H264 > ( & mut batch , ::db ::COL_EXTRA , & hash ) ;
2019-01-16 16:37:26 +01:00
Writable ::delete ::< H256 , BlockNumberKey > ( & mut batch , ::db ::COL_EXTRA , & number ) ;
2020-08-05 06:08:03 +02:00
2019-06-11 20:56:03 +02:00
blocks_to_delete . push ( ( number , hash ) ) ;
2019-01-16 16:37:26 +01:00
}
2020-08-05 06:08:03 +02:00
2019-06-11 20:56:03 +02:00
let hashes = blocks_to_delete
. iter ( )
. map ( | ( _ , hash ) | hash )
. collect ::< Vec < _ > > ( ) ;
info! (
" Deleting block hashes {} " ,
Colour ::Red . bold ( ) . paint ( format! ( " {:#?} " , hashes ) )
) ;
2020-08-05 06:08:03 +02:00
2019-06-11 20:56:03 +02:00
let mut best_block_details = Readable ::read ::< BlockDetails , H264 > (
& * * self . db . read ( ) . key_value ( ) ,
::db ::COL_EXTRA ,
& best_block_hash ,
)
. expect ( " block was previously imported; best_block_details should exist; qed " ) ;
2020-08-05 06:08:03 +02:00
2019-06-11 20:56:03 +02:00
let ( _ , last_hash ) = blocks_to_delete
. last ( )
. expect ( " num is > 0; blocks_to_delete can't be empty; qed " ) ;
// remove the last block as a child so that it can be re-imported
// ethcore/blockchain/src/blockchain.rs/Blockchain::is_known_child()
best_block_details . children . retain ( | h | * h ! = * last_hash ) ;
batch . write ( ::db ::COL_EXTRA , & best_block_hash , & best_block_details ) ;
2019-01-16 16:37:26 +01:00
// update the new best block hash
2019-06-11 20:56:03 +02:00
batch . put ( ::db ::COL_EXTRA , b " best " , & best_block_hash ) ;
2020-08-05 06:08:03 +02:00
2019-01-16 16:37:26 +01:00
self . db
. read ( )
. key_value ( )
2019-06-11 20:56:03 +02:00
. write ( batch )
. map_err ( | err | format! ( " could not delete blocks; io error occurred: {} " , err ) ) ? ;
2020-08-05 06:08:03 +02:00
2019-01-16 16:37:26 +01:00
info! (
" New best block hash {} " ,
Colour ::Green . bold ( ) . paint ( format! ( " {:?} " , best_block_hash ) )
) ;
2020-08-05 06:08:03 +02:00
2019-01-16 16:37:26 +01:00
Ok ( ( ) )
}
}
2018-03-03 18:42:13 +01:00
impl Nonce for Client {
    /// Account nonce at the given block, if that block's state is available.
    fn nonce(&self, address: &Address, id: BlockId) -> Option<U256> {
        let state = self.state_at(id)?;
        state.nonce(address).ok()
    }
}
impl Balance for Client {
fn balance ( & self , address : & Address , state : StateOrBlock ) -> Option < U256 > {
match state {
StateOrBlock ::State ( s ) = > s . balance ( address ) . ok ( ) ,
StateOrBlock ::Block ( id ) = > self . state_at ( id ) . and_then ( | s | s . balance ( address ) . ok ( ) ) ,
}
}
}
// Empty impl: `AccountData` declares no methods that need a body here.
impl AccountData for Client {}
impl ChainInfo for Client {
    fn chain_info(&self) -> BlockChainInfo {
        // Start from the blockchain's own view, then fold in the total
        // difficulty of blocks still waiting in the import queue so the
        // "pending" figure reflects downloaded-but-not-imported work.
        let mut chain_info = self.chain.read().chain_info();
        chain_info.pending_total_difficulty =
            chain_info.total_difficulty + self.importer.block_queue.total_difficulty();
        chain_info
    }
}
impl BlockInfo for Client {
    // Encoded (RLP) header of the block behind `id`, if present.
    fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
        let chain = self.chain.read();
        Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash))
    }

    fn best_block_header(&self) -> Header {
        self.chain.read().best_block_header()
    }

    // Full encoded block behind `id`, if present.
    fn block(&self, id: BlockId) -> Option<encoded::Block> {
        let chain = self.chain.read();
        Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash))
    }

    fn code_hash(&self, address: &Address, id: BlockId) -> Option<H256> {
        // `code_hash` yields Result<Option<_>, _>: treat a state-read error
        // the same as a missing account (None).
        self.state_at(id)
            .and_then(|s| s.code_hash(address).unwrap_or(None))
    }
}
impl TransactionInfo for Client {
    /// Hash of the block containing the given transaction, if known.
    fn transaction_block(&self, id: TransactionId) -> Option<H256> {
        let addr = self.transaction_address(id)?;
        Some(addr.block_hash)
    }
}
// Empty impl: `BlockChainTrait` declares no methods that need a body here.
impl BlockChainTrait for Client {}
impl RegistryInfo for Client {
    fn registry_address(&self, name: String, block: BlockId) -> Option<Address> {
        use ethabi::FunctionOutputDecoder;

        // No registrar configured for this chain — nothing to look up.
        let address = self.registrar_address?;

        // Query `get_address(keccak(name), "A")` on the registrar contract
        // via a virtual call at `block`.
        let (data, decoder) = registry::functions::get_address::call(keccak(name.as_bytes()), "A");
        let value = decoder
            .decode(&self.call_contract(block, address, data).ok()?)
            .ok()?;
        // A zero address means the name is not registered.
        if value.is_zero() {
            None
        } else {
            Some(value)
        }
    }
}
impl CallContract for Client {
    // Virtually execute a contract call at `block_id`, returning the raw
    // output bytes. Errors are stringified for the trait's interface.
    fn call_contract(
        &self,
        block_id: BlockId,
        address: Address,
        data: Bytes,
    ) -> Result<Bytes, String> {
        // Both the state and the decoded header must still be available;
        // otherwise the block's state has been pruned.
        let state_pruned = || CallError::StatePruned.to_string();
        let state = &mut self.state_at(block_id).ok_or_else(&state_pruned)?;
        let header = self
            .block_header_decoded(block_id)
            .ok_or_else(&state_pruned)?;

        // Build a fake-signed, zero-sender transaction and run it virtually.
        let transaction = self.contract_call_tx(block_id, address, data);

        self.call(&transaction, Default::default(), state, &header)
            .map_err(|e| format!("{:?}", e))
            .map(|executed| executed.output)
    }
}
impl ImportBlock for Client {
    // t_nb 2.0 import block to client
    fn import_block(&self, unverified: Unverified) -> EthcoreResult<H256> {
        // t_nb 2.1 check if header hash is known to us.
        if self.chain.read().is_known(&unverified.hash()) {
            bail!(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain));
        }

        // t_nb 2.2 check if parent is known
        let status = self.block_status(BlockId::Hash(unverified.parent_hash()));
        if status == BlockStatus::Unknown {
            bail!(EthcoreErrorKind::Block(BlockError::UnknownParent(
                unverified.parent_hash()
            )));
        }

        // Capture the raw bytes up-front when the queue is empty: on a
        // successful insert the block will be propagated immediately.
        let raw = if self.importer.block_queue.is_empty() {
            Some((
                unverified.bytes.clone(),
                unverified.header.hash(),
                *unverified.header.difficulty(),
            ))
        } else {
            None
        };

        // t_nb 2.3
        match self.importer.block_queue.import(unverified) {
            Ok(hash) => {
                // t_nb 2.4 If block is okay and the queue is empty we
                // propagate the block in a `PriorityTask` to be rebroadcast.
                if let Some((raw, hash, difficulty)) = raw {
                    self.notify(move |n| n.block_pre_import(&raw, &hash, &difficulty));
                }
                Ok(hash)
            }
            // t_nb 2.5 if block is not okay, report it as bad. We only care
            // about block errors (not import errors).
            Err((Some(block), EthcoreError(EthcoreErrorKind::Block(err), _))) => {
                self.importer
                    .bad_blocks
                    .report(block.bytes, err.to_string());
                bail!(EthcoreErrorKind::Block(err))
            }
            // Block error but no raw bytes available to report against.
            Err((None, EthcoreError(EthcoreErrorKind::Block(err), _))) => {
                error!(target: "client", "BlockError {} detected but it was missing raw_bytes of the block", err);
                bail!(EthcoreErrorKind::Block(err))
            }
            // Non-block errors are passed through unchanged.
            Err((_, e)) => Err(e),
        }
    }
}
2016-02-29 14:57:41 +01:00
2018-03-03 18:42:13 +01:00
impl StateClient for Client {
    type State = State<::state_db::StateDB>;

    // Both methods simply delegate to the inherent `Client` implementations.
    fn latest_state_and_header(&self) -> (Self::State, Header) {
        Client::latest_state_and_header(self)
    }

    fn state_at(&self, id: BlockId) -> Option<Self::State> {
        Client::state_at(self, id)
    }
}
2018-03-03 18:42:13 +01:00
impl Call for Client {
    type State = State<::state_db::StateDB>;

    /// Execute a single transaction virtually against `state` at `header`.
    fn call(
        &self,
        transaction: &SignedTransaction,
        analytics: CallAnalytics,
        state: &mut Self::State,
        header: &Header,
    ) -> Result<Executed, CallError> {
        // The gas limit is lifted to the maximum so the call cannot fail
        // on the block's own gas limit.
        let env_info = EnvInfo {
            number: header.number(),
            author: header.author().clone(),
            timestamp: header.timestamp(),
            difficulty: header.difficulty().clone(),
            last_hashes: self.build_last_hashes(header.parent_hash()),
            gas_used: U256::default(),
            gas_limit: U256::max_value(),
        };
        let machine = self.engine.machine();

        Self::do_virtual_call(&machine, &env_info, state, transaction, analytics)
    }

    /// Execute several transactions in sequence against the same state,
    /// carrying cumulative gas usage from one to the next.
    fn call_many(
        &self,
        transactions: &[(SignedTransaction, CallAnalytics)],
        state: &mut Self::State,
        header: &Header,
    ) -> Result<Vec<Executed>, CallError> {
        let mut env_info = EnvInfo {
            number: header.number(),
            author: header.author().clone(),
            timestamp: header.timestamp(),
            difficulty: header.difficulty().clone(),
            last_hashes: self.build_last_hashes(header.parent_hash()),
            gas_used: U256::default(),
            gas_limit: U256::max_value(),
        };

        let mut results = Vec::with_capacity(transactions.len());
        let machine = self.engine.machine();

        for &(ref t, analytics) in transactions {
            let ret = Self::do_virtual_call(machine, &env_info, state, t, analytics)?;
            // Later transactions observe the gas used by earlier ones.
            env_info.gas_used = ret.cumulative_gas_used;
            results.push(ret);
        }

        Ok(results)
    }

    /// Binary-search the smallest gas amount with which `t` executes
    /// without an exception.
    fn estimate_gas(
        &self,
        t: &SignedTransaction,
        state: &Self::State,
        header: &Header,
    ) -> Result<U256, CallError> {
        // Initial upper bound is the block gas limit; up to 10x of it is
        // allowed as a hard cap.
        let (mut upper, max_upper, env_info) = {
            let init = *header.gas_limit();
            let max = init * U256::from(10);

            let env_info = EnvInfo {
                number: header.number(),
                author: header.author().clone(),
                timestamp: header.timestamp(),
                difficulty: header.difficulty().clone(),
                last_hashes: self.build_last_hashes(header.parent_hash()),
                gas_used: U256::default(),
                gas_limit: max,
            };

            (init, max, env_info)
        };

        let sender = t.sender();
        let options = || TransactOptions::with_tracing().dont_check_nonce();

        // Re-sign the transaction with a candidate gas amount and run it
        // virtually on a clone of the state.
        let exec = |gas| {
            let mut tx = t.as_unsigned().clone();
            tx.tx_mut().gas = gas;
            let tx = tx.fake_sign(sender);

            let mut clone = state.clone();
            let machine = self.engine.machine();
            let schedule = machine.schedule(env_info.number);
            Executive::new(&mut clone, &env_info, &machine, &schedule)
                .transact_virtual(&tx, options())
        };

        // "Success" = execution completed without a VM exception.
        let cond = |gas| exec(gas).ok().map_or(false, |r| r.exception.is_none());

        if !cond(upper) {
            // Block gas limit was not enough: retry at the hard cap, and
            // surface the concrete failure if even that is insufficient.
            upper = max_upper;
            match exec(upper) {
                Ok(v) => {
                    if let Some(exception) = v.exception {
                        return Err(CallError::Exceptional(exception));
                    }
                }
                Err(_e) => {
                    trace!(target: "estimate_gas", "estimate_gas failed with {}", upper);
                    let err = ExecutionError::Internal(format!(
                        "Requires higher than upper limit of {}",
                        upper
                    ));
                    return Err(err.into());
                }
            }
        }

        // Lower bound: the transaction's intrinsic gas requirement.
        let lower = t
            .tx()
            .gas_required(&self.engine.schedule(env_info.number))
            .into();
        if cond(lower) {
            trace!(target: "estimate_gas", "estimate_gas succeeded with {}", lower);
            return Ok(lower);
        }

        /// Find transition point between `lower` and `upper` where `cond` changes from `false` to `true`.
        /// Returns the lowest value between `lower` and `upper` for which `cond` returns true.
        /// We assert: `cond(lower) = false`, `cond(upper) = true`
        fn binary_chop<F, E>(mut lower: U256, mut upper: U256, mut cond: F) -> Result<U256, E>
        where
            F: FnMut(U256) -> bool,
        {
            while upper - lower > 1.into() {
                let mid = (lower + upper) / 2;
                trace!(target: "estimate_gas", "{} .. {} .. {}", lower, mid, upper);
                let c = cond(mid);
                match c {
                    true => upper = mid,
                    false => lower = mid,
                };
                trace!(target: "estimate_gas", "{} => {} .. {}", c, lower, upper);
            }
            Ok(upper)
        }

        // binary chop to non-excepting call with gas somewhere between 21000 and block gas limit
        trace!(target: "estimate_gas", "estimate_gas chopping {} .. {}", lower, upper);
        binary_chop(lower, upper, cond)
    }
}
impl EngineInfo for Client {
    // Expose the consensus engine behind a trait object.
    fn engine(&self) -> &dyn EthEngine {
        Client::engine(self)
    }
}
2017-01-10 18:56:41 +01:00
2018-09-08 04:04:28 +02:00
impl BadBlocks for Client {
    // All blocks reported as bad during import, paired with the reason.
    fn bad_blocks(&self) -> Vec<(Unverified, String)> {
        self.importer.bad_blocks.bad_blocks()
    }
}
2018-03-03 18:42:13 +01:00
impl BlockChainClient for Client {
2016-12-09 23:01:43 +01:00
// Re-execute a single historical transaction virtually and return its
// execution result.
fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result<Executed, CallError> {
    let address = self
        .transaction_address(id)
        .ok_or(CallError::TransactionNotFound)?;
    let block = BlockId::Hash(address.block_hash);

    const PROOF: &'static str =
        "The transaction address contains a valid index within block; qed";
    // Replay the containing block up to and including the target
    // transaction, keeping only the target's result.
    Ok(self
        .replay_block_transactions(block, analytics)?
        .nth(address.index)
        .expect(PROOF)
        .1)
}

// Re-execute every transaction of `block` in order, yielding
// `(tx_hash, execution result)` pairs lazily.
fn replay_block_transactions(
    &self,
    block: BlockId,
    analytics: CallAnalytics,
) -> Result<Box<dyn Iterator<Item = (H256, Executed)>>, CallError> {
    let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?;
    let body = self.block_body(block).ok_or(CallError::StatePruned)?;
    // State at the beginning of `block`, i.e. before any of its
    // transactions executed.
    let mut state = self
        .state_at_beginning(block)
        .ok_or(CallError::StatePruned)?;
    let txs = body.transactions();
    let engine = self.engine.clone();

    const PROOF: &'static str =
        "Transactions fetched from blockchain; blockchain transactions are valid; qed";
    const EXECUTE_PROOF: &'static str = "Transaction replayed; qed";

    Ok(Box::new(txs.into_iter().map(move |t| {
        let transaction_hash = t.hash();
        let t = SignedTransaction::new(t).expect(PROOF);
        let machine = engine.machine();
        let x = Self::do_virtual_call(machine, &env_info, &mut state, &t, analytics)
            .expect(EXECUTE_PROOF);
        // Carry cumulative gas forward so each transaction sees the gas
        // consumed by the ones before it.
        env_info.gas_used = env_info.gas_used + x.gas_used;
        (transaction_hash, x)
    })))
}
2020-08-05 06:08:03 +02:00
2018-06-06 14:14:45 +02:00
// Current operating mode of the client.
fn mode(&self) -> Mode {
    let r = self.mode.lock().clone().into();
    trace!(target: "mode", "Asked for mode = {:?}. returning {:?}", &*self.mode.lock(), r);
    r
}

// Permanently disable the client: force `Mode::Off`, refuse future mode
// changes and drop any queued blocks.
fn disable(&self) {
    self.set_mode(Mode::Off);
    self.enabled.store(false, AtomicOrdering::Relaxed);
    self.clear_queue();
}

fn set_mode(&self, new_mode: Mode) {
    trace!(target: "mode", "Client::set_mode({:?})", new_mode);
    // Once disabled, the mode is pinned and changes are ignored.
    if !self.enabled.load(AtomicOrdering::Relaxed) {
        return;
    }
    {
        let mut mode = self.mode.lock();
        *mode = new_mode.clone().into();
        trace!(target: "mode", "Mode now {:?}", &*mode);
        // Persist the change through the user-defaults callback, if any.
        if let Some(ref mut f) = *self.on_user_defaults_change.lock() {
            trace!(target: "mode", "Making callback...");
            f(Some((&*mode).clone()))
        }
    }
    // Apply liveness side effects after releasing the mode lock.
    match new_mode {
        Mode::Active => self.wake_up(),
        Mode::Off => self.sleep(true),
        _ => {
            (*self.sleep_state.lock()).last_activity = Some(Instant::now());
        }
    }
}
2017-03-13 12:10:53 +01:00
// Name of the chain spec the client is currently configured with.
fn spec_name(&self) -> String {
    self.config.spec_name.clone()
}

// Request a chain (spec) switch. Only possible while the client is
// enabled and an exit handler was registered (i.e. we run hypervised).
fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()> {
    trace!(target: "mode", "Client::set_spec_name({:?})", new_spec_name);
    if !self.enabled.load(AtomicOrdering::Relaxed) {
        return Err(());
    }
    if let Some(ref h) = *self.exit_handler.lock() {
        (*h)(new_spec_name);
        Ok(())
    } else {
        warn!("Not hypervised; cannot change chain.");
        Err(())
    }
}
2016-12-11 02:02:40 +01:00
// Block number behind `id`, if resolvable.
fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
    self.block_number_ref(&id)
}
2020-08-05 06:08:03 +02:00
2016-12-28 13:44:51 +01:00
fn block_body ( & self , id : BlockId ) -> Option < encoded ::Body > {
2016-09-06 15:31:13 +02:00
let chain = self . chain . read ( ) ;
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
Self ::block_hash ( & chain , id ) . and_then ( | hash | chain . block_body ( & hash ) )
2016-01-07 21:35:06 +01:00
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
// Status of the block behind `id`: in chain, queued, or unknown.
fn block_status(&self, id: BlockId) -> BlockStatus {
    let chain = self.chain.read();
    match Self::block_hash(&chain, id) {
        // Already imported into the chain.
        Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain,
        // Not imported yet — defer to the verification queue's status.
        Some(hash) => self.importer.block_queue.status(&hash).into(),
        None => BlockStatus::Unknown,
    }
}

// Whether the verification queue is currently working on a fork of the
// best chain.
fn is_processing_fork(&self) -> bool {
    let chain = self.chain.read();
    self.importer
        .block_queue
        .is_processing_fork(&chain.best_block_hash(), &chain)
}
2016-12-09 23:01:43 +01:00
fn block_total_difficulty ( & self , id : BlockId ) -> Option < U256 > {
2017-08-04 15:58:14 +02:00
let chain = self . chain . read ( ) ;
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
Self ::block_hash ( & chain , id )
. and_then ( | hash | chain . block_details ( & hash ) )
. map ( | d | d . total_difficulty )
2016-03-05 16:46:04 +01:00
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
/// Storage trie root of `address` at the given block, when both the state
/// is available and the account exists.
fn storage_root(&self, address: &Address, id: BlockId) -> Option<H256> {
    let state = self.state_at(id)?;
    // `storage_root` yields Result<Option<_>, _>; flatten both layers.
    state.storage_root(address).ok()?
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
// Hash of the block behind `id`, if resolvable.
fn block_hash(&self, id: BlockId) -> Option<H256> {
    let chain = self.chain.read();
    Self::block_hash(&chain, id)
}

// Contract code of `address`. Outer Option: state availability;
// inner Option: whether the account has code.
fn code(&self, address: &Address, state: StateOrBlock) -> Option<Option<Bytes>> {
    let result = match state {
        StateOrBlock::State(s) => s.code(address).ok(),
        StateOrBlock::Block(id) => self.state_at(id).and_then(|s| s.code(address).ok()),
    };

    // Converting from `Option<Option<Arc<Bytes>>>` to `Option<Option<Bytes>>`
    result.map(|c| c.map(|c| (&*c).clone()))
}

// Storage slot `position` of `address`, from a supplied state or from
// the state at a given block.
fn storage_at(&self, address: &Address, position: &H256, state: StateOrBlock) -> Option<H256> {
    match state {
        StateOrBlock::State(s) => s.storage_at(address, position).ok(),
        StateOrBlock::Block(id) => self
            .state_at(id)
            .and_then(|s| s.storage_at(address, position).ok()),
    }
}
2016-12-09 23:01:43 +01:00
// List up to `count` account addresses at block `id`, starting strictly
// after `after`. Requires a fat database (stores address preimages);
// returns None otherwise or on any trie/state access failure.
fn list_accounts(
    &self,
    id: BlockId,
    after: Option<&Address>,
    count: u64,
) -> Option<Vec<Address>> {
    if !self.factories.trie.is_fat() {
        trace!(target: "fatdb", "list_accounts: Not a fat DB");
        return None;
    }

    let state = match self.state_at(id) {
        Some(state) => state,
        _ => return None,
    };

    // Open a read-only trie over the account state root.
    let (root, db) = state.drop();
    let db = &db.as_hash_db();
    let trie = match self.factories.trie.readonly(db, &root) {
        Ok(trie) => trie,
        _ => {
            trace!(target: "fatdb", "list_accounts: Couldn't open the DB");
            return None;
        }
    };

    let mut iter = match trie.iter() {
        Ok(iter) => iter,
        _ => return None,
    };

    if let Some(after) = after {
        if let Err(e) = iter.seek(after) {
            trace!(target: "fatdb", "list_accounts: Couldn't seek the DB: {:?}", e);
        } else {
            // Position the iterator after the `after` element
            iter.next();
        }
    }

    // Skip unreadable entries, keep only the addresses.
    let accounts = iter
        .filter_map(|item| item.ok().map(|(addr, _)| Address::from_slice(&addr)))
        .take(count as usize)
        .collect();

    Some(accounts)
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
fn list_storage (
& self ,
id : BlockId ,
account : & Address ,
after : Option < & H256 > ,
count : u64 ,
) -> Option < Vec < H256 > > {
2016-11-27 11:11:56 +01:00
if ! self . factories . trie . is_fat ( ) {
2018-07-02 18:50:05 +02:00
trace! ( target : " fatdb " , " list_storage: Not a fat DB " ) ;
2016-11-27 11:11:56 +01:00
return None ;
}
2020-08-05 06:08:03 +02:00
2016-11-27 11:11:56 +01:00
let state = match self . state_at ( id ) {
Some ( state ) = > state ,
_ = > return None ,
} ;
2020-08-05 06:08:03 +02:00
2016-11-27 11:11:56 +01:00
let root = match state . storage_root ( account ) {
2017-02-26 13:10:50 +01:00
Ok ( Some ( root ) ) = > root ,
2016-11-27 11:11:56 +01:00
_ = > return None ,
} ;
2020-08-05 06:08:03 +02:00
2016-11-27 11:11:56 +01:00
let ( _ , db ) = state . drop ( ) ;
2019-02-20 19:09:34 +01:00
let account_db = & self
. factories
. accountdb
. readonly ( db . as_hash_db ( ) , keccak ( account ) ) ;
let account_db = & account_db . as_hash_db ( ) ;
let trie = match self . factories . trie . readonly ( account_db , & root ) {
2016-11-27 11:11:56 +01:00
Ok ( trie ) = > trie ,
_ = > {
trace! ( target : " fatdb " , " list_storage: Couldn't open the DB " ) ;
return None ;
}
} ;
2020-08-05 06:08:03 +02:00
2016-11-27 11:11:56 +01:00
let mut iter = match trie . iter ( ) {
Ok ( iter ) = > iter ,
_ = > return None ,
} ;
2020-08-05 06:08:03 +02:00
2016-11-27 11:11:56 +01:00
if let Some ( after ) = after {
if let Err ( e ) = iter . seek ( after ) {
2018-08-01 18:03:41 +02:00
trace! ( target : " fatdb " , " list_storage: Couldn't seek the DB: {:?} " , e ) ;
} else {
// Position the iterator after the `after` element
iter . next ( ) ;
2016-11-27 11:11:56 +01:00
}
2020-08-05 06:08:03 +02:00
}
2016-11-27 11:11:56 +01:00
let keys = iter
. filter_map ( | item | item . ok ( ) . map ( | ( key , _ ) | H256 ::from_slice ( & key ) ) )
. take ( count as usize )
. collect ( ) ;
2020-08-05 06:08:03 +02:00
2016-11-27 11:11:56 +01:00
Some ( keys )
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
/// Look up a localized transaction by id.
fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction> {
    let address = self.transaction_address(id)?;
    self.chain.read().transaction(&address)
}
2020-08-05 06:08:03 +02:00
2016-12-28 13:44:51 +01:00
fn uncle ( & self , id : UncleId ) -> Option < encoded ::Header > {
2016-07-01 12:26:44 +02:00
let index = id . position ;
2016-12-28 13:44:51 +01:00
self . block_body ( id . block )
. and_then ( | body | body . view ( ) . uncle_rlp_at ( index ) )
. map ( encoded ::Header ::new )
2016-03-22 16:07:42 +01:00
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
fn transaction_receipt ( & self , id : TransactionId ) -> Option < LocalizedReceipt > {
2018-09-25 19:06:14 +02:00
// NOTE Don't use block_receipts here for performance reasons
let address = self . transaction_address ( id ) ? ;
let hash = address . block_hash ;
2016-09-06 15:31:13 +02:00
let chain = self . chain . read ( ) ;
2018-09-25 19:06:14 +02:00
let number = chain . block_number ( & hash ) ? ;
let body = chain . block_body ( & hash ) ? ;
let mut receipts = chain . block_receipts ( & hash ) ? . receipts ;
receipts . truncate ( address . index + 1 ) ;
2020-08-05 06:08:03 +02:00
2018-09-25 19:06:14 +02:00
let transaction = body
. view ( )
. localized_transaction_at ( & hash , number , address . index ) ? ;
let receipt = receipts . pop ( ) ? ;
let gas_used = receipts . last ( ) . map_or_else ( | | 0. into ( ) , | r | r . gas_used ) ;
let no_of_logs = receipts
. into_iter ( )
. map ( | receipt | receipt . logs . len ( ) )
. sum ::< usize > ( ) ;
2020-08-05 06:08:03 +02:00
2018-09-25 19:06:14 +02:00
let receipt = transaction_receipt (
self . engine ( ) . machine ( ) ,
transaction ,
2018-11-18 00:06:34 +01:00
receipt ,
2018-09-25 19:06:14 +02:00
gas_used ,
no_of_logs ,
) ;
Some ( receipt )
2020-08-05 06:08:03 +02:00
}
2018-09-25 19:06:14 +02:00
fn localized_block_receipts ( & self , id : BlockId ) -> Option < Vec < LocalizedReceipt > > {
let hash = self . block_hash ( id ) ? ;
2020-08-05 06:08:03 +02:00
2018-09-25 19:06:14 +02:00
let chain = self . chain . read ( ) ;
let receipts = chain . block_receipts ( & hash ) ? ;
2016-09-06 15:31:13 +02:00
let number = chain . block_number ( & hash ) ? ;
let body = chain . block_body ( & hash ) ? ;
let engine = self . engine . clone ( ) ;
2020-08-05 06:08:03 +02:00
2016-09-06 15:31:13 +02:00
let mut gas_used = 0. into ( ) ;
2017-04-20 16:21:53 +02:00
let mut no_of_logs = 0 ;
2020-08-05 06:08:03 +02:00
2017-04-20 16:21:53 +02:00
Some (
body . view ( )
. localized_transactions ( & hash , number )
. into_iter ( )
2018-09-25 19:06:14 +02:00
. zip ( receipts . receipts )
2017-04-20 16:21:53 +02:00
. map ( move | ( transaction , receipt ) | {
let result = transaction_receipt (
engine . machine ( ) ,
2016-02-27 01:37:12 +01:00
transaction ,
2020-08-05 06:08:03 +02:00
receipt ,
2016-02-27 01:37:12 +01:00
gas_used ,
no_of_logs ,
2020-08-05 06:08:03 +02:00
) ;
2016-05-24 21:56:17 +02:00
gas_used = result . cumulative_gas_used ;
2018-09-25 19:06:14 +02:00
no_of_logs + = result . logs . len ( ) ;
2020-08-05 06:08:03 +02:00
result
} )
2016-05-24 21:56:17 +02:00
. collect ( ) ,
2020-08-05 06:08:03 +02:00
)
}
2016-05-24 21:56:17 +02:00
/// Compute the tree route between two blocks; both must already be known
/// to the chain, otherwise `None`.
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
    let chain = self.chain.read();
    if chain.is_known(from) && chain.is_known(to) {
        chain.tree_route(from.clone(), to.clone())
    } else {
        None
    }
}
2016-03-11 20:09:14 +01:00
fn find_uncles ( & self , hash : & H256 ) -> Option < Vec < H256 > > {
2018-01-02 09:43:08 +01:00
self . chain . read ( ) . find_uncle_hashes ( hash , MAX_UNCLE_AGE )
2016-01-07 21:35:06 +01:00
}
2020-08-05 06:08:03 +02:00
2016-01-22 04:54:38 +01:00
/// Raw receipts stored for the block with the given hash.
fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
    let chain = self.chain.read();
    chain.block_receipts(hash)
}
2020-08-05 06:08:03 +02:00
2018-11-28 11:30:05 +01:00
/// Current status of the block import queue.
fn queue_info(&self) -> BlockQueueInfo {
    let queue = &self.importer.block_queue;
    queue.queue_info()
}
2020-08-05 06:08:03 +02:00
2016-01-21 23:33:52 +01:00
fn is_queue_empty ( & self ) -> bool {
2018-03-03 18:42:13 +01:00
self . importer . block_queue . is_empty ( )
2016-01-07 21:35:06 +01:00
}
2020-08-05 06:08:03 +02:00
2016-08-17 19:25:02 +02:00
fn clear_queue ( & self ) {
self . importer . block_queue . clear ( ) ;
2020-08-05 06:08:03 +02:00
}
2016-08-17 19:25:02 +02:00
fn additional_params ( & self ) -> BTreeMap < String , String > {
2018-08-13 09:47:10 +02:00
self . engine . additional_params ( ) . into_iter ( ) . collect ( )
2020-08-05 06:08:03 +02:00
}
2018-08-13 09:47:10 +02:00
fn logs ( & self , filter : Filter ) -> Result < Vec < LocalizedLogEntry > , BlockId > {
2018-03-03 18:42:13 +01:00
let chain = self . chain . read ( ) ;
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
// First, check whether `filter.from_block` and `filter.to_block` is on the canon chain. If so, we can use the
// optimized version.
let is_canon = | id | {
match id {
// If it is referred by number, then it is always on the canon chain.
& BlockId ::Earliest | & BlockId ::Latest | & BlockId ::Number ( _ ) = > true ,
// If it is referred by hash, we see whether a hash -> number -> hash conversion gives us the same
// result.
& BlockId ::Hash ( ref hash ) = > chain . is_canon ( hash ) ,
}
} ;
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
let blocks = if is_canon ( & filter . from_block ) & & is_canon ( & filter . to_block ) {
// If we are on the canon chain, use bloom filter to fetch required hashes.
//
// If we are sure the block does not exist (where val > best_block_number), then return error. Note that we
// don't need to care about pending blocks here because RPC query sets pending back to latest (or handled
// pending logs themselves).
let from = match self . block_number_ref ( & filter . from_block ) {
Some ( val ) if val < = chain . best_block_number ( ) = > val ,
_ = > return Err ( filter . from_block . clone ( ) ) ,
} ;
let to = match self . block_number_ref ( & filter . to_block ) {
Some ( val ) if val < = chain . best_block_number ( ) = > val ,
_ = > return Err ( filter . to_block . clone ( ) ) ,
} ;
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
// If from is greater than to, then the current bloom filter behavior is to just return empty
// result. There's no point to continue here.
if from > to {
return Err ( filter . to_block . clone ( ) ) ;
}
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
chain
. blocks_with_bloom ( & filter . bloom_possibilities ( ) , from , to )
. into_iter ( )
. filter_map ( | n | chain . block_hash ( n ) )
. collect ::< Vec < H256 > > ( )
} else {
// Otherwise, we use a slower version that finds a link between from_block and to_block.
let from_hash = match Self ::block_hash ( & chain , filter . from_block ) {
Some ( val ) = > val ,
None = > return Err ( filter . from_block . clone ( ) ) ,
} ;
let from_number = match chain . block_number ( & from_hash ) {
Some ( val ) = > val ,
None = > return Err ( BlockId ::Hash ( from_hash ) ) ,
} ;
let to_hash = match Self ::block_hash ( & chain , filter . to_block ) {
Some ( val ) = > val ,
None = > return Err ( filter . to_block . clone ( ) ) ,
} ;
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
let blooms = filter . bloom_possibilities ( ) ;
let bloom_match = | header : & encoded ::Header | {
blooms
. iter ( )
. any ( | bloom | header . log_bloom ( ) . contains_bloom ( bloom ) )
} ;
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
let ( blocks , last_hash ) = {
let mut blocks = Vec ::new ( ) ;
let mut current_hash = to_hash ;
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
loop {
let header = match chain . block_header_data ( & current_hash ) {
Some ( val ) = > val ,
None = > return Err ( BlockId ::Hash ( current_hash ) ) ,
} ;
if bloom_match ( & header ) {
blocks . push ( current_hash ) ;
}
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
// Stop if `from` block is reached.
if header . number ( ) < = from_number {
break ;
}
current_hash = header . parent_hash ( ) ;
2018-05-02 09:40:27 +02:00
}
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
blocks . reverse ( ) ;
( blocks , current_hash )
2018-05-02 09:40:27 +02:00
} ;
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
// Check if we've actually reached the expected `from` block.
if last_hash ! = from_hash | | blocks . is_empty ( ) {
// In this case, from_hash is the cause (for not matching last_hash).
return Err ( BlockId ::Hash ( from_hash ) ) ;
}
2020-08-05 06:08:03 +02:00
2018-08-13 09:47:10 +02:00
blocks
2018-05-02 09:40:27 +02:00
} ;
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
Ok ( chain . logs ( blocks , | entry | filter . matches ( entry ) , filter . limit ) )
2016-02-17 12:35:37 +01:00
}
2020-08-05 06:08:03 +02:00
2016-05-02 12:17:30 +02:00
fn filter_traces ( & self , filter : TraceFilter ) -> Option < Vec < LocalizedTrace > > {
2018-05-01 14:47:04 +02:00
if ! self . tracedb . read ( ) . tracing_enabled ( ) {
return None ;
}
2020-08-05 06:08:03 +02:00
2018-02-18 23:02:02 +01:00
let start = self . block_number ( filter . range . start ) ? ;
let end = self . block_number ( filter . range . end ) ? ;
2020-08-05 06:08:03 +02:00
2018-02-18 23:02:02 +01:00
let db_filter = trace ::Filter {
range : start as usize .. end as usize ,
from_address : filter . from_address . into ( ) ,
to_address : filter . to_address . into ( ) ,
} ;
2020-08-05 06:08:03 +02:00
2018-02-18 23:02:02 +01:00
let traces = self
. tracedb
. read ( )
. filter ( & db_filter )
. into_iter ( )
. skip ( filter . after . unwrap_or ( 0 ) )
. take ( filter . count . unwrap_or ( usize ::max_value ( ) ) )
. collect ( ) ;
Some ( traces )
2016-05-02 12:17:30 +02:00
}
2020-08-05 06:08:03 +02:00
2016-05-02 12:17:30 +02:00
fn trace ( & self , trace : TraceId ) -> Option < LocalizedTrace > {
2018-05-01 14:47:04 +02:00
if ! self . tracedb . read ( ) . tracing_enabled ( ) {
return None ;
}
2020-08-05 06:08:03 +02:00
2016-05-02 12:17:30 +02:00
let trace_address = trace . address ;
self . transaction_address ( trace . transaction )
. and_then ( | tx_address | {
2016-12-09 23:01:43 +01:00
self . block_number ( BlockId ::Hash ( tx_address . block_hash ) )
2016-09-06 15:31:13 +02:00
. and_then ( | number | {
self . tracedb
. read ( )
. trace ( number , tx_address . index , trace_address )
2016-05-02 12:17:30 +02:00
} )
2020-08-05 06:08:03 +02:00
} )
2016-05-02 12:17:30 +02:00
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
fn transaction_traces ( & self , transaction : TransactionId ) -> Option < Vec < LocalizedTrace > > {
2018-05-01 14:47:04 +02:00
if ! self . tracedb . read ( ) . tracing_enabled ( ) {
return None ;
}
2020-08-05 06:08:03 +02:00
2016-05-02 12:17:30 +02:00
self . transaction_address ( transaction )
. and_then ( | tx_address | {
2016-12-09 23:01:43 +01:00
self . block_number ( BlockId ::Hash ( tx_address . block_hash ) )
2016-09-06 15:31:13 +02:00
. and_then ( | number | {
self . tracedb
. read ( )
. transaction_traces ( number , tx_address . index )
2016-05-02 12:17:30 +02:00
} )
2020-08-05 06:08:03 +02:00
} )
2016-05-02 12:17:30 +02:00
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
/// All localized traces of one block. `None` when tracing is disabled or
/// the block is unknown.
fn block_traces(&self, block: BlockId) -> Option<Vec<LocalizedTrace>> {
    if !self.tracedb.read().tracing_enabled() {
        return None;
    }

    let number = self.block_number(block)?;
    self.tracedb.read().block_traces(number)
}
2020-08-05 06:08:03 +02:00
2016-04-28 21:47:44 +02:00
/// Recent block hashes, computed from the current best block.
fn last_hashes(&self) -> LastHashes {
    let best = self.chain.read().best_block_hash();
    (*self.build_last_hashes(&best)).clone()
}
2020-08-05 06:08:03 +02:00
2018-08-02 12:58:02 +02:00
fn transactions_to_propagate ( & self ) -> Vec < Arc < VerifiedTransaction > > {
const PROPAGATE_FOR_BLOCKS : u32 = 4 ;
const MIN_TX_TO_PROPAGATE : usize = 256 ;
2020-08-05 06:08:03 +02:00
2018-08-02 12:58:02 +02:00
let block_gas_limit = * self . best_block_header ( ) . gas_limit ( ) ;
let min_tx_gas : U256 = self . latest_schedule ( ) . tx_gas . into ( ) ;
2020-08-05 06:08:03 +02:00
2018-08-02 12:58:02 +02:00
let max_len = if min_tx_gas . is_zero ( ) {
usize ::max_value ( )
} else {
cmp ::max (
MIN_TX_TO_PROPAGATE ,
cmp ::min (
( block_gas_limit / min_tx_gas ) * PROPAGATE_FOR_BLOCKS ,
// never more than usize
usize ::max_value ( ) . into ( ) ,
)
. as_u64 ( ) as usize ,
)
} ;
2018-06-12 08:22:54 +02:00
self . importer
. miner
. ready_transactions ( self , max_len , ::miner ::PendingOrdering ::Priority )
2016-05-31 19:52:53 +02:00
}
2020-08-05 06:08:03 +02:00
2017-08-21 13:46:58 +02:00
fn signing_chain_id ( & self ) -> Option < u64 > {
self . engine . signing_chain_id ( & self . latest_env_info ( ) )
2016-11-03 22:22:25 +01:00
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
/// Engine-specific extra info for the header of block `id`.
fn block_extra_info(&self, id: BlockId) -> Option<BTreeMap<String, String>> {
    let header = self.block_header_decoded(id)?;
    Some(self.engine.extra_info(&header))
}
2020-08-05 06:08:03 +02:00
2016-12-09 23:01:43 +01:00
/// Engine-specific extra info for an uncle header; `None` when the uncle is
/// missing or its header fails to decode.
fn uncle_extra_info(&self, id: UncleId) -> Option<BTreeMap<String, String>> {
    let uncle = self.uncle(id)?;
    let decoded = uncle.decode().ok()?;
    Some(self.engine.extra_info(&decoded))
}
2020-08-05 06:08:03 +02:00
2016-11-09 23:25:54 +01:00
/// Earliest block and earliest state era this node still stores.
fn pruning_info(&self) -> PruningInfo {
    let earliest_chain = self.chain.read().first_block_number().unwrap_or(1);
    let earliest_state = self
        .state_db
        .read()
        .journal_db()
        .earliest_era()
        .unwrap_or(0);
    PruningInfo {
        earliest_chain,
        earliest_state,
    }
}
2020-08-05 06:08:03 +02:00
2018-04-13 17:34:27 +02:00
fn transact_contract ( & self , address : Address , data : Bytes ) -> Result < ( ) , transaction ::Error > {
let authoring_params = self . importer . miner . authoring_params ( ) ;
2019-03-31 10:39:38 +02:00
let service_transaction_checker = self . importer . miner . service_transaction_checker ( ) ;
let gas_price = if let Some ( checker ) = service_transaction_checker {
match checker . check_address ( self , authoring_params . author ) {
Ok ( true ) = > U256 ::zero ( ) ,
_ = > self . importer . miner . sensible_gas_price ( ) ,
}
} else {
self . importer . miner . sensible_gas_price ( )
2019-01-28 10:58:34 +01:00
} ;
2020-12-10 16:42:05 +01:00
let transaction = TypedTransaction ::Legacy ( transaction ::Transaction {
2018-04-13 17:34:27 +02:00
nonce : self . latest_nonce ( & authoring_params . author ) ,
2017-01-24 10:03:58 +01:00
action : Action ::Call ( address ) ,
2018-04-13 17:34:27 +02:00
gas : self . importer . miner . sensible_gas_limit ( ) ,
2019-01-28 10:58:34 +01:00
gas_price ,
2017-01-24 10:03:58 +01:00
value : U256 ::zero ( ) ,
data : data ,
2020-12-10 16:42:05 +01:00
} ) ;
2017-08-21 13:46:58 +02:00
let chain_id = self . engine . signing_chain_id ( & self . latest_env_info ( ) ) ;
2018-04-13 17:34:27 +02:00
let signature = self
. engine
2020-12-10 16:42:05 +01:00
. sign ( transaction . signature_hash ( chain_id ) )
2018-04-13 17:34:27 +02:00
. map_err ( | e | transaction ::Error ::InvalidSignature ( e . to_string ( ) ) ) ? ;
2017-08-21 13:46:58 +02:00
let signed = SignedTransaction ::new ( transaction . with_signature ( signature , chain_id ) ) ? ;
2018-03-03 18:42:13 +01:00
self . importer
. miner
. import_own_transaction ( self , signed . into ( ) )
2017-01-24 10:03:58 +01:00
}
2020-08-05 06:08:03 +02:00
2016-12-10 23:58:39 +01:00
/// Address of the registrar contract configured for this chain, if any.
fn registrar_address(&self) -> Option<Address> {
    // `Address` is `Copy`, so the stored option can be returned directly;
    // the previous explicit `.clone()` was redundant (clippy: clone_on_copy).
    self.registrar_address
}
2016-01-07 21:35:06 +01:00
}
2016-02-10 12:50:27 +01:00
2018-05-09 08:49:34 +02:00
impl IoClient for Client {
fn queue_transactions ( & self , transactions : Vec < Bytes > , peer_id : usize ) {
2018-06-05 19:49:46 +02:00
trace_time! ( " queue_transactions " ) ;
2018-05-09 08:49:34 +02:00
let len = transactions . len ( ) ;
2018-07-13 12:23:57 +02:00
self . queue_transactions
. queue ( & self . io_channel . read ( ) , len , move | client | {
2018-05-09 08:49:34 +02:00
trace_time! ( " import_queued_transactions " ) ;
2020-12-10 16:42:05 +01:00
let best_block_number = client . best_block_header ( ) . number ( ) ;
2018-05-09 08:49:34 +02:00
let txs : Vec < UnverifiedTransaction > = transactions
. iter ( )
2020-12-10 16:42:05 +01:00
. filter_map ( | bytes | {
client
. engine
. decode_transaction ( bytes , best_block_number )
. ok ( )
} )
2018-05-09 08:49:34 +02:00
. collect ( ) ;
2020-08-05 06:08:03 +02:00
2018-05-09 08:49:34 +02:00
client . notify ( | notify | {
notify . transactions_received ( & txs , peer_id ) ;
} ) ;
2020-08-05 06:08:03 +02:00
2018-05-09 08:49:34 +02:00
client
. importer
. miner
. import_external_transactions ( client , txs ) ;
} )
. unwrap_or_else ( | e | {
debug! ( target : " client " , " Ignoring {} transactions: {} " , len , e ) ;
} ) ;
}
2020-08-05 06:08:03 +02:00
2018-09-24 12:28:54 +02:00
fn queue_ancient_block (
& self ,
unverified : Unverified ,
receipts_bytes : Bytes ,
) -> EthcoreResult < H256 > {
2018-06-05 19:49:46 +02:00
trace_time! ( " queue_ancient_block " ) ;
2020-08-05 06:08:03 +02:00
2018-08-02 11:20:46 +02:00
let hash = unverified . hash ( ) ;
2018-05-09 08:49:34 +02:00
{
// check block order
2018-05-17 10:58:35 +02:00
if self . chain . read ( ) . is_known ( & hash ) {
2018-09-24 12:28:54 +02:00
bail! ( EthcoreErrorKind ::Import ( ImportErrorKind ::AlreadyInChain ) ) ;
2018-05-09 08:49:34 +02:00
}
2018-08-02 11:20:46 +02:00
let parent_hash = unverified . parent_hash ( ) ;
2018-06-05 19:49:46 +02:00
// NOTE To prevent race condition with import, make sure to check queued blocks first
// (and attempt to acquire lock)
2018-08-02 11:20:46 +02:00
let is_parent_pending = self . queued_ancient_blocks . read ( ) . 0. contains ( & parent_hash ) ;
2019-01-09 14:47:14 +01:00
if ! is_parent_pending & & ! self . chain . read ( ) . is_known ( & parent_hash ) {
bail! ( EthcoreErrorKind ::Block ( BlockError ::UnknownParent (
parent_hash
) ) ) ;
2018-05-09 08:49:34 +02:00
}
2020-08-05 06:08:03 +02:00
}
2018-06-05 19:49:46 +02:00
// we queue blocks here and trigger an IO message.
{
let mut queued = self . queued_ancient_blocks . write ( ) ;
queued . 0. insert ( hash ) ;
2018-08-02 11:20:46 +02:00
queued . 1. push_back ( ( unverified , receipts_bytes ) ) ;
2018-06-05 19:49:46 +02:00
}
2020-08-05 06:08:03 +02:00
2018-06-05 19:49:46 +02:00
let queued = self . queued_ancient_blocks . clone ( ) ;
let lock = self . ancient_blocks_import_lock . clone ( ) ;
2018-08-24 10:42:24 +02:00
self . queue_ancient_blocks
. queue ( & self . io_channel . read ( ) , 1 , move | client | {
2018-06-05 19:49:46 +02:00
trace_time! ( " import_ancient_block " ) ;
// Make sure to hold the lock here to prevent importing out of order.
// We use separate lock, cause we don't want to block queueing.
let _lock = lock . lock ( ) ;
for _i in 0 .. MAX_ANCIENT_BLOCKS_TO_IMPORT {
let first = queued . write ( ) . 1. pop_front ( ) ;
2018-08-02 11:20:46 +02:00
if let Some ( ( unverified , receipts_bytes ) ) = first {
let hash = unverified . hash ( ) ;
2018-06-07 11:15:21 +02:00
let result = client . importer . import_old_block (
2018-08-02 11:20:46 +02:00
unverified ,
2018-06-05 19:49:46 +02:00
& receipts_bytes ,
2018-06-20 15:13:07 +02:00
& * * client . db . read ( ) . key_value ( ) ,
2018-06-07 11:15:21 +02:00
& * client . chain . read ( ) ,
) ;
if let Err ( e ) = result {
2018-06-05 19:49:46 +02:00
error! ( target : " client " , " Error importing ancient block: {} " , e ) ;
2020-08-05 06:08:03 +02:00
2019-01-09 14:47:14 +01:00
let mut queued = queued . write ( ) ;
queued . 0. clear ( ) ;
queued . 1. clear ( ) ;
2018-06-07 11:15:21 +02:00
}
2018-06-05 19:49:46 +02:00
// remove from pending
queued . write ( ) . 0. remove ( & hash ) ;
} else {
break ;
}
2020-08-05 06:08:03 +02:00
}
2018-08-24 10:42:24 +02:00
} ) ? ;
2020-08-05 06:08:03 +02:00
2018-08-24 10:42:24 +02:00
Ok ( hash )
2018-05-09 08:49:34 +02:00
}
2020-08-05 06:08:03 +02:00
2018-05-09 08:49:34 +02:00
fn queue_consensus_message ( & self , message : Bytes ) {
2018-07-13 12:23:57 +02:00
match self
. queue_consensus_message
. queue ( & self . io_channel . read ( ) , 1 , move | client | {
2018-05-09 08:49:34 +02:00
if let Err ( e ) = client . engine ( ) . handle_message ( & message ) {
debug! ( target : " poa " , " Invalid message received: {} " , e ) ;
}
} ) {
Ok ( _ ) = > ( ) ,
Err ( e ) = > {
debug! ( target : " poa " , " Ignoring the message, error queueing: {} " , e ) ;
}
}
}
}
2018-03-03 18:42:13 +01:00
impl ReopenBlock for Client {
fn reopen_block ( & self , block : ClosedBlock ) -> OpenBlock {
let engine = & * self . engine ;
let mut block = block . reopen ( engine ) ;
2019-03-15 13:22:47 +01:00
let max_uncles = engine . maximum_uncle_count ( block . header . number ( ) ) ;
if block . uncles . len ( ) < max_uncles {
2018-03-03 18:42:13 +01:00
let chain = self . chain . read ( ) ;
let h = chain . best_block_hash ( ) ;
// Add new uncles
let uncles = chain
2019-03-14 21:34:26 +01:00
. find_uncle_hashes ( & h , MAX_UNCLE_AGE )
2018-03-03 18:42:13 +01:00
. unwrap_or_else ( Vec ::new ) ;
2020-08-05 06:08:03 +02:00
2018-03-03 18:42:13 +01:00
for h in uncles {
2019-03-15 13:22:47 +01:00
if ! block . uncles . iter ( ) . any ( | header | header . hash ( ) = = h ) {
2018-04-03 10:01:28 +02:00
let uncle = chain
. block_header_data ( & h )
. expect ( " find_uncle_hashes only returns hashes for existing headers; qed " ) ;
2018-05-09 12:05:56 +02:00
let uncle = uncle . decode ( ) . expect ( " decoding failure " ) ;
block . push_uncle ( uncle ) . expect (
" pushing up to maximum_uncle_count;
2018-03-03 18:42:13 +01:00
push_uncle is not ok only if more than maximum_uncle_count is pushed ;
so all push_uncle are Ok ;
qed " ,
) ;
2019-03-15 13:22:47 +01:00
if block . uncles . len ( ) > = max_uncles {
break ;
2018-03-03 18:42:13 +01:00
}
2020-08-05 06:08:03 +02:00
}
2018-03-03 18:42:13 +01:00
}
2020-08-05 06:08:03 +02:00
}
2018-03-03 18:42:13 +01:00
block
2016-10-28 16:42:24 +02:00
}
2018-03-03 18:42:13 +01:00
}
2016-10-28 16:42:24 +02:00
2018-03-03 18:42:13 +01:00
impl PrepareOpenBlock for Client {
2018-07-16 13:53:55 +02:00
fn prepare_open_block (
& self ,
author : Address ,
gas_range_target : ( U256 , U256 ) ,
extra_data : Bytes ,
) -> Result < OpenBlock , EthcoreError > {
2016-08-05 17:00:46 +02:00
let engine = & * self . engine ;
2016-09-06 15:31:13 +02:00
let chain = self . chain . read ( ) ;
2018-04-03 10:01:28 +02:00
let best_header = chain . best_block_header ( ) ;
let h = best_header . hash ( ) ;
2020-08-05 06:08:03 +02:00
2017-06-28 13:17:36 +02:00
let is_epoch_begin = chain . epoch_transition ( best_header . number ( ) , h ) . is_some ( ) ;
2016-06-06 14:33:12 +02:00
let mut open_block = OpenBlock ::new (
2016-05-31 16:41:15 +02:00
engine ,
2016-08-24 16:53:36 +02:00
self . factories . clone ( ) ,
2017-12-22 04:33:17 +01:00
self . tracedb . read ( ) . tracing_enabled ( ) ,
2018-01-02 09:43:08 +01:00
self . state_db . read ( ) . boxed_clone_canon ( & h ) ,
2018-04-03 10:01:28 +02:00
& best_header ,
2018-03-03 18:42:13 +01:00
self . build_last_hashes ( & h ) ,
2016-05-31 16:41:15 +02:00
author ,
2016-06-23 14:29:16 +02:00
gas_range_target ,
2016-05-31 16:41:15 +02:00
extra_data ,
2017-06-28 13:17:36 +02:00
is_epoch_begin ,
2019-03-15 15:43:54 +01:00
chain . ancestry_with_metadata_iter ( best_header . hash ( ) ) ,
2018-07-16 13:53:55 +02:00
) ? ;
2020-08-05 06:08:03 +02:00
2016-05-31 16:41:15 +02:00
// Add uncles
2016-09-06 15:31:13 +02:00
chain
2019-03-14 21:34:26 +01:00
. find_uncle_headers ( & h , MAX_UNCLE_AGE )
2016-10-10 17:43:44 +02:00
. unwrap_or_else ( Vec ::new )
2016-05-31 16:41:15 +02:00
. into_iter ( )
2019-03-15 13:22:47 +01:00
. take ( engine . maximum_uncle_count ( open_block . header . number ( ) ) )
2016-05-31 16:41:15 +02:00
. foreach ( | h | {
2018-05-09 12:05:56 +02:00
open_block
. push_uncle ( h . decode ( ) . expect ( " decoding failure " ) )
. expect (
" pushing maximum_uncle_count;
2016-10-10 17:43:44 +02:00
open_block was just created ;
push_uncle is not ok only if more than maximum_uncle_count is pushed ;
so all push_uncle are Ok ;
qed " ,
) ;
2016-05-31 16:41:15 +02:00
} ) ;
2018-07-16 13:53:55 +02:00
Ok ( open_block )
2016-05-31 16:41:15 +02:00
}
2018-03-03 18:42:13 +01:00
}
2016-06-29 16:23:29 +02:00
2018-03-03 18:42:13 +01:00
// `BlockProducer` is a pure trait-composition marker; all behaviour comes
// from the super-traits implemented above.
impl BlockProducer for Client {}
2016-06-29 21:49:12 +02:00
2018-03-03 18:42:13 +01:00
impl ScheduleInfo for Client {
    /// EVM schedule in force at the latest block number.
    fn latest_schedule(&self) -> Schedule {
        let number = self.latest_env_info().number;
        self.engine.schedule(number)
    }
}
2016-12-08 12:03:34 +01:00
2018-03-03 18:42:13 +01:00
impl ImportSealedBlock for Client {
2018-09-24 12:28:54 +02:00
fn import_sealed_block ( & self , block : SealedBlock ) -> EthcoreResult < H256 > {
2018-03-14 12:29:52 +01:00
let start = Instant ::now ( ) ;
2018-11-28 11:30:05 +01:00
let raw = block . rlp_bytes ( ) ;
2019-03-15 13:22:47 +01:00
let header = block . header . clone ( ) ;
2018-11-28 11:30:05 +01:00
let hash = header . hash ( ) ;
2019-11-11 21:57:38 +01:00
self . notify ( | n | n . block_pre_import ( & raw , & hash , header . difficulty ( ) ) ) ;
2020-08-05 06:08:03 +02:00
2016-10-18 18:16:00 +02:00
let route = {
2018-11-21 22:30:03 +01:00
// Do a super duper basic verification to detect potential bugs
if let Err ( e ) = self . engine . verify_block_basic ( & header ) {
self . importer . bad_blocks . report (
block . rlp_bytes ( ) ,
format! ( " Detected an issue with locally sealed block: {} " , e ) ,
) ;
return Err ( e . into ( ) ) ;
}
2020-08-05 06:08:03 +02:00
2016-10-18 18:16:00 +02:00
// scope for self.import_lock
2018-03-03 18:42:13 +01:00
let _import_lock = self . importer . import_lock . lock ( ) ;
2018-02-23 19:49:08 +01:00
trace_time! ( " import_sealed_block " ) ;
2020-08-05 06:08:03 +02:00
2016-10-18 18:16:00 +02:00
let block_data = block . rlp_bytes ( ) ;
2020-08-05 06:08:03 +02:00
2019-02-07 14:34:07 +01:00
let pending = self . importer . check_epoch_end_signal (
& header ,
& block_data ,
2019-03-15 13:22:47 +01:00
& block . receipts ,
block . state . db ( ) ,
2019-02-07 14:34:07 +01:00
self ,
) ? ;
let route = self . importer . commit_block (
block ,
& header ,
encoded ::Block ::new ( block_data ) ,
pending ,
self ,
) ;
2018-11-28 11:30:05 +01:00
trace! ( target : " client " , " Imported sealed block #{} ({}) " , header . number ( ) , hash ) ;
2018-01-02 09:43:08 +01:00
self . state_db
. write ( )
. sync_cache ( & route . enacted , & route . retracted , false ) ;
2016-10-18 18:16:00 +02:00
route
} ;
2018-05-07 12:58:25 +02:00
let route = ChainRoute ::from ( [ route ] . as_ref ( ) ) ;
2018-07-13 12:23:57 +02:00
self . importer . miner . chain_new_blocks (
self ,
2018-11-28 11:30:05 +01:00
& [ hash ] ,
2018-07-13 12:23:57 +02:00
& [ ] ,
route . enacted ( ) ,
route . retracted ( ) ,
self . engine . seals_internally ( ) . is_some ( ) ,
) ;
2016-07-29 09:56:55 +02:00
self . notify ( | notify | {
notify . new_blocks ( NewBlocks ::new (
2018-12-19 10:24:14 +01:00
vec! [ hash ] ,
vec! [ ] ,
route . clone ( ) ,
vec! [ hash ] ,
vec! [ ] ,
start . elapsed ( ) ,
false ,
2016-07-29 09:56:55 +02:00
) ) ;
} ) ;
2018-06-20 15:13:07 +02:00
self . db
. read ( )
. key_value ( )
. flush ( )
. expect ( " DB flush failed. " ) ;
2018-11-28 11:30:05 +01:00
Ok ( hash )
2016-06-29 21:49:12 +02:00
}
2016-01-07 21:35:06 +01:00
}
2016-02-10 12:50:27 +01:00
2018-03-03 18:42:13 +01:00
impl BroadcastProposalBlock for Client {
fn broadcast_proposal_block ( & self , block : SealedBlock ) {
2018-04-27 15:04:27 +02:00
const DURATION_ZERO : Duration = Duration ::from_millis ( 0 ) ;
2018-03-03 18:42:13 +01:00
self . notify ( | notify | {
notify . new_blocks ( NewBlocks ::new (
2018-12-19 10:24:14 +01:00
vec! [ ] ,
vec! [ ] ,
ChainRoute ::default ( ) ,
vec! [ ] ,
vec! [ block . rlp_bytes ( ) ] ,
DURATION_ZERO ,
false ,
2018-03-03 18:42:13 +01:00
) ) ;
} ) ;
}
}
// Marker impl: `SealedBlockImporter` is a composition of the traits above.
impl SealedBlockImporter for Client {}
2018-04-13 17:34:27 +02:00
// Marker impls wiring the miner-facing trait aliases to the full client.
impl ::miner::TransactionVerifierClient for Client {}
impl ::miner::BlockChainClient for Client {}
2018-03-03 18:42:13 +01:00
2017-09-05 17:54:05 +02:00
impl super ::traits ::EngineClient for Client {
2019-11-11 21:57:38 +01:00
fn update_sealing ( & self , force : ForceUpdateSealing ) {
self . importer . miner . update_sealing ( self , force )
2017-01-10 12:23:59 +01:00
}
2020-08-05 06:08:03 +02:00
2017-01-10 12:23:59 +01:00
fn submit_seal ( & self , block_hash : H256 , seal : Vec < Bytes > ) {
2019-11-11 21:57:38 +01:00
let import = self
. importer
. miner
. submit_seal ( block_hash , seal )
. and_then ( | block | self . import_sealed_block ( block ) ) ;
2018-04-13 17:34:27 +02:00
if let Err ( err ) = import {
warn! ( target : " poa " , " Wrong internal seal submission! {:?} " , err ) ;
2017-01-10 12:23:59 +01:00
}
2020-08-05 06:08:03 +02:00
}
2017-01-10 12:23:59 +01:00
fn broadcast_consensus_message ( & self , message : Bytes ) {
2019-11-11 21:57:38 +01:00
self . notify ( | notify | notify . broadcast ( ChainMessageType ::Consensus ( message . clone ( ) ) ) ) ;
2017-01-10 12:23:59 +01:00
}
2020-08-05 06:08:03 +02:00
2017-06-28 13:17:36 +02:00
fn epoch_transition_for ( & self , parent_hash : H256 ) -> Option < ::engines ::EpochTransition > {
self . chain . read ( ) . epoch_transition_for ( parent_hash )
}
2020-08-05 06:08:03 +02:00
2020-07-29 10:36:15 +02:00
fn as_full_client ( & self ) -> Option < & dyn BlockChainClient > {
2017-09-05 17:24:35 +02:00
Some ( self )
}
2020-08-05 06:08:03 +02:00
2017-09-05 17:54:05 +02:00
fn block_number ( & self , id : BlockId ) -> Option < BlockNumber > {
BlockChainClient ::block_number ( self , id )
}
2020-08-05 06:08:03 +02:00
2019-01-04 14:05:46 +01:00
fn block_header ( & self , id : BlockId ) -> Option < encoded ::Header > {
2018-02-15 01:39:29 +01:00
BlockChainClient ::block_header ( self , id )
}
2017-01-10 12:23:59 +01:00
}
2017-04-19 14:58:19 +02:00
impl ProvingBlockChainClient for Client {
2017-03-23 13:17:05 +01:00
fn prove_storage ( & self , key1 : H256 , key2 : H256 , id : BlockId ) -> Option < ( Vec < Bytes > , H256 ) > {
2016-12-05 16:55:33 +01:00
self . state_at ( id )
2017-03-23 13:17:05 +01:00
. and_then ( move | state | state . prove_storage ( key1 , key2 ) . ok ( ) )
2016-11-15 14:53:30 +01:00
}
2020-08-05 06:08:03 +02:00
2017-03-23 13:17:05 +01:00
fn prove_account (
& self ,
key1 : H256 ,
id : BlockId ,
) -> Option < ( Vec < Bytes > , ::types ::basic_account ::BasicAccount ) > {
2016-12-05 16:55:33 +01:00
self . state_at ( id )
2017-03-23 13:17:05 +01:00
. and_then ( move | state | state . prove_account ( key1 ) . ok ( ) )
2016-11-15 14:53:30 +01:00
}
2020-08-05 06:08:03 +02:00
2017-04-19 14:58:19 +02:00
fn prove_transaction (
& self ,
transaction : SignedTransaction ,
id : BlockId ,
) -> Option < ( Bytes , Vec < DBValue > ) > {
2017-09-05 17:54:05 +02:00
let ( header , mut env_info ) = match ( self . block_header ( id ) , self . env_info ( id ) ) {
2017-03-08 14:39:44 +01:00
( Some ( s ) , Some ( e ) ) = > ( s , e ) ,
_ = > return None ,
} ;
2020-08-05 06:08:03 +02:00
2020-12-10 16:42:05 +01:00
env_info . gas_limit = transaction . tx ( ) . gas . clone ( ) ;
2018-01-02 09:43:08 +01:00
let mut jdb = self . state_db . read ( ) . journal_db ( ) . boxed_clone ( ) ;
2020-08-05 06:08:03 +02:00
2018-10-08 21:30:46 +02:00
state ::prove_transaction_virtual (
2019-02-20 19:09:34 +01:00
jdb . as_hash_db_mut ( ) ,
2017-09-05 17:54:05 +02:00
header . state_root ( ) . clone ( ) ,
& transaction ,
2017-09-26 14:19:08 +02:00
self . engine . machine ( ) ,
2017-09-05 17:54:05 +02:00
& env_info ,
self . factories . clone ( ) ,
)
}
2020-08-05 06:08:03 +02:00
2017-09-05 17:54:05 +02:00
fn epoch_signal ( & self , hash : H256 ) -> Option < Vec < u8 > > {
// pending transitions are never deleted, and do not contain
// finality proofs by definition.
self . chain
. read ( )
. get_pending_transition ( hash )
. map ( | pending | pending . proof )
2017-03-08 14:39:44 +01:00
}
2016-10-27 15:26:29 +02:00
}
2016-11-15 14:53:30 +01:00
2018-11-18 00:06:34 +01:00
// Marker implementation: `SnapshotClient` adds no methods here; its
// requirements are satisfied by supertraits `Client` already implements.
impl SnapshotClient for Client {}
2019-11-11 21:57:38 +01:00
impl ImportExportBlocks for Client {
fn export_blocks < ' a > (
& self ,
mut out : Box < dyn std ::io ::Write + ' a > ,
from : BlockId ,
to : BlockId ,
format : Option < DataFormat > ,
) -> Result < ( ) , String > {
let from = self
. block_number ( from )
. ok_or ( " Starting block could not be found " ) ? ;
let to = self
. block_number ( to )
. ok_or ( " End block could not be found " ) ? ;
let format = format . unwrap_or_default ( ) ;
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
for i in from ..= to {
if i % 10000 = = 0 {
info! ( " #{} " , i ) ;
}
let b = self
. block ( BlockId ::Number ( i ) )
. ok_or ( " Error exporting incomplete chain " ) ?
. into_inner ( ) ;
match format {
DataFormat ::Binary = > {
out . write ( & b )
. map_err ( | e | format! ( " Couldn't write to stream. Cause: {} " , e ) ) ? ;
}
DataFormat ::Hex = > {
out . write_fmt ( format_args! ( " {} \n " , b . pretty ( ) ) )
. map_err ( | e | format! ( " Couldn't write to stream. Cause: {} " , e ) ) ? ;
2019-03-26 23:31:52 +01:00
}
2020-08-05 06:08:03 +02:00
}
}
2019-11-11 21:57:38 +01:00
Ok ( ( ) )
}
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
fn import_blocks < ' a > (
& self ,
mut source : Box < dyn std ::io ::Read + ' a > ,
format : Option < DataFormat > ,
) -> Result < ( ) , String > {
const READAHEAD_BYTES : usize = 8 ;
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
let mut first_bytes : Vec < u8 > = vec! [ 0 ; READAHEAD_BYTES ] ;
let mut first_read = 0 ;
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
let format = match format {
Some ( format ) = > format ,
None = > {
first_read = source
. read ( & mut first_bytes )
. map_err ( | _ | " Error reading from the file/stream. " ) ? ;
match first_bytes [ 0 ] {
0xf9 = > DataFormat ::Binary ,
_ = > DataFormat ::Hex ,
}
2020-08-05 06:08:03 +02:00
}
2019-11-11 21:57:38 +01:00
} ;
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
let do_import = | bytes : Vec < u8 > | {
let block = Unverified ::from_rlp ( bytes ) . map_err ( | _ | " Invalid block rlp " ) ? ;
let number = block . header . number ( ) ;
while self . queue_info ( ) . is_full ( ) {
std ::thread ::sleep ( Duration ::from_secs ( 1 ) ) ;
}
match self . import_block ( block ) {
Err ( Error ( EthcoreErrorKind ::Import ( ImportErrorKind ::AlreadyInChain ) , _ ) ) = > {
trace! ( " Skipping block #{}: already in chain. " , number ) ;
}
Err ( e ) = > {
return Err ( format! ( " Cannot import block # {} : {:?} " , number , e ) ) ;
}
Ok ( _ ) = > { }
}
Ok ( ( ) )
} ;
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
match format {
DataFormat ::Binary = > loop {
let ( mut bytes , n ) = if first_read > 0 {
( first_bytes . clone ( ) , first_read )
} else {
let mut bytes = vec! [ 0 ; READAHEAD_BYTES ] ;
let n = source
. read ( & mut bytes )
. map_err ( | err | format! ( " Error reading from the file/stream: {:?} " , err ) ) ? ;
( bytes , n )
} ;
if n = = 0 {
break ;
}
first_read = 0 ;
let s = PayloadInfo ::from ( & bytes )
. map_err ( | e | format! ( " Invalid RLP in the file/stream: {:?} " , e ) ) ?
. total ( ) ;
bytes . resize ( s , 0 ) ;
source
. read_exact ( & mut bytes [ n .. ] )
. map_err ( | err | format! ( " Error reading from the file/stream: {:?} " , err ) ) ? ;
do_import ( bytes ) ? ;
} ,
DataFormat ::Hex = > {
for line in BufReader ::new ( source ) . lines ( ) {
let s = line
. map_err ( | err | format! ( " Error reading from the file/stream: {:?} " , err ) ) ? ;
let s = if first_read > 0 {
from_utf8 ( & first_bytes )
. map_err ( | err | format! ( " Invalid UTF-8: {:?} " , err ) ) ?
. to_owned ( )
+ & ( s [ .. ] )
} else {
s
} ;
first_read = 0 ;
let bytes = s
. from_hex ( )
. map_err ( | err | format! ( " Invalid hex in file/stream: {:?} " , err ) ) ? ;
do_import ( bytes ) ? ;
}
}
} ;
self . flush_queue ( ) ;
Ok ( ( ) )
2016-12-09 14:52:08 +01:00
}
}
2016-12-29 19:48:28 +01:00
/// Returns `LocalizedReceipt` given `LocalizedTransaction`
/// and a vector of receipts from given block up to transaction index.
2018-09-25 19:06:14 +02:00
fn transaction_receipt (
machine : & ::machine ::EthereumMachine ,
mut tx : LocalizedTransaction ,
2020-12-10 16:42:05 +01:00
receipt : TypedReceipt ,
2018-09-25 19:06:14 +02:00
prior_gas_used : U256 ,
prior_no_of_logs : usize ,
) -> LocalizedReceipt {
2017-01-13 09:51:36 +01:00
let sender = tx . sender ( ) ;
2016-12-29 19:48:28 +01:00
let transaction_hash = tx . hash ( ) ;
let block_hash = tx . block_hash ;
let block_number = tx . block_number ;
let transaction_index = tx . transaction_index ;
2020-12-10 16:42:05 +01:00
let transaction_type = tx . tx_type ( ) ;
let receipt = receipt . receipt ( ) . clone ( ) ;
2020-08-05 06:08:03 +02:00
2016-12-29 19:48:28 +01:00
LocalizedReceipt {
2018-06-07 16:47:41 +02:00
from : sender ,
2020-12-10 16:42:05 +01:00
to : match tx . tx ( ) . action {
2018-06-07 16:47:41 +02:00
Action ::Create = > None ,
Action ::Call ( ref address ) = > Some ( address . clone ( ) . into ( ) ) ,
} ,
2016-12-29 19:48:28 +01:00
transaction_hash : transaction_hash ,
transaction_index : transaction_index ,
2020-12-10 16:42:05 +01:00
transaction_type : transaction_type ,
2016-12-29 19:48:28 +01:00
block_hash : block_hash ,
2017-04-19 14:30:00 +02:00
block_number : block_number ,
2016-12-29 19:48:28 +01:00
cumulative_gas_used : receipt . gas_used ,
gas_used : receipt . gas_used - prior_gas_used ,
2020-12-10 16:42:05 +01:00
contract_address : match tx . tx ( ) . action {
2016-12-29 19:48:28 +01:00
Action ::Call ( _ ) = > None ,
2017-09-26 14:19:08 +02:00
Action ::Create = > Some (
contract_address (
machine . create_address_scheme ( block_number ) ,
& sender ,
2020-12-10 16:42:05 +01:00
& tx . tx ( ) . nonce ,
& tx . tx ( ) . data ,
2017-09-26 14:19:08 +02:00
)
. 0 ,
2020-08-05 06:08:03 +02:00
) ,
2016-12-29 19:48:28 +01:00
} ,
logs : receipt
. logs
. into_iter ( )
. enumerate ( )
. map ( | ( i , log ) | LocalizedLogEntry {
entry : log ,
block_hash : block_hash ,
block_number : block_number ,
transaction_hash : transaction_hash ,
transaction_index : transaction_index ,
transaction_log_index : i ,
2018-09-25 19:06:14 +02:00
log_index : prior_no_of_logs + i ,
2016-12-29 19:48:28 +01:00
} )
. collect ( ) ,
log_bloom : receipt . log_bloom ,
2020-12-10 16:42:05 +01:00
outcome : receipt . outcome . clone ( ) ,
2016-12-29 19:48:28 +01:00
}
}
2019-11-11 21:57:38 +01:00
/// Queue some items to be processed by IO client.
struct IoChannelQueue {
    /// Number of messages currently in flight: incremented on a successful
    /// send, decremented when the executed closure runs.
    ///
    /// Using a *signed* integer for counting currently queued messages since the
    /// order in which the counter is incremented and decremented is not defined.
    /// Using an unsigned integer can (and will) result in integer underflow,
    /// incorrectly rejecting messages and returning a FullQueue error.
    currently_queued: Arc<AtomicI64>,
    /// Soft cap on `currently_queued`; further `queue` calls fail once reached.
    limit: i64,
}
impl IoChannelQueue {
    /// Creates a queue with the given message `limit`, clamped to `i64::MAX`.
    pub fn new(limit: usize) -> Self {
        let limit = i64::try_from(limit).unwrap_or(i64::max_value());
        IoChannelQueue {
            currently_queued: Default::default(),
            limit,
        }
    }

    /// Sends `fun` to the IO channel for execution against the `Client`,
    /// accounting `count` messages against the queue limit.
    ///
    /// Fails with a "queue is full" error once the backlog reaches `limit`.
    /// Note the counter is incremented only *after* a successful send and
    /// decremented inside the executed closure; since that ordering is not
    /// defined relative to execution, the counter may transiently go
    /// negative — which is why it is signed (see the struct docs).
    pub fn queue<F>(
        &self,
        channel: &IoChannel<ClientIoMessage>,
        count: usize,
        fun: F,
    ) -> EthcoreResult<()>
    where
        F: Fn(&Client) + Send + Sync + 'static,
    {
        // Relaxed is sufficient here: the check is a best-effort soft cap,
        // not a synchronization point.
        let queue_size = self.currently_queued.load(AtomicOrdering::Relaxed);
        if queue_size >= self.limit {
            let err_limit = usize::try_from(self.limit).unwrap_or(usize::max_value());
            bail!("The queue is full ({})", err_limit);
        };

        // Clamp rather than fail on an (unrealistically) huge `count`.
        let count = i64::try_from(count).unwrap_or(i64::max_value());

        let currently_queued = self.currently_queued.clone();
        let _ok = channel.send(ClientIoMessage::execute(move |client| {
            currently_queued.fetch_sub(count, AtomicOrdering::SeqCst);
            fun(client);
        }))?;

        // Only count the messages once the send is known to have succeeded.
        self.currently_queued
            .fetch_add(count, AtomicOrdering::SeqCst);
        Ok(())
    }
}
impl PrometheusMetrics for Client {
    /// Registers the client's operational metrics with `r`: import
    /// throughput, cache sizes, chain/warp-sync state, pruning bounds and
    /// verification-queue load.
    fn prometheus_metrics(&self, r: &mut prometheus::Registry) {
        // gas, tx & blocks
        let report = self.report();
        // One gauge per item-size entry, named after the item key itself.
        for (key, value) in report.item_sizes.iter() {
            prometheus_gauge(
                r,
                &key,
                format!("Total item number of {}", key).as_str(),
                *value as i64,
            );
        }
        prometheus_counter(
            r,
            "import_gas",
            "Gas processed",
            report.gas_processed.as_u64() as i64,
        );
        prometheus_counter(
            r,
            "import_blocks",
            "Blocks imported",
            report.blocks_imported as i64,
        );
        prometheus_counter(
            r,
            "import_txs",
            "Transactions applied",
            report.transactions_applied as i64,
        );
        let state_db = self.state_db.read();
        prometheus_gauge(
            r,
            "statedb_cache_size",
            "State DB cache size",
            state_db.cache_size() as i64,
        );
        // blockchain cache
        let blockchain_cache_info = self.blockchain_cache_info();
        prometheus_gauge(
            r,
            "blockchaincache_block_details",
            "BlockDetails cache size",
            blockchain_cache_info.block_details as i64,
        );
        // NOTE(review): "recipts" is a typo, but the metric name is part of
        // the exported interface (dashboards/alerts may key on it) — do not
        // rename without a deprecation path.
        prometheus_gauge(
            r,
            "blockchaincache_block_recipts",
            "Block receipts size",
            blockchain_cache_info.block_receipts as i64,
        );
        prometheus_gauge(
            r,
            "blockchaincache_blocks",
            "Blocks cache size",
            blockchain_cache_info.blocks as i64,
        );
        prometheus_gauge(
            r,
            "blockchaincache_txaddrs",
            "Transaction addresses cache size",
            blockchain_cache_info.transaction_addresses as i64,
        );
        prometheus_gauge(
            r,
            "blockchaincache_size",
            "Total blockchain cache size",
            blockchain_cache_info.total() as i64,
        );
        // chain info
        let chain = self.chain_info();
        // Warp-sync gap: only reported when both an ancient block number and
        // a first full block number are known.
        let gap = chain
            .ancient_block_number
            .map(|x| U256::from(x + 1))
            .and_then(|first| {
                chain
                    .first_block_number
                    .map(|last| (first, U256::from(last)))
            });
        if let Some((first, last)) = gap {
            prometheus_gauge(
                r,
                "chain_warpsync_gap_first",
                "Warp sync gap, first block",
                first.as_u64() as i64,
            );
            prometheus_gauge(
                r,
                "chain_warpsync_gap_last",
                "Warp sync gap, last block",
                last.as_u64() as i64,
            );
        }
        prometheus_gauge(
            r,
            "chain_block",
            "Best block number",
            chain.best_block_number as i64,
        );
        // prunning info
        // NOTE(review): "prunning" (pruning) is misspelled in these metric
        // names; kept as-is for backwards compatibility of exported metrics.
        let prunning = self.pruning_info();
        prometheus_gauge(
            r,
            "prunning_earliest_chain",
            "The first block which everything can be served after",
            prunning.earliest_chain as i64,
        );
        prometheus_gauge(
            r,
            "prunning_earliest_state",
            "The first block where state requests may be served",
            prunning.earliest_state as i64,
        );
        // queue info
        let queue = self.queue_info();
        prometheus_gauge(
            r,
            "queue_mem_used",
            "Queue heap memory used in bytes",
            queue.mem_used as i64,
        );
        prometheus_gauge(
            r,
            "queue_size_total",
            "The total size of the queues",
            queue.total_queue_size() as i64,
        );
        prometheus_gauge(
            r,
            "queue_size_unverified",
            "Number of queued items pending verification",
            queue.unverified_queue_size as i64,
        );
        prometheus_gauge(
            r,
            "queue_size_verified",
            "Number of verified queued items pending import",
            queue.verified_queue_size as i64,
        );
        prometheus_gauge(
            r,
            "queue_size_verifying",
            "Number of items being verified",
            queue.verifying_queue_size as i64,
        );
    }
}
2016-11-15 14:53:30 +01:00
#[ cfg(test) ]
mod tests {
2019-11-11 21:57:38 +01:00
use blockchain ::{ BlockProvider , ExtrasInsert } ;
use spec ::Spec ;
2020-07-29 10:57:15 +02:00
use test_helpers ::generate_dummy_client_with_spec_and_data ;
2020-08-05 06:08:03 +02:00
2016-11-15 14:53:30 +01:00
    #[test]
    fn should_not_cache_details_before_commit() {
        use client::{BlockChainClient, ChainInfo};
        use test_helpers::{generate_dummy_client, get_good_dummy_block_hash};

        use kvdb::DBTransaction;
        use std::{
            sync::{
                atomic::{AtomicBool, Ordering},
                Arc,
            },
            thread,
            time::Duration,
        };
        use types::encoded;

        let client = generate_dummy_client(0);
        let genesis = client.chain_info().best_block_hash;
        let (new_hash, new_block) = get_good_dummy_block_hash();

        let go = {
            // Separate thread uncommitted transaction
            // The batch below is never written to the backing database, so
            // the inserted block must not become visible via the client.
            let go = Arc::new(AtomicBool::new(false));
            let go_thread = go.clone();
            let another_client = client.clone();
            thread::spawn(move || {
                let mut batch = DBTransaction::new();
                another_client.chain.read().insert_block(
                    &mut batch,
                    encoded::Block::new(new_block),
                    Vec::new(),
                    ExtrasInsert {
                        fork_choice: ::engines::ForkChoice::New,
                        is_finalized: false,
                    },
                );
                // Signal the main thread once the uncommitted insert is done.
                go_thread.store(true, Ordering::SeqCst);
            });
            go
        };

        // Wait until the insert has happened (without commit).
        while !go.load(Ordering::SeqCst) {
            thread::park_timeout(Duration::from_millis(5));
        }

        // The block was never committed, so no route to it should exist.
        assert!(client.tree_route(&genesis, &new_hash).is_none());
    }
2020-08-05 06:08:03 +02:00
2018-09-25 19:06:14 +02:00
#[ test ]
fn should_return_block_receipts ( ) {
use client ::{ BlockChainClient , BlockId , TransactionId } ;
use test_helpers ::generate_dummy_client_with_data ;
2020-08-05 06:08:03 +02:00
2018-09-25 19:06:14 +02:00
let client = generate_dummy_client_with_data ( 2 , 2 , & [ 1. into ( ) , 1. into ( ) ] ) ;
2018-11-18 00:06:34 +01:00
let receipts = client . localized_block_receipts ( BlockId ::Latest ) . unwrap ( ) ;
2020-08-05 06:08:03 +02:00
2018-09-25 19:06:14 +02:00
assert_eq! ( receipts . len ( ) , 2 ) ;
assert_eq! ( receipts [ 0 ] . transaction_index , 0 ) ;
assert_eq! ( receipts [ 0 ] . block_number , 2 ) ;
assert_eq! ( receipts [ 0 ] . cumulative_gas_used , 53_000. into ( ) ) ;
assert_eq! ( receipts [ 0 ] . gas_used , 53_000. into ( ) ) ;
2020-08-05 06:08:03 +02:00
2018-09-25 19:06:14 +02:00
assert_eq! ( receipts [ 1 ] . transaction_index , 1 ) ;
assert_eq! ( receipts [ 1 ] . block_number , 2 ) ;
assert_eq! ( receipts [ 1 ] . cumulative_gas_used , 106_000. into ( ) ) ;
assert_eq! ( receipts [ 1 ] . gas_used , 53_000. into ( ) ) ;
2020-08-05 06:08:03 +02:00
2018-09-25 19:06:14 +02:00
let receipt = client . transaction_receipt ( TransactionId ::Hash ( receipts [ 0 ] . transaction_hash ) ) ;
assert_eq! ( receipt , Some ( receipts [ 0 ] . clone ( ) ) ) ;
2020-08-05 06:08:03 +02:00
2018-09-25 19:06:14 +02:00
let receipt = client . transaction_receipt ( TransactionId ::Hash ( receipts [ 1 ] . transaction_hash ) ) ;
assert_eq! ( receipt , Some ( receipts [ 1 ] . clone ( ) ) ) ;
}
2020-08-05 06:08:03 +02:00
2016-12-29 19:48:28 +01:00
    #[test]
    fn should_return_correct_log_index() {
        use super::transaction_receipt;
        use ethkey::KeyPair;
        use hash::keccak;
        use types::{
            log_entry::{LocalizedLogEntry, LogEntry},
            receipt::{LegacyReceipt, LocalizedReceipt, TransactionOutcome, TypedReceipt},
            transaction::{Action, LocalizedTransaction, Transaction, TypedTransaction},
        };

        // given
        let key = KeyPair::from_secret_slice(&keccak("test")).unwrap();
        let secret = key.secret();
        let machine = ::ethereum::new_frontier_test_machine();

        let block_number = 1;
        let block_hash = 5.into();
        let state_root = 99.into();
        let gas_used = 10.into();
        let raw_tx = TypedTransaction::Legacy(Transaction {
            nonce: 0.into(),
            gas_price: 0.into(),
            gas: 21000.into(),
            action: Action::Call(10.into()),
            value: 0.into(),
            data: vec![],
        });
        // Transaction sits at index 1 in its block (one earlier transaction).
        let tx1 = raw_tx.clone().sign(secret, None);
        let transaction = LocalizedTransaction {
            signed: tx1.clone().into(),
            block_number: block_number,
            block_hash: block_hash,
            transaction_index: 1,
            cached_sender: Some(tx1.sender()),
        };
        // Two logs emitted by this transaction.
        let logs = vec![
            LogEntry {
                address: 5.into(),
                topics: vec![],
                data: vec![],
            },
            LogEntry {
                address: 15.into(),
                topics: vec![],
                data: vec![],
            },
        ];
        let receipt = TypedReceipt::Legacy(LegacyReceipt {
            outcome: TransactionOutcome::StateRoot(state_root),
            gas_used: gas_used,
            log_bloom: Default::default(),
            logs: logs.clone(),
        });

        // when
        // prior_gas_used = 5 and prior_no_of_logs = 1: earlier transactions
        // in the block consumed 5 gas and emitted one log.
        let receipt = transaction_receipt(&machine, transaction, receipt, 5.into(), 1);

        // then
        assert_eq!(
            receipt,
            LocalizedReceipt {
                from: tx1.sender().into(),
                to: match tx1.tx().action {
                    Action::Create => None,
                    Action::Call(ref address) => Some(address.clone().into()),
                },
                transaction_hash: tx1.hash(),
                transaction_index: 1,
                transaction_type: tx1.tx_type(),
                block_hash: block_hash,
                block_number: block_number,
                cumulative_gas_used: gas_used,
                // Own gas = cumulative minus the 5 used by prior transactions.
                gas_used: gas_used - 5,
                contract_address: None,
                logs: vec![
                    LocalizedLogEntry {
                        entry: logs[0].clone(),
                        block_hash: block_hash,
                        block_number: block_number,
                        transaction_hash: tx1.hash(),
                        transaction_index: 1,
                        transaction_log_index: 0,
                        // Block-wide log index: offset by the one prior log.
                        log_index: 1,
                    },
                    LocalizedLogEntry {
                        entry: logs[1].clone(),
                        block_hash: block_hash,
                        block_number: block_number,
                        transaction_hash: tx1.hash(),
                        transaction_index: 1,
                        transaction_log_index: 1,
                        log_index: 2,
                    }
                ],
                log_bloom: Default::default(),
                outcome: TransactionOutcome::StateRoot(state_root),
            }
        );
    }
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
#[ test ]
fn should_mark_finalization_correctly_for_parent ( ) {
let client =
generate_dummy_client_with_spec_and_data ( Spec ::new_test_with_finality , 2 , 0 , & [ ] ) ;
let chain = client . chain ( ) ;
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
let block1_details = chain . block_hash ( 1 ) . and_then ( | h | chain . block_details ( & h ) ) ;
assert! ( block1_details . is_some ( ) ) ;
let block1_details = block1_details . unwrap ( ) ;
assert_eq! ( block1_details . children . len ( ) , 1 ) ;
assert! ( block1_details . is_finalized ) ;
2020-08-05 06:08:03 +02:00
2019-11-11 21:57:38 +01:00
let block2_details = chain . block_hash ( 2 ) . and_then ( | h | chain . block_details ( & h ) ) ;
assert! ( block2_details . is_some ( ) ) ;
let block2_details = block2_details . unwrap ( ) ;
assert_eq! ( block2_details . children . len ( ) , 0 ) ;
assert! ( ! block2_details . is_finalized ) ;
2018-05-09 08:49:34 +02:00
}
}