// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! Simple executive tracer.

use std::cmp::min;

use ethereum_types::{U256, Address};
use log::{debug, warn};
use vm::{Error as VmError, ActionParams};

use crate::{
	Tracer, VMTracer, FlatTrace,
	trace::{Call, Create, Action, Res, CreateResult, CallResult, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, Suicide, Reward, RewardType},
};
2016-04-30 17:41:24 +02:00
/// Simple executive tracer. Traces all calls and creates. Ignores delegatecalls.
#[derive(Default)]
pub struct ExecutiveTracer {
	// Flattened traces produced so far, in creation order.
	traces: Vec<FlatTrace>,
	// One entry per open call/create frame: the index the *next* direct child
	// of that frame will get (the stack itself forms the current trace address).
	index_stack: Vec<usize>,
	// One entry per open frame: position of the frame's own entry inside
	// `traces`, so `done_trace_*` can patch its result and subtrace count.
	vecindex_stack: Vec<usize>,
	// One entry per open frame: number of direct children recorded so far.
	sublen_stack: Vec<usize>,
	// Set when a zero-value builtin call below the top level was skipped in
	// `prepare_trace_call`; the matching `done_trace_*` must be skipped too.
	skip_one: bool,
}
2016-09-28 17:24:26 +02:00
2016-04-30 17:41:24 +02:00
impl Tracer for ExecutiveTracer {
	type Output = FlatTrace;

	/// Open a new call frame. Zero-value builtin calls below the top level are
	/// not traced: `skip_one` is set so the matching `done_trace_*` is a no-op.
	fn prepare_trace_call(&mut self, params: &ActionParams, depth: usize, is_builtin: bool) {
		assert!(!self.skip_one, "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_call it cannot be true; qed");

		if depth != 0 && is_builtin && params.value.value() == U256::zero() {
			self.skip_one = true;
			return;
		}
		// Count this frame as a direct child of the enclosing frame (if any).
		if let Some(parentlen) = self.sublen_stack.last_mut() {
			*parentlen += 1;
		}

		let trace = FlatTrace {
			// The stack of per-level child counters is exactly this frame's address.
			trace_address: self.index_stack.clone(),
			// Placeholder; overwritten with the final child count in `done_trace_call`.
			subtraces: self.sublen_stack.last().cloned().unwrap_or(0),
			action: Action::Call(Call::from(params.clone())),
			// Placeholder result; patched in `done_trace_call`/`done_trace_failed`.
			result: Res::Call(CallResult {
				gas_used: U256::zero(),
				output: Vec::new()
			}),
		};
		// Remember where this frame's entry sits so completion can patch it in place.
		self.vecindex_stack.push(self.traces.len());
		self.traces.push(trace);
		self.index_stack.push(0);
		self.sublen_stack.push(0);
	}

	/// Open a new create frame; mirrors `prepare_trace_call` but creates are
	/// never builtins, so `skip_one` can never be pending here.
	fn prepare_trace_create(&mut self, params: &ActionParams) {
		assert!(!self.skip_one, "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_create it cannot be true; qed");
		// Count this frame as a direct child of the enclosing frame (if any).
		if let Some(parentlen) = self.sublen_stack.last_mut() {
			*parentlen += 1;
		}

		let trace = FlatTrace {
			trace_address: self.index_stack.clone(),
			// Placeholder; overwritten with the final child count in `done_trace_create`.
			subtraces: self.sublen_stack.last().cloned().unwrap_or(0),
			action: Action::Create(Create::from(params.clone())),
			// Placeholder result; patched in `done_trace_create`/`done_trace_failed`.
			result: Res::Create(CreateResult {
				gas_used: U256::zero(),
				code: Vec::new(),
				address: Address::zero(),
			}),
		};
		self.vecindex_stack.push(self.traces.len());
		self.traces.push(trace);
		self.index_stack.push(0);
		self.sublen_stack.push(0);
	}

	/// Close the innermost call frame, recording its result and child count.
	fn done_trace_call(&mut self, gas_used: U256, output: &[u8]) {
		// A skipped builtin call pushed no frame; just consume the flag.
		if self.skip_one {
			self.skip_one = false;
			return;
		}
		let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_call before this function; vecindex_stack is never empty; qed");
		let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_call before this function; sublen_stack is never empty; qed");
		self.index_stack.pop();
		self.traces[vecindex].result = Res::Call(CallResult {
			gas_used,
			output: output.into(),
		});
		self.traces[vecindex].subtraces = sublen;
		// Advance the parent's counter so the next sibling gets a fresh index.
		if let Some(index) = self.index_stack.last_mut() {
			*index += 1;
		}
	}

	/// Close the innermost create frame, recording its result and child count.
	fn done_trace_create(&mut self, gas_used: U256, code: &[u8], address: Address) {
		assert!(!self.skip_one, "skip_one is only set with prepare_trace_call for builtin contracts with no subsequent calls; skip_one cannot be true after the same level prepare_trace_create; qed");
		let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_create before this function; vecindex_stack is never empty; qed");
		let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_create before this function; sublen_stack is never empty; qed");
		self.index_stack.pop();
		self.traces[vecindex].result = Res::Create(CreateResult {
			gas_used, address,
			code: code.into(),
		});
		self.traces[vecindex].subtraces = sublen;
		// Advance the parent's counter so the next sibling gets a fresh index.
		if let Some(index) = self.index_stack.last_mut() {
			*index += 1;
		}
	}

	/// Close the innermost frame (call or create) with a failure result.
	fn done_trace_failed(&mut self, error: &VmError) {
		// A skipped builtin call pushed no frame; just consume the flag.
		if self.skip_one {
			self.skip_one = false;
			return;
		}
		let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_create/call before this function; vecindex_stack is never empty; qed");
		let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_create/call before this function; vecindex_stack is never empty; qed");
		self.index_stack.pop();
		// The frame's action tells us whether to record a failed call or create.
		let is_create = match self.traces[vecindex].action {
			Action::Create(_) => true,
			_ => false,
		};

		if is_create {
			self.traces[vecindex].result = Res::FailedCreate(error.into());
		} else {
			self.traces[vecindex].result = Res::FailedCall(error.into());
		}
		self.traces[vecindex].subtraces = sublen;
		// Advance the parent's counter so the next sibling gets a fresh index.
		if let Some(index) = self.index_stack.last_mut() {
			*index += 1;
		}
	}

	/// Record a suicide (SELFDESTRUCT) as a leaf trace at the current address.
	fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address) {
		if let Some(parentlen) = self.sublen_stack.last_mut() {
			*parentlen += 1;
		}
		let trace = FlatTrace {
			subtraces: 0,
			action: Action::Suicide(Suicide { address, refund_address, balance }),
			result: Res::None,
			trace_address: self.index_stack.clone(),
		};
		debug!(target: "trace", "Traced suicide {:?}", trace);
		self.traces.push(trace);
		// Leaf trace: advance the sibling counter immediately.
		if let Some(index) = self.index_stack.last_mut() {
			*index += 1;
		}
	}

	/// Record a block/uncle reward as a leaf trace at the current address.
	fn trace_reward(&mut self, author: Address, value: U256, reward_type: RewardType) {
		if let Some(parentlen) = self.sublen_stack.last_mut() {
			*parentlen += 1;
		}
		let trace = FlatTrace {
			subtraces: 0,
			action: Action::Reward(Reward { author, value, reward_type }),
			result: Res::None,
			trace_address: self.index_stack.clone(),
		};
		debug!(target: "trace", "Traced reward {:?}", trace);
		self.traces.push(trace);

		// Leaf trace: advance the sibling counter immediately.
		if let Some(index) = self.index_stack.last_mut() {
			*index += 1;
		}
	}

	/// Consume the tracer and return all collected traces.
	fn drain(self) -> Vec<FlatTrace> {
		self.traces
	}
}
2016-06-02 12:40:31 +02:00
2019-09-05 16:11:51 +02:00
/// Per-instruction context captured in `trace_prepare_execute` and consumed by
/// the matching `trace_executed`/`trace_failed`.
struct TraceData {
	// (offset, size) of the memory region the instruction will write, if any.
	mem_written: Option<(usize, usize)>,
	// (location, value) of the storage slot the instruction will write, if any.
	store_written: Option<(U256, U256)>,
}
2016-06-02 12:40:31 +02:00
/// Simple VM tracer. Traces all operations.
pub struct ExecutiveVMTracer {
	// Root trace; the traces of interest live in `data.subs` (see `toplevel`/`drain`).
	data: VMTrace,
	// Current subtrace nesting depth; selects which nested `subs` entry is written.
	depth: usize,
	// LIFO of contexts for instructions currently being executed.
	trace_stack: Vec<TraceData>,
}
2016-08-03 20:07:30 +02:00
impl ExecutiveVMTracer {
	/// Create a new top-level instance.
	pub fn toplevel() -> Self {
		ExecutiveVMTracer {
			data: VMTrace {
				parent_step: 0,
				code: vec![],
				// Prefill with a single entry so that prepare_subtrace can get the parent_step.
				operations: vec![Default::default()],
				subs: vec![],
			},
			depth: 0,
			trace_stack: vec![],
		}
	}

	/// Apply `f` to the trace `depth` levels down from `trace`, following the
	/// most recently pushed subtrace at every level.
	fn with_trace_in_depth<F: Fn(&mut VMTrace)>(trace: &mut VMTrace, depth: usize, f: F) {
		// Walk down iteratively instead of recursing; each step reborrows the
		// latest subtrace at the next level.
		let mut current = trace;
		for _ in 0..depth {
			current = current.subs.last_mut().expect("self.depth is incremented with prepare_subtrace; a subtrace is always pushed; self.depth cannot be greater than subtrace stack; qed");
		}
		f(current);
	}
}
2016-06-02 12:40:31 +02:00
impl VMTracer for ExecutiveVMTracer {
2017-10-20 15:40:25 +02:00
type Output = VMTrace ;
2018-01-18 10:32:22 +01:00
fn trace_next_instruction ( & mut self , _pc : usize , _instruction : u8 , _current_gas : U256 ) -> bool { true }
2017-07-10 13:23:40 +02:00
2018-10-02 16:33:19 +02:00
fn trace_prepare_execute ( & mut self , pc : usize , instruction : u8 , gas_cost : U256 , mem_written : Option < ( usize , usize ) > , store_written : Option < ( U256 , U256 ) > ) {
Self ::with_trace_in_depth ( & mut self . data , self . depth , move | trace | {
trace . operations . push ( VMOperation {
pc : pc ,
instruction : instruction ,
gas_cost : gas_cost ,
executed : None ,
} ) ;
2016-06-02 12:40:31 +02:00
} ) ;
2019-09-05 16:11:51 +02:00
self . trace_stack . push ( TraceData { mem_written , store_written } ) ;
}
fn trace_failed ( & mut self ) {
let _ = self . trace_stack . pop ( ) . expect ( " pushed in trace_prepare_execute; qed " ) ;
2016-06-02 12:40:31 +02:00
}
2018-10-02 16:33:19 +02:00
fn trace_executed ( & mut self , gas_used : U256 , stack_push : & [ U256 ] , mem : & [ u8 ] ) {
2019-09-05 16:11:51 +02:00
let TraceData { mem_written , store_written } = self . trace_stack . pop ( ) . expect ( " pushed in trace_prepare_execute; qed " ) ;
let mem_diff = mem_written . map ( | ( o , s ) | {
2019-08-28 18:24:06 +02:00
if o + s > mem . len ( ) {
2019-09-05 16:11:51 +02:00
warn! ( target : " trace " , " mem_written is out of bounds " ) ;
2019-08-28 18:24:06 +02:00
}
2019-09-05 16:11:51 +02:00
( o , & mem [ min ( mem . len ( ) , o ) .. min ( o + s , mem . len ( ) ) ] )
2019-08-28 18:24:06 +02:00
} ) ;
2019-09-05 16:11:51 +02:00
let store_diff = store_written ;
2018-10-02 16:33:19 +02:00
Self ::with_trace_in_depth ( & mut self . data , self . depth , move | trace | {
let ex = VMExecutedOperation {
gas_used : gas_used ,
2019-09-05 16:11:51 +02:00
stack_push : stack_push . to_vec ( ) ,
mem_diff : mem_diff . map ( | ( s , r ) | MemoryDiff { offset : s , data : r . to_vec ( ) } ) ,
2018-10-02 16:33:19 +02:00
store_diff : store_diff . map ( | ( l , v ) | StorageDiff { location : l , value : v } ) ,
} ;
trace . operations . last_mut ( ) . expect ( " trace_executed is always called after a trace_prepare_execute; trace.operations cannot be empty; qed " ) . executed = Some ( ex ) ;
} ) ;
2016-06-02 12:40:31 +02:00
}
2018-10-02 16:33:19 +02:00
fn prepare_subtrace ( & mut self , code : & [ u8 ] ) {
Self ::with_trace_in_depth ( & mut self . data , self . depth , move | trace | {
let parent_step = trace . operations . len ( ) - 1 ; // won't overflow since we must already have pushed an operation in trace_prepare_execute.
trace . subs . push ( VMTrace {
parent_step ,
code : code . to_vec ( ) ,
operations : vec ! [ ] ,
subs : vec ! [ ] ,
} ) ;
} ) ;
self . depth + = 1 ;
2016-06-02 12:40:31 +02:00
}
2018-10-02 16:33:19 +02:00
fn done_subtrace ( & mut self ) {
self . depth - = 1 ;
2016-06-02 12:40:31 +02:00
}
fn drain ( mut self ) -> Option < VMTrace > { self . data . subs . pop ( ) }
}
2018-10-02 16:33:19 +02:00
#[ cfg(test) ]
mod tests {
use super ::* ;
#[ test ]
fn should_prefix_address_properly ( ) {
let mut tracer = ExecutiveTracer ::default ( ) ;
tracer . prepare_trace_call ( & ActionParams ::default ( ) , 0 , false ) ;
tracer . prepare_trace_call ( & ActionParams ::default ( ) , 1 , false ) ;
tracer . prepare_trace_call ( & ActionParams ::default ( ) , 2 , false ) ;
tracer . done_trace_call ( U256 ::zero ( ) , & [ ] ) ;
tracer . prepare_trace_call ( & ActionParams ::default ( ) , 2 , false ) ;
tracer . done_trace_call ( U256 ::zero ( ) , & [ ] ) ;
tracer . prepare_trace_call ( & ActionParams ::default ( ) , 2 , false ) ;
tracer . done_trace_call ( U256 ::zero ( ) , & [ ] ) ;
tracer . done_trace_call ( U256 ::zero ( ) , & [ ] ) ;
tracer . done_trace_call ( U256 ::zero ( ) , & [ ] ) ;
let drained = tracer . drain ( ) ;
assert! ( drained [ 0 ] . trace_address . len ( ) = = 0 ) ;
assert_eq! ( & drained [ 1 ] . trace_address , & [ 0 ] ) ;
assert_eq! ( & drained [ 2 ] . trace_address , & [ 0 , 0 ] ) ;
assert_eq! ( & drained [ 3 ] . trace_address , & [ 0 , 1 ] ) ;
assert_eq! ( & drained [ 4 ] . trace_address , & [ 0 , 2 ] ) ;
}
}