// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! Trace database.

use std::collections::HashMap;
use std::sync::Arc;

use blockchain::BlockChainDB;
use db::cache_manager::CacheManager;
use db::{self, Key, Writable, Readable, CacheUpdatePolicy};
use ethereum_types::{H256, H264};
use heapsize::HeapSizeOf;
use kvdb::DBTransaction;
use parking_lot::RwLock;
use types::BlockNumber;

use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras};
use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};

const TRACE_DB_VER: &'static [u8] = b"1.0";

#[derive(Debug, Copy, Clone)]
enum TraceDBIndex {
	/// Block traces index.
	BlockTraces = 0,
}
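
// Keys in the trace column are 33 bytes: a single `TraceDBIndex` prefix byte followed
// by the 32-byte block hash, so a block's traces can be looked up directly by hash.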
impl Key<FlatBlockTraces> for H256 {
	type Target = H264;

	fn key(&self) -> H264 {
		let mut result = H264::default();
		result[0] = TraceDBIndex::BlockTraces as u8;
		result[1..33].copy_from_slice(self);
		result
	}
}

/// Database to store transaction execution trace.
///
/// Whenever a transaction is executed by EVM its execution trace is stored
/// in the trace database. Each trace records which contracts have been
/// touched, which have been created during the execution of the transaction,
/// and which calls failed.
pub struct TraceDB<T> where T: DatabaseExtras {
	/// In-memory cache of block traces.
	traces: RwLock<HashMap<H256, FlatBlockTraces>>,
	/// Hashes of cached traces.
	cache_manager: RwLock<CacheManager<H256>>,
	/// Backing database.
	db: Arc<BlockChainDB>,
	/// Whether tracing is enabled.
	enabled: bool,
	/// Extras provider (block and transaction hashes).
	extras: Arc<T>,
}
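
// A minimal usage sketch, mirroring the tests at the bottom of this file (the `db`,
// `extras`, `batch` and `request` values are placeholders, not defined here):
//
//     let tracedb = TraceDB::new(Config::default(), db, extras);
//     tracedb.import(&mut batch, request);
//     let traces = tracedb.block_traces(1);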

impl<T> TraceDB<T> where T: DatabaseExtras {
	/// Creates a new instance of `TraceDB`.
	pub fn new(config: Config, db: Arc<BlockChainDB>, extras: Arc<T>) -> Self {
		let mut batch = DBTransaction::new();
		let genesis = extras.block_hash(0)
			.expect("Genesis block is always inserted upon extras db creation qed");
		batch.write(db::COL_TRACE, &genesis, &FlatBlockTraces::default());
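		// genesis has no transactions, so a default (empty) trace entry is written for it;
		// queries for block 0 then always succeed (see the `query_genesis` test below)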
		batch.put(db::COL_TRACE, b"version", TRACE_DB_VER);
		db.key_value().write(batch).expect("failed to update version");

		TraceDB {
			traces: RwLock::new(HashMap::new()),
			cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)),
			db,
			enabled: config.enabled,
			extras,
		}
	}

	fn cache_size(&self) -> usize {
		self.traces.read().heap_size_of_children()
	}

	/// Let the cache system know that a cacheable item has been used.
	fn note_trace_used(&self, trace_id: H256) {
		let mut cache_manager = self.cache_manager.write();
		cache_manager.note_used(trace_id);
	}

	/// Ticks our cache system and throws out any old data.
	pub fn collect_garbage(&self) {
		let current_size = self.cache_size();

		let mut traces = self.traces.write();
		let mut cache_manager = self.cache_manager.write();
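
		// `traces` is locked before `cache_manager`, the same order `import` uses when it
		// notes a freshly written trace, so garbage collection cannot deadlock with writers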
		cache_manager.collect_garbage(current_size, |ids| {
			for id in &ids {
				traces.remove(id);
			}
			traces.shrink_to_fit();

			traces.heap_size_of_children()
		});
	}

	/// Returns traces for block with hash.
	fn traces(&self, block_hash: &H256) -> Option<FlatBlockTraces> {
		let result = self.db.key_value().read_with_cache(db::COL_TRACE, &self.traces, block_hash);
		self.note_trace_used(*block_hash);
		result
	}

	/// Returns a vector of transaction traces for a given block.
	fn transactions_traces(&self, block_hash: &H256) -> Option<Vec<FlatTransactionTraces>> {
		self.traces(block_hash).map(Into::into)
	}

	fn matching_block_traces(
		&self,
		filter: &Filter,
		traces: FlatBlockTraces,
		block_hash: H256,
		block_number: BlockNumber
	) -> Vec<LocalizedTrace> {
		let tx_traces: Vec<FlatTransactionTraces> = traces.into();
		tx_traces.into_iter()
			.enumerate()
			.flat_map(|(tx_number, tx_trace)| {
				self.matching_transaction_traces(filter, tx_trace, block_hash.clone(), block_number, tx_number)
			})
			.collect()
	}

	fn matching_transaction_traces(
		&self,
		filter: &Filter,
		traces: FlatTransactionTraces,
		block_hash: H256,
		block_number: BlockNumber,
		tx_number: usize
	) -> Vec<LocalizedTrace> {
		let (trace_tx_number, trace_tx_hash) = match self.extras.transaction_hash(block_number, tx_number) {
			Some(hash) => (Some(tx_number), Some(hash.clone())),
			// None means a trace without a transaction (e.g. a reward)
			None => (None, None),
		};

		let flat_traces: Vec<FlatTrace> = traces.into();
		flat_traces.into_iter()
			.filter_map(|trace| {
				match filter.matches(&trace) {
					true => Some(LocalizedTrace {
						action: trace.action,
						result: trace.result,
						subtraces: trace.subtraces,
						trace_address: trace.trace_address.into_iter().collect(),
						transaction_number: trace_tx_number,
						transaction_hash: trace_tx_hash,
						block_number: block_number,
						block_hash: block_hash
					}),
					false => None
				}
			})
			.collect()
	}
}

impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
	fn tracing_enabled(&self) -> bool {
		self.enabled
	}

	/// Traces of the import request's enacted blocks are expected to be already in the
	/// database or to be the currently inserted trace.
	fn import(&self, batch: &mut DBTransaction, request: ImportRequest) {
		// valid (canon): retracted 0, enacted 1 => false, true,
		// valid (branch): retracted 0, enacted 0 => false, false,
		// valid (bbcc): retracted 1, enacted 1 => true, true,
		// invalid: retracted 1, enacted 0 => true, false,
		let ret = request.retracted != 0;
		let ena = !request.enacted.is_empty();
		assert!(!(ret && !ena));
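		// i.e. the only combination rejected is retracting blocks without enacting any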
		// fast return if tracing is disabled
		if !self.tracing_enabled() {
			return;
		}

		// now let's rebuild the blooms
		if !request.enacted.is_empty() {
			let range_start = request.block_number + 1 - request.enacted.len() as u64;
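			// trace blooms are indexed by block number; the enacted route ends at the block
			// being inserted, so its blooms occupy `block_number - enacted.len() + 1 ..= block_number`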
			let enacted_blooms: Vec<_> = request.enacted
				.iter()
				// all traces are expected to be found here. That's why `expect` has been used
				// instead of `filter_map`. If some traces haven't been found, it means that
				// the traces database is corrupted or incomplete.
				.map(|block_hash| if block_hash == &request.block_hash {
					request.traces.bloom()
				} else {
					self.traces(block_hash).expect("Traces database is incomplete.").bloom()
				})
				.collect();

			self.db.trace_blooms()
				.insert_blooms(range_start, enacted_blooms.iter())
				.expect("Low level database error. Some issue with disk?");
		}

		// insert new block traces into the cache and the database
		{
			let mut traces = self.traces.write();
			// it's important to use overwrite here,
			// because this value might be queried by hash later
			batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
			// note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection
			self.note_trace_used(request.block_hash);
		}
	}

	fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec<usize>) -> Option<LocalizedTrace> {
		self.extras.block_hash(block_number)
			.and_then(|block_hash| self.transactions_traces(&block_hash)
				.and_then(|traces| traces.into_iter().nth(tx_position))
				.map(Into::<Vec<FlatTrace>>::into)
				// this may and should be optimized
				.and_then(|traces| traces.into_iter().find(|trace| trace.trace_address == trace_position))
				.map(|trace| {
					let tx_hash = self.extras.transaction_hash(block_number, tx_position)
						.expect("Expected to find transaction hash. Database is probably corrupted");

					LocalizedTrace {
						action: trace.action,
						result: trace.result,
						subtraces: trace.subtraces,
						trace_address: trace.trace_address.into_iter().collect(),
						transaction_number: Some(tx_position),
						transaction_hash: Some(tx_hash),
						block_number: block_number,
						block_hash: block_hash,
					}
				})
			)
	}

	fn transaction_traces(&self, block_number: BlockNumber, tx_position: usize) -> Option<Vec<LocalizedTrace>> {
		self.extras.block_hash(block_number)
			.and_then(|block_hash| self.transactions_traces(&block_hash)
				.and_then(|traces| traces.into_iter().nth(tx_position))
				.map(Into::<Vec<FlatTrace>>::into)
				.map(|traces| {
					let tx_hash = self.extras.transaction_hash(block_number, tx_position)
						.expect("Expected to find transaction hash. Database is probably corrupted");

					traces.into_iter()
						.map(|trace| LocalizedTrace {
							action: trace.action,
							result: trace.result,
							subtraces: trace.subtraces,
							trace_address: trace.trace_address.into_iter().collect(),
							transaction_number: Some(tx_position),
							transaction_hash: Some(tx_hash.clone()),
							block_number: block_number,
							block_hash: block_hash
						})
						.collect()
				})
			)
	}

	fn block_traces(&self, block_number: BlockNumber) -> Option<Vec<LocalizedTrace>> {
		self.extras.block_hash(block_number)
			.and_then(|block_hash| self.transactions_traces(&block_hash)
				.map(|traces| {
					traces.into_iter()
						.map(Into::<Vec<FlatTrace>>::into)
						.enumerate()
						.flat_map(|(tx_position, traces)| {
							let (trace_tx_number, trace_tx_hash) = match self.extras.transaction_hash(block_number, tx_position) {
								Some(hash) => (Some(tx_position), Some(hash.clone())),
								// None means a trace without a transaction (e.g. a reward)
								None => (None, None),
							};

							traces.into_iter()
								.map(|trace| LocalizedTrace {
									action: trace.action,
									result: trace.result,
									subtraces: trace.subtraces,
									trace_address: trace.trace_address.into_iter().collect(),
									transaction_number: trace_tx_number,
									transaction_hash: trace_tx_hash,
									block_number: block_number,
									block_hash: block_hash,
								})
								.collect::<Vec<LocalizedTrace>>()
						})
						.collect::<Vec<LocalizedTrace>>()
				})
			)
	}

	fn filter(&self, filter: &Filter) -> Vec<LocalizedTrace> {
		let possibilities = filter.bloom_possibilities();
		let numbers = self.db.trace_blooms()
			.filter(filter.range.start as u64, filter.range.end as u64, &possibilities)
			.expect("Low level database error. Some issue with disk?");
		numbers.into_iter()
			.flat_map(|n| {
				let number = n as BlockNumber;
				let hash = self.extras.block_hash(number)
					.expect("Expected to find block hash. Extras db is probably corrupted");
				let traces = self.traces(&hash)
					.expect("Expected to find a trace. Db is probably corrupted.");
				self.matching_block_traces(filter, traces, hash, number)
			})
			.collect()
	}
}

#[cfg(test)]
mod tests {
	use std::collections::HashMap;
	use std::sync::Arc;
	use ethereum_types::{H256, U256, Address};
	use kvdb::DBTransaction;
	use types::BlockNumber;
	use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
	use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError};
	use trace::trace::{Call, Action, Res};
	use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
	use evm::CallType;
	use test_helpers::new_db;

	struct NoopExtras;

	impl DatabaseExtras for NoopExtras {
		fn block_hash(&self, block_number: BlockNumber) -> Option<H256> {
			if block_number == 0 {
				Some(H256::default())
			} else {
				unimplemented!()
			}
		}

		fn transaction_hash(&self, _block_number: BlockNumber, _tx_position: usize) -> Option<H256> {
			unimplemented!();
		}
	}

	#[derive(Clone)]
	struct Extras {
		block_hashes: HashMap<BlockNumber, H256>,
		transaction_hashes: HashMap<BlockNumber, Vec<H256>>,
	}

	impl Default for Extras {
		fn default() -> Self {
			Extras {
				block_hashes: HashMap::new(),
				transaction_hashes: HashMap::new(),
			}
		}
	}

	impl DatabaseExtras for Extras {
		fn block_hash(&self, block_number: BlockNumber) -> Option<H256> {
			self.block_hashes.get(&block_number).cloned()
		}

		fn transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option<H256> {
			self.transaction_hashes.get(&block_number)
				.and_then(|hashes| hashes.iter().cloned().nth(tx_position))
		}
	}

	#[test]
	fn test_reopening_db_with_tracing_off() {
		let db = new_db();
		let mut config = Config::default();

		// set tracing off
		config.enabled = false;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras));
			assert_eq!(tracedb.tracing_enabled(), false);
		}
	}

	#[test]
	fn test_reopening_db_with_tracing_on() {
		let db = new_db();
		let mut config = Config::default();

		// set tracing on
		config.enabled = true;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras));
			assert_eq!(tracedb.tracing_enabled(), true);
		}
	}

	fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest {
		ImportRequest {
			traces: FlatBlockTraces::from(vec![FlatTransactionTraces::from(vec![FlatTrace {
				trace_address: Default::default(),
				subtraces: 0,
				action: Action::Call(Call {
					from: 1.into(),
					to: 2.into(),
					value: 3.into(),
					gas: 4.into(),
					input: vec![],
					call_type: CallType::Call,
				}),
				result: Res::FailedCall(TraceError::OutOfGas),
			}])]),
			block_hash: block_hash.clone(),
			block_number: block_number,
			enacted: vec![block_hash],
			retracted: 0,
		}
	}
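
	// the "simple" request above enacts its own block (`enacted: vec![block_hash]`),
	// i.e. a canonical import in terms of the truth table documented in `import`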

	fn create_noncanon_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest {
		ImportRequest {
			traces: FlatBlockTraces::from(vec![FlatTransactionTraces::from(vec![FlatTrace {
				trace_address: Default::default(),
				subtraces: 0,
				action: Action::Call(Call {
					from: 1.into(),
					to: 2.into(),
					value: 3.into(),
					gas: 4.into(),
					input: vec![],
					call_type: CallType::Call,
				}),
				result: Res::FailedCall(TraceError::OutOfGas),
			}])]),
			block_hash: block_hash.clone(),
			block_number: block_number,
			enacted: vec![],
			retracted: 0,
		}
	}
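
	// the non-canon request differs only in its empty `enacted` list: the block's traces
	// are still written, but no blooms are indexed for it (it models a sidechain block)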

	fn create_simple_localized_trace(block_number: BlockNumber, block_hash: H256, tx_hash: H256) -> LocalizedTrace {
		LocalizedTrace {
			action: Action::Call(Call {
				from: Address::from(1),
				to: Address::from(2),
				value: U256::from(3),
				gas: U256::from(4),
				input: vec![],
				call_type: CallType::Call,
			}),
			result: Res::FailedCall(TraceError::OutOfGas),
			trace_address: vec![],
			subtraces: 0,
			transaction_number: Some(0),
			transaction_hash: Some(tx_hash),
			block_number: block_number,
			block_hash: block_hash,
		}
	}

	#[test]
	fn test_import_non_canon_traces() {
		let db = new_db();
		let mut config = Config::default();
		config.enabled = true;
		let block_0 = H256::from(0xa1);
		let block_1 = H256::from(0xa2);
		let tx_0 = H256::from(0xff);
		let tx_1 = H256::from(0xaf);

		let mut extras = Extras::default();
		extras.block_hashes.insert(0, block_0.clone());
		extras.block_hashes.insert(1, block_1.clone());
		extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
		extras.transaction_hashes.insert(1, vec![tx_1.clone()]);

		let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras));

		// import block 0
		let request = create_noncanon_import_request(0, block_0.clone());
		let mut batch = DBTransaction::new();
		tracedb.import(&mut batch, request);
		db.key_value().write(batch).unwrap();

		assert!(tracedb.traces(&block_0).is_some(), "Traces should be available even if block is non-canon.");
	}

	#[test]
	fn test_import() {
		let db = new_db();
		let mut config = Config::default();
		config.enabled = true;
		let block_1 = H256::from(0xa1);
		let block_2 = H256::from(0xa2);
		let tx_1 = H256::from(0xff);
		let tx_2 = H256::from(0xaf);

		let mut extras = Extras::default();
		extras.block_hashes.insert(0, H256::default());
		extras.block_hashes.insert(1, block_1.clone());
		extras.block_hashes.insert(2, block_2.clone());
		extras.transaction_hashes.insert(1, vec![tx_1.clone()]);
		extras.transaction_hashes.insert(2, vec![tx_2.clone()]);

		let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras));

		// import block 1
		let request = create_simple_import_request(1, block_1.clone());
		let mut batch = DBTransaction::new();
		tracedb.import(&mut batch, request);
		db.key_value().write(batch).unwrap();

		let filter = Filter {
			range: (1..1),
			from_address: AddressesFilter::from(vec![Address::from(1)]),
			to_address: AddressesFilter::from(vec![]),
		};

		let traces = tracedb.filter(&filter);
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));

		// import block 2
		let request = create_simple_import_request(2, block_2.clone());
		let mut batch = DBTransaction::new();
		tracedb.import(&mut batch, request);
		db.key_value().write(batch).unwrap();

		let filter = Filter {
			range: (1..2),
			from_address: AddressesFilter::from(vec![Address::from(1)]),
			to_address: AddressesFilter::from(vec![]),
		};

		let traces = tracedb.filter(&filter);
		assert_eq!(traces.len(), 2);
		assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));
		assert_eq!(traces[1], create_simple_localized_trace(2, block_2.clone(), tx_2.clone()));

		assert!(tracedb.block_traces(0).is_some(), "Genesis trace should always be present.");

		let traces = tracedb.block_traces(1).unwrap();
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));

		let traces = tracedb.block_traces(2).unwrap();
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(2, block_2.clone(), tx_2.clone()));

		assert_eq!(None, tracedb.block_traces(3));

		let traces = tracedb.transaction_traces(1, 0).unwrap();
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));

		let traces = tracedb.transaction_traces(2, 0).unwrap();
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(2, block_2.clone(), tx_2.clone()));

		assert_eq!(None, tracedb.transaction_traces(2, 1));

		assert_eq!(tracedb.trace(1, 0, vec![]).unwrap(), create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));
		assert_eq!(tracedb.trace(2, 0, vec![]).unwrap(), create_simple_localized_trace(2, block_2.clone(), tx_2.clone()));
	}

	#[test]
	fn query_trace_after_reopen() {
		let db = new_db();
		let mut config = Config::default();
		let mut extras = Extras::default();
		let block_0 = H256::from(0xa1);
		let tx_0 = H256::from(0xff);

		extras.block_hashes.insert(0, H256::default());
		extras.transaction_hashes.insert(0, vec![]);
		extras.block_hashes.insert(1, block_0.clone());
		extras.transaction_hashes.insert(1, vec![tx_0.clone()]);

		// set tracing on
		config.enabled = true;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone()));

			// import block 1
			let request = create_simple_import_request(1, block_0.clone());
			let mut batch = DBTransaction::new();
			tracedb.import(&mut batch, request);
			db.key_value().write(batch).unwrap();
		}

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras));
			let traces = tracedb.transaction_traces(1, 0);
			assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(1, block_0, tx_0)]);
		}
	}

	#[test]
	fn query_genesis() {
		let db = new_db();
		let mut config = Config::default();
		let mut extras = Extras::default();
		let block_0 = H256::from(0xa1);

		extras.block_hashes.insert(0, block_0.clone());
		extras.transaction_hashes.insert(0, vec![]);

		// set tracing on
		config.enabled = true;

		let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone()));
		let traces = tracedb.block_traces(0).unwrap();

		assert_eq!(traces.len(), 0);
	}
}