// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Trace database.
use std::ops::Deref;
use std::collections::HashMap;
use std::sync::Arc;
use bloomchain::{Number, Config as BloomConfig};
use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
use util::{H256, H264, Database, DBTransaction, RwLock, HeapSizeOf};
use header::BlockNumber;
use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error};
use db::{self, Key, Writable, Readable, CacheUpdatePolicy};
use blooms;
use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
use cache_manager::CacheManager;

const TRACE_DB_VER: &'static [u8] = b"1.0";

#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature="dev", allow(enum_variant_names))]
enum TraceDBIndex {
	/// Block traces index.
	BlockTraces = 0,
	/// Trace bloom group index.
	BloomGroups = 1,
}
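
// Note: the variants above double as one-byte key prefixes in the trace
// column, keeping block-trace keys (prefix 0) and bloom-group keys (prefix 1)
// from colliding; see the `Key` implementations below.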

impl Key<FlatBlockTraces> for H256 {
	type Target = H264;

	fn key(&self) -> H264 {
		let mut result = H264::default();
		result[0] = TraceDBIndex::BlockTraces as u8;
		result[1..33].copy_from_slice(self);
		result
	}
}

/// Wrapper around `blooms::GroupPosition` so that it can be
/// uniquely identified in the database.
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
struct TraceGroupPosition(blooms::GroupPosition);

impl From<GroupPosition> for TraceGroupPosition {
	fn from(position: GroupPosition) -> Self {
		TraceGroupPosition(From::from(position))
	}
}

impl HeapSizeOf for TraceGroupPosition {
	fn heap_size_of_children(&self) -> usize {
		0
	}
}

/// Helper data structure, created because `[u8; 6]` does not implement `Deref` to `&[u8]`.
pub struct TraceGroupKey([u8; 6]);

impl Deref for TraceGroupKey {
	type Target = [u8];

	fn deref(&self) -> &Self::Target {
		&self.0
	}
}
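
// Key layout for bloom groups: byte 0 is the `BloomGroups` tag, byte 1 the
// bloom level, and bytes 2..6 the 32-bit group index in little-endian order.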
impl Key<blooms::BloomGroup> for TraceGroupPosition {
	type Target = TraceGroupKey;

	fn key(&self) -> Self::Target {
		let mut result = [0u8; 6];
		result[0] = TraceDBIndex::BloomGroups as u8;
		result[1] = self.0.level;
		result[2] = self.0.index as u8;
		result[3] = (self.0.index >> 8) as u8;
		result[4] = (self.0.index >> 16) as u8;
		result[5] = (self.0.index >> 24) as u8;
		TraceGroupKey(result)
	}
}

#[derive(Debug, Hash, Eq, PartialEq)]
enum CacheID {
	Trace(H256),
	Bloom(TraceGroupPosition),
}
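
// A single `CacheManager` tracks entries from both cache maps below, so each
// cache ID records which map a given entry lives in.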

/// Trace database.
pub struct TraceDB<T> where T: DatabaseExtras {
	// cache
	traces: RwLock<HashMap<H256, FlatBlockTraces>>,
	blooms: RwLock<HashMap<TraceGroupPosition, blooms::BloomGroup>>,
	cache_manager: RwLock<CacheManager<CacheID>>,
	// db
	tracesdb: Arc<Database>,
	// config
	bloom_config: BloomConfig,
	// tracing enabled
	enabled: bool,
	// extras
	extras: Arc<T>,
}

impl<T> BloomGroupDatabase for TraceDB<T> where T: DatabaseExtras {
	fn blooms_at(&self, position: &GroupPosition) -> Option<BloomGroup> {
		let position = TraceGroupPosition::from(position.clone());
		let result = self.tracesdb.read_with_cache(db::COL_TRACE, &self.blooms, &position).map(Into::into);
		self.note_used(CacheID::Bloom(position));
		result
	}
}

impl<T> TraceDB<T> where T: DatabaseExtras {
	/// Creates new instance of `TraceDB`.
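	///
	/// A minimal usage sketch (illustrative, not compiled; assumes an open
	/// `Database` with the trace column and a `DatabaseExtras` implementation):
	///
	/// ```ignore
	/// let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap();
	/// if tracedb.tracing_enabled() { /* traces will be recorded on import */ }
	/// ```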
	pub fn new(config: Config, tracesdb: Arc<Database>, extras: Arc<T>) -> Result<Self, Error> {
		// check whether tracing was previously enabled
		let old_tracing = match tracesdb.get(db::COL_TRACE, b"enabled").unwrap() {
			Some(ref value) if value as &[u8] == &[0x1] => Switch::On,
			Some(ref value) if value as &[u8] == &[0x0] => Switch::Off,
			Some(_) => { panic!("tracesdb is corrupted") },
			None => Switch::Auto,
		};

		// reconcile the previously stored switch with the requested one;
		// incompatible settings produce an error (see `test_invalid_reopening_db`)
		let enabled = try!(old_tracing.turn_to(config.enabled));

		let encoded_tracing = match enabled {
			true => [0x1],
			false => [0x0]
		};

		let batch = DBTransaction::new(&tracesdb);
		batch.put(db::COL_TRACE, b"enabled", &encoded_tracing);
		batch.put(db::COL_TRACE, b"version", TRACE_DB_VER);
		tracesdb.write(batch).unwrap();

		let db = TraceDB {
			traces: RwLock::new(HashMap::new()),
			blooms: RwLock::new(HashMap::new()),
			cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)),
			tracesdb: tracesdb,
			bloom_config: config.blooms,
			enabled: enabled,
			extras: extras,
		};

		Ok(db)
	}
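
	// Note: `new` persists the resolved `enabled` flag back to the database, so
	// a later open with `Switch::Auto` inherits it (see the reopening tests below).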

	fn cache_size(&self) -> usize {
		let traces = self.traces.read().heap_size_of_children();
		let blooms = self.blooms.read().heap_size_of_children();
		traces + blooms
	}

	/// Let the cache system know that a cacheable item has been used.
	fn note_used(&self, id: CacheID) {
		let mut cache_manager = self.cache_manager.write();
		cache_manager.note_used(id);
	}

	/// Ticks our cache system and throws out any old data.
	pub fn collect_garbage(&self) {
		// compute the size before taking the write locks below:
		// `cache_size` takes read locks on the same maps
		let current_size = self.cache_size();

		let mut traces = self.traces.write();
		let mut blooms = self.blooms.write();
		let mut cache_manager = self.cache_manager.write();

		cache_manager.collect_garbage(current_size, | ids | {
			for id in &ids {
				match *id {
					CacheID::Trace(ref h) => { traces.remove(h); },
					CacheID::Bloom(ref h) => { blooms.remove(h); },
				}
			}
			traces.shrink_to_fit();
			blooms.shrink_to_fit();

			// report the post-eviction size back to the cache manager
			traces.heap_size_of_children() + blooms.heap_size_of_children()
		});
	}

	/// Returns traces for block with hash.
	fn traces(&self, block_hash: &H256) -> Option<FlatBlockTraces> {
		let result = self.tracesdb.read_with_cache(db::COL_TRACE, &self.traces, block_hash);
		self.note_used(CacheID::Trace(block_hash.clone()));
		result
	}

	/// Returns vector of transaction traces for given block.
	fn transactions_traces(&self, block_hash: &H256) -> Option<Vec<FlatTransactionTraces>> {
		self.traces(block_hash).map(Into::into)
	}

	fn matching_block_traces(
		&self,
		filter: &Filter,
		traces: FlatBlockTraces,
		block_hash: H256,
		block_number: BlockNumber
	) -> Vec<LocalizedTrace> {
		let tx_traces: Vec<FlatTransactionTraces> = traces.into();
		tx_traces.into_iter()
			.enumerate()
			.flat_map(|(tx_number, tx_trace)| {
				self.matching_transaction_traces(filter, tx_trace, block_hash.clone(), block_number, tx_number)
			})
			.collect()
	}

	fn matching_transaction_traces(
		&self,
		filter: &Filter,
		traces: FlatTransactionTraces,
		block_hash: H256,
		block_number: BlockNumber,
		tx_number: usize
	) -> Vec<LocalizedTrace> {
		let tx_hash = self.extras.transaction_hash(block_number, tx_number)
			.expect("Expected to find transaction hash. Database is probably corrupted");

		let flat_traces: Vec<FlatTrace> = traces.into();
		flat_traces.into_iter()
			.filter_map(|trace| {
				match filter.matches(&trace) {
					true => Some(LocalizedTrace {
						action: trace.action,
						result: trace.result,
						subtraces: trace.subtraces,
						trace_address: trace.trace_address.into_iter().collect(),
						transaction_number: tx_number,
						transaction_hash: tx_hash.clone(),
						block_number: block_number,
						block_hash: block_hash
					}),
					false => None
				}
			})
			.collect()
	}
}

impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
	fn tracing_enabled(&self) -> bool {
		self.enabled
	}

	/// Traces of import request's enacted blocks are expected to be already in database
	/// or to be the currently inserted trace.
	fn import(&self, batch: &DBTransaction, request: ImportRequest) {
		// valid (canon): retracted 0, enacted 1 => false, true,
		// valid (branch): retracted 0, enacted 0 => false, false,
		// valid (bbcc): retracted 1, enacted 1 => true, true,
		// invalid: retracted 1, enacted 0 => true, false,
		let ret = request.retracted != 0;
		let ena = !request.enacted.is_empty();
		assert!(!(ret && !ena));
		// fast return if tracing is disabled
		if !self.tracing_enabled() {
			return;
		}

		// at first, let's insert new block traces
		{
			let mut traces = self.traces.write();
			// it's important to use overwrite here,
			// because this value might be queried by hash later
			batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
			// note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection
			self.note_used(CacheID::Trace(request.block_hash.clone()));
		}

		// now let's rebuild the blooms
		if !request.enacted.is_empty() {
			let range_start = request.block_number as Number + 1 - request.enacted.len();
			let range_end = range_start + request.retracted;
			let replaced_range = range_start..range_end;
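			// worked example: importing block 5 with two enacted blocks and one
			// retracted gives range_start = 5 + 1 - 2 = 4 and range_end = 5,
			// i.e. the bloom range 4..5 is rebuilt from the enacted blooms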
			let enacted_blooms = request.enacted
				.iter()
				// all traces are expected to be found here. That's why `expect` has been used
				// instead of `filter_map`. If some traces haven't been found, it means that
				// the traces database is corrupted or incomplete.
				.map(|block_hash| self.traces(block_hash).expect("Traces database is incomplete."))
				.map(|block_traces| block_traces.bloom())
				.map(blooms::Bloom::from)
				.map(Into::into)
				.collect();

			let chain = BloomGroupChain::new(self.bloom_config, self);
			let trace_blooms = chain.replace(&replaced_range, enacted_blooms);
			let blooms_to_insert = trace_blooms.into_iter()
				.map(|p| (From::from(p.0), From::from(p.1)))
				.collect::<HashMap<TraceGroupPosition, blooms::BloomGroup>>();

			let blooms_keys: Vec<_> = blooms_to_insert.keys().cloned().collect();
			let mut blooms = self.blooms.write();
			batch.extend_with_cache(db::COL_TRACE, &mut *blooms, blooms_to_insert, CacheUpdatePolicy::Remove);
			// note_used must be called after locking blooms to avoid cache/traces deadlock on garbage collection
			for key in blooms_keys.into_iter() {
				self.note_used(CacheID::Bloom(key));
			}
		}
	}

	fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec<usize>) -> Option<LocalizedTrace> {
		let trace_position_deq = trace_position.into_iter().collect();
		self.extras.block_hash(block_number)
			.and_then(|block_hash| self.transactions_traces(&block_hash)
				.and_then(|traces| traces.into_iter().nth(tx_position))
				.map(Into::<Vec<FlatTrace>>::into)
				// this may and should be optimized
				.and_then(|traces| traces.into_iter().find(|trace| trace.trace_address == trace_position_deq))
				.map(|trace| {
					let tx_hash = self.extras.transaction_hash(block_number, tx_position)
						.expect("Expected to find transaction hash. Database is probably corrupted");

					LocalizedTrace {
						action: trace.action,
						result: trace.result,
						subtraces: trace.subtraces,
						trace_address: trace.trace_address.into_iter().collect(),
						transaction_number: tx_position,
						transaction_hash: tx_hash,
						block_number: block_number,
						block_hash: block_hash,
					}
				})
			)
	}

	fn transaction_traces(&self, block_number: BlockNumber, tx_position: usize) -> Option<Vec<LocalizedTrace>> {
		self.extras.block_hash(block_number)
			.and_then(|block_hash| self.transactions_traces(&block_hash)
				.and_then(|traces| traces.into_iter().nth(tx_position))
				.map(Into::<Vec<FlatTrace>>::into)
				.map(|traces| {
					let tx_hash = self.extras.transaction_hash(block_number, tx_position)
						.expect("Expected to find transaction hash. Database is probably corrupted");

					traces.into_iter()
						.map(|trace| LocalizedTrace {
							action: trace.action,
							result: trace.result,
							subtraces: trace.subtraces,
							trace_address: trace.trace_address.into_iter().collect(),
							transaction_number: tx_position,
							transaction_hash: tx_hash.clone(),
							block_number: block_number,
							block_hash: block_hash
						})
						.collect()
				})
			)
	}

	fn block_traces(&self, block_number: BlockNumber) -> Option<Vec<LocalizedTrace>> {
		self.extras.block_hash(block_number)
			.and_then(|block_hash| self.transactions_traces(&block_hash)
				.map(|traces| {
					traces.into_iter()
						.map(Into::<Vec<FlatTrace>>::into)
						.enumerate()
						.flat_map(|(tx_position, traces)| {
							let tx_hash = self.extras.transaction_hash(block_number, tx_position)
								.expect("Expected to find transaction hash. Database is probably corrupted");

							traces.into_iter()
								.map(|trace| LocalizedTrace {
									action: trace.action,
									result: trace.result,
									subtraces: trace.subtraces,
									trace_address: trace.trace_address.into_iter().collect(),
									transaction_number: tx_position,
									transaction_hash: tx_hash.clone(),
									block_number: block_number,
									block_hash: block_hash,
								})
								.collect::<Vec<LocalizedTrace>>()
						})
						.collect::<Vec<LocalizedTrace>>()
				})
			)
	}
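
	/// A minimal usage sketch (illustrative, not compiled; mirrors `test_import`
	/// in the tests below):
	///
	/// ```ignore
	/// let filter = Filter {
	/// 	range: (0..1),
	/// 	from_address: AddressesFilter::from(vec![Address::from(1)]),
	/// 	to_address: AddressesFilter::from(vec![]),
	/// };
	/// let traces = tracedb.filter(&filter);
	/// ```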
	fn filter(&self, filter: &Filter) -> Vec<LocalizedTrace> {
		let chain = BloomGroupChain::new(self.bloom_config, self);
		let numbers = chain.filter(filter);
		numbers.into_iter()
			.flat_map(|n| {
				let number = n as BlockNumber;
				let hash = self.extras.block_hash(number)
					.expect("Expected to find block hash. Extras db is probably corrupted");
				let traces = self.traces(&hash)
					.expect("Expected to find a trace. Db is probably corrupted.");
				self.matching_block_traces(filter, traces, hash, number)
			})
			.collect()
	}
}

#[cfg(test)]
mod tests {
	use std::collections::HashMap;
	use std::sync::Arc;
	use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction};
	use devtools::RandomTempPath;
	use header::BlockNumber;
	use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
	use trace::{Filter, LocalizedTrace, AddressesFilter};
	use trace::trace::{Call, Action, Res};
	use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
	use types::executed::CallType;

	struct NoopExtras;

	impl DatabaseExtras for NoopExtras {
		fn block_hash(&self, _block_number: BlockNumber) -> Option<H256> {
			unimplemented!();
		}

		fn transaction_hash(&self, _block_number: BlockNumber, _tx_position: usize) -> Option<H256> {
			unimplemented!();
		}
	}

	#[derive(Clone)]
	struct Extras {
		block_hashes: HashMap<BlockNumber, H256>,
		transaction_hashes: HashMap<BlockNumber, Vec<H256>>,
	}

	impl Default for Extras {
		fn default() -> Self {
			Extras {
				block_hashes: HashMap::new(),
				transaction_hashes: HashMap::new(),
			}
		}
	}

	impl DatabaseExtras for Extras {
		fn block_hash(&self, block_number: BlockNumber) -> Option<H256> {
			self.block_hashes.get(&block_number).cloned()
		}

		fn transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option<H256> {
			self.transaction_hashes.get(&block_number)
				.and_then(|hashes| hashes.iter().cloned().nth(tx_position))
		}
	}

	fn new_db(path: &str) -> Arc<Database> {
		Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), path).unwrap())
	}

	#[test]
	fn test_reopening_db_with_tracing_off() {
		let temp = RandomTempPath::new();
		let db = new_db(temp.as_str());
		let mut config = Config::default();

		// set autotracing
		config.enabled = Switch::Auto;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
			assert_eq!(tracedb.tracing_enabled(), false);
		}

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
			assert_eq!(tracedb.tracing_enabled(), false);
		}

		config.enabled = Switch::Off;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
			assert_eq!(tracedb.tracing_enabled(), false);
		}
	}

	#[test]
	fn test_reopening_db_with_tracing_on() {
		let temp = RandomTempPath::new();
		let db = new_db(temp.as_str());
		let mut config = Config::default();

		// set tracing on
		config.enabled = Switch::On;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
			assert_eq!(tracedb.tracing_enabled(), true);
		}

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
			assert_eq!(tracedb.tracing_enabled(), true);
		}

		config.enabled = Switch::Auto;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
			assert_eq!(tracedb.tracing_enabled(), true);
		}

		config.enabled = Switch::Off;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
			assert_eq!(tracedb.tracing_enabled(), false);
		}
	}

	#[test]
	#[should_panic]
	fn test_invalid_reopening_db() {
		let temp = RandomTempPath::new();
		let db = new_db(temp.as_str());
		let mut config = Config::default();

		// set tracing off
		config.enabled = Switch::Off;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
			assert_eq!(tracedb.tracing_enabled(), false);
		}

		config.enabled = Switch::On;
		TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic!
	}

	fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest {
		ImportRequest {
			traces: FlatBlockTraces::from(vec![FlatTransactionTraces::from(vec![FlatTrace {
				trace_address: Default::default(),
				subtraces: 0,
				action: Action::Call(Call {
					from: 1.into(),
					to: 2.into(),
					value: 3.into(),
					gas: 4.into(),
					input: vec![],
					call_type: CallType::Call,
				}),
				result: Res::FailedCall,
			}])]),
			block_hash: block_hash.clone(),
			block_number: block_number,
			enacted: vec![block_hash],
			retracted: 0,
		}
	}

	fn create_simple_localized_trace(block_number: BlockNumber, block_hash: H256, tx_hash: H256) -> LocalizedTrace {
		LocalizedTrace {
			action: Action::Call(Call {
				from: Address::from(1),
				to: Address::from(2),
				value: U256::from(3),
				gas: U256::from(4),
				input: vec![],
				call_type: CallType::Call,
			}),
			result: Res::FailedCall,
			trace_address: vec![],
			subtraces: 0,
			transaction_number: 0,
			transaction_hash: tx_hash,
			block_number: block_number,
			block_hash: block_hash,
		}
	}

	#[test]
	fn test_import() {
		let temp = RandomTempPath::new();
		let db = Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), temp.as_str()).unwrap());
		let mut config = Config::default();
		config.enabled = Switch::On;
		let block_0 = H256::from(0xa1);
		let block_1 = H256::from(0xa2);
		let tx_0 = H256::from(0xff);
		let tx_1 = H256::from(0xaf);

		let mut extras = Extras::default();
		extras.block_hashes.insert(0, block_0.clone());
		extras.block_hashes.insert(1, block_1.clone());
		extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
		extras.transaction_hashes.insert(1, vec![tx_1.clone()]);

		let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap();

		// import block 0
		let request = create_simple_import_request(0, block_0.clone());
		let batch = DBTransaction::new(&db);
		tracedb.import(&batch, request);
		db.write(batch).unwrap();

		let filter = Filter {
			range: (0..0),
			from_address: AddressesFilter::from(vec![Address::from(1)]),
			to_address: AddressesFilter::from(vec![]),
		};

		let traces = tracedb.filter(&filter);
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(0, block_0.clone(), tx_0.clone()));

		// import block 1
		let request = create_simple_import_request(1, block_1.clone());
		let batch = DBTransaction::new(&db);
		tracedb.import(&batch, request);
		db.write(batch).unwrap();

		let filter = Filter {
			range: (0..1),
			from_address: AddressesFilter::from(vec![Address::from(1)]),
			to_address: AddressesFilter::from(vec![]),
		};

		let traces = tracedb.filter(&filter);
		assert_eq!(traces.len(), 2);
		assert_eq!(traces[0], create_simple_localized_trace(0, block_0.clone(), tx_0.clone()));
		assert_eq!(traces[1], create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));

		let traces = tracedb.block_traces(0).unwrap();
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(0, block_0.clone(), tx_0.clone()));

		let traces = tracedb.block_traces(1).unwrap();
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));

		assert_eq!(None, tracedb.block_traces(2));

		let traces = tracedb.transaction_traces(0, 0).unwrap();
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(0, block_0.clone(), tx_0.clone()));

		let traces = tracedb.transaction_traces(1, 0).unwrap();
		assert_eq!(traces.len(), 1);
		assert_eq!(traces[0], create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));

		assert_eq!(None, tracedb.transaction_traces(1, 1));

		assert_eq!(tracedb.trace(0, 0, vec![]).unwrap(), create_simple_localized_trace(0, block_0.clone(), tx_0.clone()));
		assert_eq!(tracedb.trace(1, 0, vec![]).unwrap(), create_simple_localized_trace(1, block_1.clone(), tx_1.clone()));
	}

	#[test]
	fn query_trace_after_reopen() {
		let temp = RandomTempPath::new();
		let db = new_db(temp.as_str());
		let mut config = Config::default();
		let mut extras = Extras::default();
		let block_0 = H256::from(0xa1);
		let tx_0 = H256::from(0xff);

		extras.block_hashes.insert(0, block_0.clone());
		extras.transaction_hashes.insert(0, vec![tx_0.clone()]);

		// set tracing on
		config.enabled = Switch::On;

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())).unwrap();

			// import block 0
			let request = create_simple_import_request(0, block_0.clone());
			let batch = DBTransaction::new(&db);
			tracedb.import(&batch, request);
			db.write(batch).unwrap();
		}

		{
			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)).unwrap();
			let traces = tracedb.transaction_traces(0, 0);
			assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(0, block_0, tx_0)]);
		}
	}
}