Fix deprecation warnings on nightly (#10746)
* Run `cargo fix` on ethcore
* Add note about funky import needed-but-not-needed
* Fix benches
* cargo fix blockchain
parent 78a1d8b7b4
commit dbdb57a8c0
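Nearly every hunk in the diff below is the same mechanical change: on a recent nightly (and under the Rust 2018 idiom lints), naming a trait object without the `dyn` keyword — `Box<Trait>`, `&Trait`, `Arc<Trait>`, `Weak<Trait>`, or a bare `Fn`/`FnMut` type — triggers the `bare_trait_objects` deprecation warning, and `cargo fix` rewrites those spots to `Box<dyn Trait>`, `&dyn Trait`, and so on. A minimal sketch of the warning and its fix, using simplified stand-ins for the `Pricer`/`Builtin` code touched later in this diff (the signatures here are illustrative, not the real ethcore ones):

```rust
// A bare trait object such as `Box<Pricer>` warns on recent compilers
// (`bare_trait_objects`); spelling it `Box<dyn Pricer>` is the fix that
// `cargo fix` applies across the crate.
trait Pricer {
    fn cost(&self, input_len: usize) -> u64;
}

struct Linear { base: u64, word: u64 }

impl Pricer for Linear {
    fn cost(&self, input_len: usize) -> u64 {
        // linear gas pricing: base + word * ceil(len / 32)
        self.base + self.word * ((input_len as u64 + 31) / 32)
    }
}

// Old spelling (warns): fn make_pricer() -> Box<Pricer> { ... }
// New spelling after the fix:
fn make_pricer() -> Box<dyn Pricer> {
    Box::new(Linear { base: 10, word: 20 })
}

fn main() {
    assert_eq!(make_pricer().cost(64), 50);
}
```

The rewrite is purely syntactic: the boxed value, its vtable, and every call site are unchanged, which is why `cargo fix` can apply it mechanically across the whole crate.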
@@ -29,7 +29,7 @@ use criterion::{Criterion, Bencher};
 use bytes::BytesRef;
 use ethcore::builtin::Builtin;
 use ethcore::machine::EthereumMachine;
-use ethereum_types::U256;
+use ethereum_types::H160;
 use ethcore::ethereum::new_byzantium_test_machine;
 use rustc_hex::FromHex;
 
@@ -46,8 +46,9 @@ struct BuiltinBenchmark<'a> {
 impl<'a> BuiltinBenchmark<'a> {
 fn new(builtin_address: &'static str, input: &str, expected: &str) -> BuiltinBenchmark<'a> {
 let builtins = BYZANTIUM_MACHINE.builtins();
-
-let builtin = builtins.get(&builtin_address.into()).unwrap().clone();
+use std::str::FromStr;
+let addr = H160::from_str(builtin_address).unwrap();
+let builtin = builtins.get(&addr).unwrap().clone();
 let input = FromHex::from_hex(input).unwrap();
 let expected = FromHex::from_hex(expected).unwrap();
 
@@ -56,10 +57,6 @@ impl<'a> BuiltinBenchmark<'a> {
 }
 }
 
-fn gas_cost(&self) -> U256 {
-self.builtin.cost(&self.input)
-}
-
 fn run(&self, b: &mut Bencher) {
 let mut output = vec![0; self.expected.len()];
 
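The bench hunks above also carry what the commit message calls the funky import: the builtin table is now keyed by an explicit `H160` address, and `H160::from_str` is a method of the `FromStr` trait, so the trait has to be in scope at the call site — hence the `use std::str::FromStr;` tucked inside the function body. A small self-contained sketch of the same pattern, assuming the `ethereum_types` crate (the address constant is illustrative, not taken from this diff):

```rust
use ethereum_types::H160;

fn builtin_address(hex: &str) -> H160 {
    // `from_str` lives on the `FromStr` trait, so the trait must be imported
    // for the call to resolve; importing it inside the function keeps the
    // scope as tight as possible.
    use std::str::FromStr;
    H160::from_str(hex).expect("valid 20-byte hex address")
}

fn main() {
    // An illustrative builtin-style address (40 hex characters, no 0x prefix).
    let addr = builtin_address("0000000000000000000000000000000000000001");
    println!("{:?}", addr);
}
```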
@@ -57,7 +57,7 @@ use crate::{CacheSize, ImportRoute, Config};
 /// Database backing `BlockChain`.
 pub trait BlockChainDB: Send + Sync {
 /// Generic key value store.
-fn key_value(&self) -> &Arc<KeyValueDB>;
+fn key_value(&self) -> &Arc<dyn KeyValueDB>;
 
 /// Header blooms database.
 fn blooms(&self) -> &blooms_db::Database;
@@ -85,7 +85,7 @@ pub trait BlockChainDB: Send + Sync {
 /// predefined config.
 pub trait BlockChainDBHandler: Send + Sync {
 /// Open the predefined key-value database.
-fn open(&self, path: &Path) -> io::Result<Arc<BlockChainDB>>;
+fn open(&self, path: &Path) -> io::Result<Arc<dyn BlockChainDB>>;
 }
 
 /// Interface for querying blocks by hash and by number.
@@ -228,7 +228,7 @@ pub struct BlockChain {
 transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
 block_receipts: RwLock<HashMap<H256, BlockReceipts>>,
 
-db: Arc<BlockChainDB>,
+db: Arc<dyn BlockChainDB>,
 
 cache_man: Mutex<CacheManager<CacheId>>,
 
@@ -481,7 +481,7 @@ impl<'a> Iterator for AncestryWithMetadataIter<'a> {
 /// Returns epoch transitions.
 pub struct EpochTransitionIter<'a> {
 chain: &'a BlockChain,
-prefix_iter: Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>,
+prefix_iter: Box<dyn Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>,
 }
 
 impl<'a> Iterator for EpochTransitionIter<'a> {
@@ -521,7 +521,7 @@ impl<'a> Iterator for EpochTransitionIter<'a> {
 
 impl BlockChain {
 /// Create new instance of blockchain from given Genesis.
-pub fn new(config: Config, genesis: &[u8], db: Arc<BlockChainDB>) -> BlockChain {
+pub fn new(config: Config, genesis: &[u8], db: Arc<dyn BlockChainDB>) -> BlockChain {
 // 400 is the average size of the key
 let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400);
 
@@ -1592,11 +1592,11 @@ mod tests {
 _trace_blooms_dir: TempDir,
 blooms: blooms_db::Database,
 trace_blooms: blooms_db::Database,
-key_value: Arc<KeyValueDB>,
+key_value: Arc<dyn KeyValueDB>,
 }
 
 impl BlockChainDB for TestBlockChainDB {
-fn key_value(&self) -> &Arc<KeyValueDB> {
+fn key_value(&self) -> &Arc<dyn KeyValueDB> {
 &self.key_value
 }
 
@@ -1610,7 +1610,7 @@ mod tests {
 }
 
 /// Creates new test instance of `BlockChainDB`
-pub fn new_db() -> Arc<BlockChainDB> {
+pub fn new_db() -> Arc<dyn BlockChainDB> {
 let blooms_dir = TempDir::new("").unwrap();
 let trace_blooms_dir = TempDir::new("").unwrap();
 
@@ -1625,15 +1625,15 @@ mod tests {
 Arc::new(db)
 }
 
-fn new_chain(genesis: encoded::Block, db: Arc<BlockChainDB>) -> BlockChain {
+fn new_chain(genesis: encoded::Block, db: Arc<dyn BlockChainDB>) -> BlockChain {
 BlockChain::new(Config::default(), genesis.raw(), db)
 }
 
-fn insert_block(db: &Arc<BlockChainDB>, bc: &BlockChain, block: encoded::Block, receipts: Vec<Receipt>) -> ImportRoute {
+fn insert_block(db: &Arc<dyn BlockChainDB>, bc: &BlockChain, block: encoded::Block, receipts: Vec<Receipt>) -> ImportRoute {
 insert_block_commit(db, bc, block, receipts, true)
 }
 
-fn insert_block_commit(db: &Arc<BlockChainDB>, bc: &BlockChain, block: encoded::Block, receipts: Vec<Receipt>, commit: bool) -> ImportRoute {
+fn insert_block_commit(db: &Arc<dyn BlockChainDB>, bc: &BlockChain, block: encoded::Block, receipts: Vec<Receipt>, commit: bool) -> ImportRoute {
 let mut batch = db.key_value().transaction();
 let res = insert_block_batch(&mut batch, bc, block, receipts);
 db.key_value().write(batch).unwrap();
@@ -57,7 +57,7 @@ impl Default for Factory {
 impl Factory {
 /// Create a read-only accountdb.
 /// This will panic when write operations are called.
-pub fn readonly<'db>(&self, db: &'db HashDB<KeccakHasher, DBValue>, address_hash: H256) -> Box<HashDB<KeccakHasher, DBValue> + 'db> {
+pub fn readonly<'db>(&self, db: &'db dyn HashDB<KeccakHasher, DBValue>, address_hash: H256) -> Box<dyn HashDB<KeccakHasher, DBValue> + 'db> {
 match *self {
 Factory::Mangled => Box::new(AccountDB::from_hash(db, address_hash)),
 Factory::Plain => Box::new(Wrapping(db)),
@@ -65,7 +65,7 @@ impl Factory {
 }
 
 /// Create a new mutable hashdb.
-pub fn create<'db>(&self, db: &'db mut HashDB<KeccakHasher, DBValue>, address_hash: H256) -> Box<HashDB<KeccakHasher, DBValue> + 'db> {
+pub fn create<'db>(&self, db: &'db mut dyn HashDB<KeccakHasher, DBValue>, address_hash: H256) -> Box<dyn HashDB<KeccakHasher, DBValue> + 'db> {
 match *self {
 Factory::Mangled => Box::new(AccountDBMut::from_hash(db, address_hash)),
 Factory::Plain => Box::new(WrappingMut(db)),
@@ -77,19 +77,19 @@ impl Factory {
 /// DB backend wrapper for Account trie
 /// Transforms trie node keys for the database
 pub struct AccountDB<'db> {
-db: &'db HashDB<KeccakHasher, DBValue>,
+db: &'db dyn HashDB<KeccakHasher, DBValue>,
 address_hash: H256,
 }
 
 impl<'db> AccountDB<'db> {
 /// Create a new AccountDB from an address.
 #[cfg(test)]
-pub fn new(db: &'db HashDB<KeccakHasher, DBValue>, address: &Address) -> Self {
+pub fn new(db: &'db dyn HashDB<KeccakHasher, DBValue>, address: &Address) -> Self {
 Self::from_hash(db, keccak(address))
 }
 
 /// Create a new AcountDB from an address' hash.
-pub fn from_hash(db: &'db HashDB<KeccakHasher, DBValue>, address_hash: H256) -> Self {
+pub fn from_hash(db: &'db dyn HashDB<KeccakHasher, DBValue>, address_hash: H256) -> Self {
 AccountDB {
 db: db,
 address_hash: address_hash,
@@ -98,8 +98,8 @@ impl<'db> AccountDB<'db> {
 }
 
 impl<'db> AsHashDB<KeccakHasher, DBValue> for AccountDB<'db> {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
 }
 
 impl<'db> HashDB<KeccakHasher, DBValue> for AccountDB<'db> {
@@ -132,19 +132,19 @@ impl<'db> HashDB<KeccakHasher, DBValue> for AccountDB<'db> {
 
 /// DB backend wrapper for Account trie
 pub struct AccountDBMut<'db> {
-db: &'db mut HashDB<KeccakHasher, DBValue>,
+db: &'db mut dyn HashDB<KeccakHasher, DBValue>,
 address_hash: H256,
 }
 
 impl<'db> AccountDBMut<'db> {
 /// Create a new AccountDB from an address.
 #[cfg(test)]
-pub fn new(db: &'db mut HashDB<KeccakHasher, DBValue>, address: &Address) -> Self {
+pub fn new(db: &'db mut dyn HashDB<KeccakHasher, DBValue>, address: &Address) -> Self {
 Self::from_hash(db, keccak(address))
 }
 
 /// Create a new AcountDB from an address' hash.
-pub fn from_hash(db: &'db mut HashDB<KeccakHasher, DBValue>, address_hash: H256) -> Self {
+pub fn from_hash(db: &'db mut dyn HashDB<KeccakHasher, DBValue>, address_hash: H256) -> Self {
 AccountDBMut {
 db: db,
 address_hash: address_hash,
@@ -200,15 +200,15 @@ impl<'db> HashDB<KeccakHasher, DBValue> for AccountDBMut<'db>{
 }
 
 impl<'db> AsHashDB<KeccakHasher, DBValue> for AccountDBMut<'db> {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
 }
 
-struct Wrapping<'db>(&'db HashDB<KeccakHasher, DBValue>);
+struct Wrapping<'db>(&'db dyn HashDB<KeccakHasher, DBValue>);
 
 impl<'db> AsHashDB<KeccakHasher, DBValue> for Wrapping<'db> {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
 }
 
 impl<'db> HashDB<KeccakHasher, DBValue> for Wrapping<'db> {
@@ -239,10 +239,10 @@ impl<'db> HashDB<KeccakHasher, DBValue> for Wrapping<'db> {
 }
 }
 
-struct WrappingMut<'db>(&'db mut HashDB<KeccakHasher, DBValue>);
+struct WrappingMut<'db>(&'db mut dyn HashDB<KeccakHasher, DBValue>);
 impl<'db> AsHashDB<KeccakHasher, DBValue> for WrappingMut<'db> {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
 }
 
 impl<'db> HashDB<KeccakHasher, DBValue> for WrappingMut<'db>{
@@ -61,7 +61,7 @@ use types::receipt::{Receipt, TransactionOutcome};
 /// maintain the system `state()`. We also archive execution receipts in preparation for later block creation.
 pub struct OpenBlock<'x> {
 block: ExecutedBlock,
-engine: &'x EthEngine,
+engine: &'x dyn EthEngine,
 }
 
 /// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
@@ -163,7 +163,7 @@ pub trait Drain {
 impl<'x> OpenBlock<'x> {
 /// Create a new `OpenBlock` ready for transaction pushing.
 pub fn new<'a, I: IntoIterator<Item = ExtendedHeader>>(
-engine: &'x EthEngine,
+engine: &'x dyn EthEngine,
 factories: Factories,
 tracing: bool,
 db: StateDB,
@@ -374,7 +374,7 @@ impl ClosedBlock {
 }
 
 /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`.
-pub fn reopen(self, engine: &EthEngine) -> OpenBlock {
+pub fn reopen(self, engine: &dyn EthEngine) -> OpenBlock {
 // revert rewards (i.e. set state back at last transaction's state).
 let mut block = self.block;
 block.state = self.unclosed_state;
@@ -404,7 +404,7 @@ impl LockedBlock {
 /// Provide a valid seal in order to turn this into a `SealedBlock`.
 ///
 /// NOTE: This does not check the validity of `seal` with the engine.
-pub fn seal(self, engine: &EthEngine, seal: Vec<Bytes>) -> Result<SealedBlock, Error> {
+pub fn seal(self, engine: &dyn EthEngine, seal: Vec<Bytes>) -> Result<SealedBlock, Error> {
 let expected_seal_fields = engine.seal_fields(&self.header);
 let mut s = self;
 if seal.len() != expected_seal_fields {
@@ -429,7 +429,7 @@ impl LockedBlock {
 /// TODO(https://github.com/paritytech/parity-ethereum/issues/10407): This is currently only used in POW chain call paths, we should really merge it with seal() above.
 pub fn try_seal(
 self,
-engine: &EthEngine,
+engine: &dyn EthEngine,
 seal: Vec<Bytes>,
 ) -> Result<SealedBlock, Error> {
 let mut s = self;
@@ -472,14 +472,14 @@ pub(crate) fn enact(
 header: Header,
 transactions: Vec<SignedTransaction>,
 uncles: Vec<Header>,
-engine: &EthEngine,
+engine: &dyn EthEngine,
 tracing: bool,
 db: StateDB,
 parent: &Header,
 last_hashes: Arc<LastHashes>,
 factories: Factories,
 is_epoch_begin: bool,
-ancestry: &mut Iterator<Item=ExtendedHeader>,
+ancestry: &mut dyn Iterator<Item=ExtendedHeader>,
 ) -> Result<LockedBlock, Error> {
 // For trace log
 let trace_state = if log_enabled!(target: "enact", ::log::Level::Trace) {
@@ -525,14 +525,14 @@ pub(crate) fn enact(
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
 pub fn enact_verified(
 block: PreverifiedBlock,
-engine: &EthEngine,
+engine: &dyn EthEngine,
 tracing: bool,
 db: StateDB,
 parent: &Header,
 last_hashes: Arc<LastHashes>,
 factories: Factories,
 is_epoch_begin: bool,
-ancestry: &mut Iterator<Item=ExtendedHeader>,
+ancestry: &mut dyn Iterator<Item=ExtendedHeader>,
 ) -> Result<LockedBlock, Error> {
 
 enact(
@@ -570,7 +570,7 @@ mod tests {
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
 fn enact_bytes(
 block_bytes: Vec<u8>,
-engine: &EthEngine,
+engine: &dyn EthEngine,
 tracing: bool,
 db: StateDB,
 parent: &Header,
@@ -623,7 +623,7 @@ mod tests {
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards
 fn enact_and_seal(
 block_bytes: Vec<u8>,
-engine: &EthEngine,
+engine: &dyn EthEngine,
 tracing: bool,
 db: StateDB,
 parent: &Header,
@@ -157,8 +157,8 @@ impl ModexpPricer {
 ///
 /// Unless `is_active` is true,
 pub struct Builtin {
-pricer: Box<Pricer>,
-native: Box<Impl>,
+pricer: Box<dyn Pricer>,
+native: Box<dyn Impl>,
 activate_at: u64,
 }
 
@@ -177,7 +177,7 @@ impl Builtin {
 
 impl From<ethjson::spec::Builtin> for Builtin {
 fn from(b: ethjson::spec::Builtin) -> Self {
-let pricer: Box<Pricer> = match b.pricing {
+let pricer: Box<dyn Pricer> = match b.pricing {
 ethjson::spec::Pricing::Linear(linear) => {
 Box::new(Linear {
 base: linear.base,
@@ -211,16 +211,16 @@ impl From<ethjson::spec::Builtin> for Builtin {
 }
 
 /// Ethereum built-in factory.
-pub fn ethereum_builtin(name: &str) -> Box<Impl> {
+pub fn ethereum_builtin(name: &str) -> Box<dyn Impl> {
 match name {
-"identity" => Box::new(Identity) as Box<Impl>,
-"ecrecover" => Box::new(EcRecover) as Box<Impl>,
-"sha256" => Box::new(Sha256) as Box<Impl>,
-"ripemd160" => Box::new(Ripemd160) as Box<Impl>,
-"modexp" => Box::new(ModexpImpl) as Box<Impl>,
-"alt_bn128_add" => Box::new(Bn128AddImpl) as Box<Impl>,
-"alt_bn128_mul" => Box::new(Bn128MulImpl) as Box<Impl>,
-"alt_bn128_pairing" => Box::new(Bn128PairingImpl) as Box<Impl>,
+"identity" => Box::new(Identity) as Box<dyn Impl>,
+"ecrecover" => Box::new(EcRecover) as Box<dyn Impl>,
+"sha256" => Box::new(Sha256) as Box<dyn Impl>,
+"ripemd160" => Box::new(Ripemd160) as Box<dyn Impl>,
+"modexp" => Box::new(ModexpImpl) as Box<dyn Impl>,
+"alt_bn128_add" => Box::new(Bn128AddImpl) as Box<dyn Impl>,
+"alt_bn128_mul" => Box::new(Bn128MulImpl) as Box<dyn Impl>,
+"alt_bn128_pairing" => Box::new(Bn128PairingImpl) as Box<dyn Impl>,
 _ => panic!("invalid builtin name: {}", name),
 }
 }
@@ -1008,7 +1008,7 @@ mod tests {
 fn is_active() {
 let pricer = Box::new(Linear { base: 10, word: 20} );
 let b = Builtin {
-pricer: pricer as Box<Pricer>,
+pricer: pricer as Box<dyn Pricer>,
 native: ethereum_builtin("identity"),
 activate_at: 100_000,
 };
@@ -1022,7 +1022,7 @@ mod tests {
 fn from_named_linear() {
 let pricer = Box::new(Linear { base: 10, word: 20 });
 let b = Builtin {
-pricer: pricer as Box<Pricer>,
+pricer: pricer as Box<dyn Pricer>,
 native: ethereum_builtin("identity"),
 activate_at: 1,
 };
@@ -32,13 +32,13 @@ const HEAVY_VERIFY_RATE: f32 = 0.02;
 /// Ancient block verifier: import an ancient sequence of blocks in order from a starting
 /// epoch.
 pub struct AncientVerifier {
-cur_verifier: RwLock<Option<Box<EpochVerifier<EthereumMachine>>>>,
-engine: Arc<EthEngine>,
+cur_verifier: RwLock<Option<Box<dyn EpochVerifier<EthereumMachine>>>>,
+engine: Arc<dyn EthEngine>,
 }
 
 impl AncientVerifier {
 /// Create a new ancient block verifier with the given engine.
-pub fn new(engine: Arc<EthEngine>) -> Self {
+pub fn new(engine: Arc<dyn EthEngine>) -> Self {
 AncientVerifier {
 cur_verifier: RwLock::new(None),
 engine,
@@ -87,7 +87,7 @@ impl AncientVerifier {
 }
 
 fn initial_verifier(&self, header: &Header, chain: &BlockChain)
--> Result<Box<EpochVerifier<EthereumMachine>>, ::error::Error>
+-> Result<Box<dyn EpochVerifier<EthereumMachine>>, ::error::Error>
 {
 trace!(target: "client", "Initializing ancient block restoration.");
 let current_epoch_data = chain.epoch_transitions()
@@ -153,7 +153,7 @@ struct Importer {
 pub import_lock: Mutex<()>, // FIXME Maybe wrap the whole `Importer` instead?
 
 /// Used to verify blocks
-pub verifier: Box<Verifier<Client>>,
+pub verifier: Box<dyn Verifier<Client>>,
 
 /// Queue containing pending blocks
 pub block_queue: BlockQueue,
@@ -165,7 +165,7 @@ struct Importer {
 pub ancient_verifier: AncientVerifier,
 
 /// Ethereum engine to be used during import
-pub engine: Arc<EthEngine>,
+pub engine: Arc<dyn EthEngine>,
 
 /// A lru cache of recently detected bad blocks
 pub bad_blocks: bad_blocks::BadBlocks,
@@ -187,7 +187,7 @@ pub struct Client {
 
 chain: RwLock<Arc<BlockChain>>,
 tracedb: RwLock<TraceDB<BlockChain>>,
-engine: Arc<EthEngine>,
+engine: Arc<dyn EthEngine>,
 
 /// Client configuration
 config: ClientConfig,
@@ -196,7 +196,7 @@ pub struct Client {
 pruning: journaldb::Algorithm,
 
 /// Client uses this to store blocks, traces, etc.
-db: RwLock<Arc<BlockChainDB>>,
+db: RwLock<Arc<dyn BlockChainDB>>,
 
 state_db: RwLock<StateDB>,
 
@@ -210,7 +210,7 @@ pub struct Client {
 io_channel: RwLock<IoChannel<ClientIoMessage>>,
 
 /// List of actors to be notified on certain chain events
-notify: RwLock<Vec<Weak<ChainNotify>>>,
+notify: RwLock<Vec<Weak<dyn ChainNotify>>>,
 
 /// Queued transactions from IO
 queue_transactions: IoChannelQueue,
@@ -232,12 +232,12 @@ pub struct Client {
 history: u64,
 
 /// An action to be done if a mode/spec_name change happens
-on_user_defaults_change: Mutex<Option<Box<FnMut(Option<Mode>) + 'static + Send>>>,
+on_user_defaults_change: Mutex<Option<Box<dyn FnMut(Option<Mode>) + 'static + Send>>>,
 
 registrar_address: Option<Address>,
 
 /// A closure to call when we want to restart the client
-exit_handler: Mutex<Option<Box<Fn(String) + 'static + Send>>>,
+exit_handler: Mutex<Option<Box<dyn Fn(String) + 'static + Send>>>,
 
 importer: Importer,
 }
@@ -245,7 +245,7 @@ pub struct Client {
 impl Importer {
 pub fn new(
 config: &ClientConfig,
-engine: Arc<EthEngine>,
+engine: Arc<dyn EthEngine>,
 message_channel: IoChannel<ClientIoMessage>,
 miner: Arc<Miner>,
 ) -> Result<Importer, ::error::Error> {
@@ -449,7 +449,7 @@ impl Importer {
 ///
 /// The block is guaranteed to be the next best blocks in the
 /// first block sequence. Does no sealing or transaction validation.
-fn import_old_block(&self, unverified: Unverified, receipts_bytes: &[u8], db: &KeyValueDB, chain: &BlockChain) -> EthcoreResult<()> {
+fn import_old_block(&self, unverified: Unverified, receipts_bytes: &[u8], db: &dyn KeyValueDB, chain: &BlockChain) -> EthcoreResult<()> {
 let receipts = ::rlp::decode_list(receipts_bytes);
 let _import_lock = self.import_lock.lock();
 
@@ -702,7 +702,7 @@ impl Client {
 pub fn new(
 config: ClientConfig,
 spec: &Spec,
-db: Arc<BlockChainDB>,
+db: Arc<dyn BlockChainDB>,
 miner: Arc<Miner>,
 message_channel: IoChannel<ClientIoMessage>,
 ) -> Result<Arc<Client>, ::error::Error> {
@@ -844,7 +844,7 @@ impl Client {
 }
 
 /// Adds an actor to be notified on certain events
-pub fn add_notify(&self, target: Arc<ChainNotify>) {
+pub fn add_notify(&self, target: Arc<dyn ChainNotify>) {
 self.notify.write().push(Arc::downgrade(&target));
 }
 
@@ -857,11 +857,11 @@ impl Client {
 }
 
 /// Returns engine reference.
-pub fn engine(&self) -> &EthEngine {
+pub fn engine(&self) -> &dyn EthEngine {
 &*self.engine
 }
 
-fn notify<F>(&self, f: F) where F: Fn(&ChainNotify) {
+fn notify<F>(&self, f: F) where F: Fn(&dyn ChainNotify) {
 for np in &*self.notify.read() {
 if let Some(n) = np.upgrade() {
 f(&*n);
@@ -1071,7 +1071,7 @@ impl Client {
 }
 
 /// Get a copy of the best block's state.
-pub fn state(&self) -> Box<StateInfo> {
+pub fn state(&self) -> Box<dyn StateInfo> {
 Box::new(self.latest_state()) as Box<_>
 }
 
@@ -1648,7 +1648,7 @@ impl Call for Client {
 }
 
 impl EngineInfo for Client {
-fn engine(&self) -> &EthEngine {
+fn engine(&self) -> &dyn EthEngine {
 Client::engine(self)
 }
 }
@@ -1668,7 +1668,7 @@ impl BlockChainClient for Client {
 Ok(self.replay_block_transactions(block, analytics)?.nth(address.index).expect(PROOF).1)
 }
 
-fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result<Box<Iterator<Item = (H256, Executed)>>, CallError> {
+fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result<Box<dyn Iterator<Item = (H256, Executed)>>, CallError> {
 let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?;
 let body = self.block_body(block).ok_or(CallError::StatePruned)?;
 let mut state = self.state_at_beginning(block).ok_or(CallError::StatePruned)?;
@@ -2495,7 +2495,7 @@ impl super::traits::EngineClient for Client {
 self.chain.read().epoch_transition_for(parent_hash)
 }
 
-fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) }
+fn as_full_client(&self) -> Option<&dyn BlockChainClient> { Some(self) }
 
 fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
 BlockChainClient::block_number(self, id)
@@ -47,7 +47,7 @@ impl ClientIoMessage {
 }
 
 /// A function to invoke in the client thread.
-pub struct Callback(pub Box<Fn(&Client) + Send + Sync>);
+pub struct Callback(pub Box<dyn Fn(&Client) + Send + Sync>);
 
 impl fmt::Debug for Callback {
 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
@@ -627,7 +627,7 @@ impl StateClient for TestBlockChainClient {
 }
 
 impl EngineInfo for TestBlockChainClient {
-fn engine(&self) -> &EthEngine {
+fn engine(&self) -> &dyn EthEngine {
 unimplemented!()
 }
 }
@@ -661,7 +661,7 @@ impl BlockChainClient for TestBlockChainClient {
 }
 }
 
-fn replay_block_transactions(&self, _block: BlockId, _analytics: CallAnalytics) -> Result<Box<Iterator<Item = (H256, Executed)>>, CallError> {
+fn replay_block_transactions(&self, _block: BlockId, _analytics: CallAnalytics) -> Result<Box<dyn Iterator<Item = (H256, Executed)>>, CallError> {
 Ok(Box::new(
 self.traces
 .read()
@@ -955,7 +955,7 @@ impl super::traits::EngineClient for TestBlockChainClient {
 None
 }
 
-fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) }
+fn as_full_client(&self) -> Option<&dyn BlockChainClient> { Some(self) }
 
 fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
 BlockChainClient::block_number(self, id)
@@ -23,11 +23,11 @@ use types::BlockNumber;
 
 impl TraceDatabaseExtras for BlockChain {
 fn block_hash(&self, block_number: BlockNumber) -> Option<H256> {
-(self as &BlockProvider).block_hash(block_number)
+(self as &dyn BlockProvider).block_hash(block_number)
 }
 
 fn transaction_hash(&self, block_number: BlockNumber, tx_position: usize) -> Option<H256> {
-(self as &BlockProvider).block_hash(block_number)
+(self as &dyn BlockProvider).block_hash(block_number)
 .and_then(|block_hash| {
 let tx_address = TransactionAddress {
 block_hash: block_hash,
@@ -55,7 +55,7 @@ use verification::queue::kind::blocks::Unverified;
 /// State information to be used during client query
 pub enum StateOrBlock {
 /// State to be used, may be pending
-State(Box<StateInfo>),
+State(Box<dyn StateInfo>),
 
 /// Id of an existing block from a chain to get state from
 Block(BlockId)
@@ -67,8 +67,8 @@ impl<S: StateInfo + 'static> From<S> for StateOrBlock {
 }
 }
 
-impl From<Box<StateInfo>> for StateOrBlock {
-fn from(info: Box<StateInfo>) -> StateOrBlock {
+impl From<Box<dyn StateInfo>> for StateOrBlock {
+fn from(info: Box<dyn StateInfo>) -> StateOrBlock {
 StateOrBlock::State(info)
 }
 }
@@ -184,7 +184,7 @@ pub trait Call {
 /// Provides `engine` method
 pub trait EngineInfo {
 /// Get underlying engine object
-fn engine(&self) -> &EthEngine;
+fn engine(&self) -> &dyn EthEngine;
 }
 
 /// IO operations that should off-load heavy work to another thread.
@@ -306,7 +306,7 @@ pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContra
 fn replay(&self, t: TransactionId, analytics: CallAnalytics) -> Result<Executed, CallError>;
 
 /// Replays all the transactions in a given block for inspection.
-fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result<Box<Iterator<Item = (H256, Executed)>>, CallError>;
+fn replay_block_transactions(&self, block: BlockId, analytics: CallAnalytics) -> Result<Box<dyn Iterator<Item = (H256, Executed)>>, CallError>;
 
 /// Returns traces matching given filter.
 fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>>;
@@ -441,7 +441,7 @@ pub trait EngineClient: Sync + Send + ChainInfo {
 fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition>;
 
 /// Attempt to cast the engine client to a full client.
-fn as_full_client(&self) -> Option<&BlockChainClient>;
+fn as_full_client(&self) -> Option<&dyn BlockChainClient>;
 
 /// Get a block number by ID.
 fn block_number(&self, id: BlockId) -> Option<BlockNumber>;
@@ -63,7 +63,7 @@ pub struct AuthorityRoundParams {
 /// Starting step,
 pub start_step: Option<u64>,
 /// Valid validators.
-pub validators: Box<ValidatorSet>,
+pub validators: Box<dyn ValidatorSet>,
 /// Chain score validation transition block.
 pub validate_score_transition: u64,
 /// Monotonic step validation transition block.
@@ -222,9 +222,9 @@ impl EpochManager {
 // Zooms to the epoch after the header with the given hash. Returns true if succeeded, false otherwise.
 fn zoom_to_after(
 &mut self,
-client: &EngineClient,
+client: &dyn EngineClient,
 machine: &EthereumMachine,
-validators: &ValidatorSet,
+validators: &dyn ValidatorSet,
 hash: H256
 ) -> bool {
 let last_was_parent = self.finality_checker.subchain_head() == Some(hash);
@@ -324,7 +324,7 @@ impl EmptyStep {
 EmptyStep { signature, step, parent_hash }
 }
 
-fn verify(&self, validators: &ValidatorSet) -> Result<bool, Error> {
+fn verify(&self, validators: &dyn ValidatorSet) -> Result<bool, Error> {
 let message = keccak(empty_step_rlp(self.step, &self.parent_hash));
 let correct_proposer = step_proposer(validators, &self.parent_hash, self.step);
 
@@ -419,9 +419,9 @@ struct PermissionedStep {
 pub struct AuthorityRound {
 transition_service: IoService<()>,
 step: Arc<PermissionedStep>,
-client: Arc<RwLock<Option<Weak<EngineClient>>>>,
-signer: RwLock<Option<Box<EngineSigner>>>,
-validators: Box<ValidatorSet>,
+client: Arc<RwLock<Option<Weak<dyn EngineClient>>>>,
+signer: RwLock<Option<Box<dyn EngineSigner>>>,
+validators: Box<dyn ValidatorSet>,
 validate_score_transition: u64,
 validate_step_transition: u64,
 empty_steps: Mutex<BTreeSet<EmptyStep>>,
@@ -563,13 +563,13 @@ fn header_empty_steps_signers(header: &Header, empty_steps_transition: u64) -> R
 }
 }
 
-fn step_proposer(validators: &ValidatorSet, bh: &H256, step: u64) -> Address {
+fn step_proposer(validators: &dyn ValidatorSet, bh: &H256, step: u64) -> Address {
 let proposer = validators.get(bh, step as usize);
 trace!(target: "engine", "Fetched proposer for step {}: {}", step, proposer);
 proposer
 }
 
-fn is_step_proposer(validators: &ValidatorSet, bh: &H256, step: u64, address: &Address) -> bool {
+fn is_step_proposer(validators: &dyn ValidatorSet, bh: &H256, step: u64, address: &Address) -> bool {
 step_proposer(validators, bh, step) == *address
 }
 
@@ -597,7 +597,7 @@ fn verify_timestamp(step: &Step, header_step: u64) -> Result<(), BlockError> {
 }
 }
 
-fn verify_external(header: &Header, validators: &ValidatorSet, empty_steps_transition: u64) -> Result<(), Error> {
+fn verify_external(header: &Header, validators: &dyn ValidatorSet, empty_steps_transition: u64) -> Result<(), Error> {
 let header_step = header_step(header, empty_steps_transition)?;
 
 let proposer_signature = header_signature(header, empty_steps_transition)?;
@@ -716,7 +716,7 @@ impl AuthorityRound {
 
 // fetch correct validator set for epoch at header, taking into account
 // finality of previous transitions.
-fn epoch_set<'a>(&'a self, header: &Header) -> Result<(CowLike<ValidatorSet, SimpleList>, BlockNumber), Error> {
+fn epoch_set<'a>(&'a self, header: &Header) -> Result<(CowLike<dyn ValidatorSet, SimpleList>, BlockNumber), Error> {
 Ok(if self.immediate_transitions {
 (CowLike::Borrowed(&*self.validators), header.number())
 } else {
@@ -802,7 +802,7 @@ impl AuthorityRound {
 }
 }
 
-fn report_skipped(&self, header: &Header, current_step: u64, parent_step: u64, validators: &ValidatorSet, set_number: u64) {
+fn report_skipped(&self, header: &Header, current_step: u64, parent_step: u64, validators: &dyn ValidatorSet, set_number: u64) {
 // we're building on top of the genesis block so don't report any skipped steps
 if header.number() == 1 {
 return;
@@ -825,7 +825,7 @@ impl AuthorityRound {
 }
 
 // Returns the hashes of all ancestor blocks that are finalized by the given `chain_head`.
-fn build_finality(&self, chain_head: &Header, ancestry: &mut Iterator<Item=Header>) -> Vec<H256> {
+fn build_finality(&self, chain_head: &Header, ancestry: &mut dyn Iterator<Item=Header>) -> Vec<H256> {
 if self.immediate_transitions { return Vec::new() }
 
 let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) {
@@ -894,7 +894,7 @@ fn unix_now() -> Duration {
 
 struct TransitionHandler {
 step: Arc<PermissionedStep>,
-client: Arc<RwLock<Option<Weak<EngineClient>>>>,
+client: Arc<RwLock<Option<Weak<dyn EngineClient>>>>,
 }
 
 const ENGINE_TIMEOUT_TOKEN: TimerToken = 23;
@@ -1198,7 +1198,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
 &self,
 block: &mut ExecutedBlock,
 epoch_begin: bool,
-_ancestry: &mut Iterator<Item=ExtendedHeader>,
+_ancestry: &mut dyn Iterator<Item=ExtendedHeader>,
 ) -> Result<(), Error> {
 // with immediate transitions, we don't use the epoch mechanism anyway.
 // the genesis is always considered an epoch, but we ignore it intentionally.
@@ -1555,12 +1555,12 @@ impl Engine<EthereumMachine> for AuthorityRound {
 }
 }
 
-fn register_client(&self, client: Weak<EngineClient>) {
+fn register_client(&self, client: Weak<dyn EngineClient>) {
 *self.client.write() = Some(client.clone());
 self.validators.register_client(client);
 }
 
-fn set_signer(&self, signer: Box<EngineSigner>) {
+fn set_signer(&self, signer: Box<dyn EngineSigner>) {
 *self.signer.write() = Some(signer);
 }
 
@@ -1572,7 +1572,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
 )
 }
 
-fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
+fn snapshot_components(&self) -> Option<Box<dyn (::snapshot::SnapshotComponents)>> {
 if self.immediate_transitions {
 None
 } else {
@@ -1584,7 +1584,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
 super::total_difficulty_fork_choice(new, current)
 }
 
-fn ancestry_actions(&self, header: &Header, ancestry: &mut Iterator<Item=ExtendedHeader>) -> Vec<AncestryAction> {
+fn ancestry_actions(&self, header: &Header, ancestry: &mut dyn Iterator<Item=ExtendedHeader>) -> Vec<AncestryAction> {
 let finalized = self.build_finality(
 header,
 &mut ancestry.take_while(|e| !e.is_finalized).map(|e| e.header),
@@ -1908,14 +1908,14 @@ mod tests {
 (spec, tap, accounts)
 }
 
-fn empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> EmptyStep {
+fn empty_step(engine: &dyn EthEngine, step: u64, parent_hash: &H256) -> EmptyStep {
 let empty_step_rlp = super::empty_step_rlp(step, parent_hash);
 let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into();
 let parent_hash = parent_hash.clone();
 EmptyStep { step, signature, parent_hash }
 }
 
-fn sealed_empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> SealedEmptyStep {
+fn sealed_empty_step(engine: &dyn EthEngine, step: u64, parent_hash: &H256) -> SealedEmptyStep {
 let empty_step_rlp = super::empty_step_rlp(step, parent_hash);
 let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into();
 SealedEmptyStep { signature, step }
@@ -55,7 +55,7 @@ impl super::EpochVerifier<EthereumMachine> for EpochVerifier {
 }
 }
 
-fn verify_external(header: &Header, validators: &ValidatorSet) -> Result<(), Error> {
+fn verify_external(header: &Header, validators: &dyn ValidatorSet) -> Result<(), Error> {
 use rlp::Rlp;
 
 // Check if the signature belongs to a validator, can depend on parent state.
@@ -75,8 +75,8 @@ fn verify_external(header: &Header, validators: &ValidatorSet) -> Result<(), Err
 /// Engine using `BasicAuthority`, trivial proof-of-authority consensus.
 pub struct BasicAuthority {
 machine: EthereumMachine,
-signer: RwLock<Option<Box<EngineSigner>>>,
-validators: Box<ValidatorSet>,
+signer: RwLock<Option<Box<dyn EngineSigner>>>,
+validators: Box<dyn ValidatorSet>,
 }
 
 impl BasicAuthority {
@@ -189,11 +189,11 @@ impl Engine<EthereumMachine> for BasicAuthority {
 }
 }
 
-fn register_client(&self, client: Weak<EngineClient>) {
+fn register_client(&self, client: Weak<dyn EngineClient>) {
 self.validators.register_client(client);
 }
 
-fn set_signer(&self, signer: Box<EngineSigner>) {
+fn set_signer(&self, signer: Box<dyn EngineSigner>) {
 *self.signer.write() = Some(signer);
 }
 
@@ -205,7 +205,7 @@ impl Engine<EthereumMachine> for BasicAuthority {
 )
 }
 
-fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
+fn snapshot_components(&self) -> Option<Box<dyn (::snapshot::SnapshotComponents)>> {
 None
 }
 
@@ -162,10 +162,10 @@ pub struct Clique {
 epoch_length: u64,
 period: u64,
 machine: EthereumMachine,
-client: RwLock<Option<Weak<EngineClient>>>,
+client: RwLock<Option<Weak<dyn EngineClient>>>,
 block_state_by_hash: RwLock<LruCache<H256, CliqueBlockState>>,
 proposals: RwLock<HashMap<Address, VoteType>>,
-signer: RwLock<Option<Box<EngineSigner>>>,
+signer: RwLock<Option<Box<dyn EngineSigner>>>,
 }
 
 #[cfg(test)]
@@ -174,10 +174,10 @@ pub struct Clique {
 pub epoch_length: u64,
 pub period: u64,
 pub machine: EthereumMachine,
-pub client: RwLock<Option<Weak<EngineClient>>>,
+pub client: RwLock<Option<Weak<dyn EngineClient>>>,
 pub block_state_by_hash: RwLock<LruCache<H256, CliqueBlockState>>,
 pub proposals: RwLock<HashMap<Address, VoteType>>,
-pub signer: RwLock<Option<Box<EngineSigner>>>,
+pub signer: RwLock<Option<Box<dyn EngineSigner>>>,
 }
 
 impl Clique {
@@ -370,7 +370,7 @@ impl Engine<EthereumMachine> for Clique {
 &self,
 _block: &mut ExecutedBlock,
 _epoch_begin: bool,
-_ancestry: &mut Iterator<Item=ExtendedHeader>,
+_ancestry: &mut dyn Iterator<Item=ExtendedHeader>,
 ) -> Result<(), Error> {
 Ok(())
 }
@@ -736,12 +736,12 @@ impl Engine<EthereumMachine> for Clique {
 }
 }
 
-fn set_signer(&self, signer: Box<EngineSigner>) {
+fn set_signer(&self, signer: Box<dyn EngineSigner>) {
 trace!(target: "engine", "set_signer: {}", signer.address());
 *self.signer.write() = Some(signer);
 }
 
-fn register_client(&self, client: Weak<EngineClient>) {
+fn register_client(&self, client: Weak<dyn EngineClient>) {
 *self.client.write() = Some(client.clone());
 }
 
@@ -175,10 +175,10 @@ pub enum SealingState {
 }

 /// A system-calling closure. Enacts calls on a block's state from the system address.
-pub type SystemCall<'a> = FnMut(Address, Vec<u8>) -> Result<Vec<u8>, String> + 'a;
+pub type SystemCall<'a> = dyn FnMut(Address, Vec<u8>) -> Result<Vec<u8>, String> + 'a;

 /// A system-calling closure. Enacts calls on a block's state with code either from an on-chain contract, or hard-coded EVM or WASM (if enabled on-chain) codes.
-pub type SystemOrCodeCall<'a> = FnMut(SystemOrCodeCallKind, Vec<u8>) -> Result<Vec<u8>, String> + 'a;
+pub type SystemOrCodeCall<'a> = dyn FnMut(SystemOrCodeCallKind, Vec<u8>) -> Result<Vec<u8>, String> + 'a;

 /// Kind of SystemOrCodeCall, this is either an on-chain address, or code.
 #[derive(PartialEq, Debug, Clone)]
@@ -220,10 +220,10 @@ pub fn default_system_or_code_call<'a>(machine: &'a ::machine::EthereumMachine,
 }

 /// Type alias for a function we can get headers by hash through.
-pub type Headers<'a, H> = Fn(H256) -> Option<H> + 'a;
+pub type Headers<'a, H> = dyn Fn(H256) -> Option<H> + 'a;

 /// Type alias for a function we can query pending transitions by block hash through.
-pub type PendingTransitionStore<'a> = Fn(H256) -> Option<epoch::PendingTransition> + 'a;
+pub type PendingTransitionStore<'a> = dyn Fn(H256) -> Option<epoch::PendingTransition> + 'a;

 /// Proof dependent on state.
 pub trait StateDependentProof<M: Machine>: Send + Sync {
@@ -240,16 +240,16 @@ pub enum Proof<M: Machine> {
 /// Known proof (extracted from signal)
 Known(Vec<u8>),
 /// State dependent proof.
-WithState(Arc<StateDependentProof<M>>),
+WithState(Arc<dyn StateDependentProof<M>>),
 }

 /// Generated epoch verifier.
 pub enum ConstructedVerifier<'a, M: Machine> {
 /// Fully trusted verifier.
-Trusted(Box<EpochVerifier<M>>),
+Trusted(Box<dyn EpochVerifier<M>>),
 /// Verifier unconfirmed. Check whether given finality proof finalizes given hash
 /// under previous epoch.
-Unconfirmed(Box<EpochVerifier<M>>, &'a [u8], H256),
+Unconfirmed(Box<dyn EpochVerifier<M>>, &'a [u8], H256),
 /// Error constructing verifier.
 Err(Error),
 }
@@ -257,7 +257,7 @@ pub enum ConstructedVerifier<'a, M: Machine> {
 impl<'a, M: Machine> ConstructedVerifier<'a, M> {
 /// Convert to a result, indicating that any necessary confirmation has been done
 /// already.
-pub fn known_confirmed(self) -> Result<Box<EpochVerifier<M>>, Error> {
+pub fn known_confirmed(self) -> Result<Box<dyn EpochVerifier<M>>, Error> {
 match self {
 ConstructedVerifier::Trusted(v) | ConstructedVerifier::Unconfirmed(v, _, _) => Ok(v),
 ConstructedVerifier::Err(e) => Err(e),
@@ -303,7 +303,7 @@ pub trait Engine<M: Machine>: Sync + Send {
 &self,
 _block: &mut ExecutedBlock,
 _epoch_begin: bool,
-_ancestry: &mut Iterator<Item = ExtendedHeader>,
+_ancestry: &mut dyn Iterator<Item = ExtendedHeader>,
 ) -> Result<(), M::Error> {
 Ok(())
 }
@@ -426,7 +426,7 @@ pub trait Engine<M: Machine>: Sync + Send {
 fn handle_message(&self, _message: &[u8]) -> Result<(), EngineError> { Err(EngineError::UnexpectedMessage) }

 /// Register a component which signs consensus messages.
-fn set_signer(&self, _signer: Box<EngineSigner>) {}
+fn set_signer(&self, _signer: Box<dyn EngineSigner>) {}

 /// Sign using the EngineSigner, to be used for consensus tx signing.
 fn sign(&self, _hash: H256) -> Result<Signature, M::Error> { unimplemented!() }
@@ -439,7 +439,7 @@ pub trait Engine<M: Machine>: Sync + Send {

 /// Create a factory for building snapshot chunks and restoring from them.
 /// Returning `None` indicates that this engine doesn't support snapshot creation.
-fn snapshot_components(&self) -> Option<Box<SnapshotComponents>> {
+fn snapshot_components(&self) -> Option<Box<dyn SnapshotComponents>> {
 None
 }

@@ -463,7 +463,7 @@ pub trait Engine<M: Machine>: Sync + Send {

 /// Gather all ancestry actions. Called at the last stage when a block is committed. The Engine must guarantee that
 /// the ancestry exists.
-fn ancestry_actions(&self, _header: &Header, _ancestry: &mut Iterator<Item = ExtendedHeader>) -> Vec<AncestryAction> {
+fn ancestry_actions(&self, _header: &Header, _ancestry: &mut dyn Iterator<Item = ExtendedHeader>) -> Vec<AncestryAction> {
 Vec::new()
 }
@@ -99,7 +99,7 @@ impl<M: Machine> Engine<M> for NullEngine<M> {
 Ok(())
 }

-fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
+fn snapshot_components(&self) -> Option<Box<dyn (::snapshot::SnapshotComponents)>> {
 Some(Box::new(::snapshot::PowSnapshot::new(10000, 10000)))
 }

@@ -29,7 +29,7 @@ pub trait EngineSigner: Send + Sync {
 }

 /// Creates a new `EngineSigner` from given key pair.
-pub fn from_keypair(keypair: ethkey::KeyPair) -> Box<EngineSigner> {
+pub fn from_keypair(keypair: ethkey::KeyPair) -> Box<dyn EngineSigner> {
 Box::new(Signer(keypair))
 }
@@ -37,7 +37,7 @@ use_contract!(validator_report, "res/contracts/validator_report.json");
 pub struct ValidatorContract {
 contract_address: Address,
 validators: ValidatorSafeContract,
-client: RwLock<Option<Weak<EngineClient>>>, // TODO [keorn]: remove
+client: RwLock<Option<Weak<dyn EngineClient>>>, // TODO [keorn]: remove
 }

 impl ValidatorContract {
@@ -125,7 +125,7 @@ impl ValidatorSet for ValidatorContract {
 }
 }

-fn register_client(&self, client: Weak<EngineClient>) {
+fn register_client(&self, client: Weak<dyn EngineClient>) {
 self.validators.register_client(client.clone());
 *self.client.write() = Some(client);
 }
@@ -44,7 +44,7 @@ use self::multi::Multi;
 use super::SystemCall;

 /// Creates a validator set from spec.
-pub fn new_validator_set(spec: ValidatorSpec) -> Box<ValidatorSet> {
+pub fn new_validator_set(spec: ValidatorSpec) -> Box<dyn ValidatorSet> {
 match spec {
 ValidatorSpec::List(list) => Box::new(SimpleList::new(list.into_iter().map(Into::into).collect())),
 ValidatorSpec::SafeContract(address) => Box::new(ValidatorSafeContract::new(address.into())),
@@ -141,5 +141,5 @@ pub trait ValidatorSet: Send + Sync + 'static {
 /// Notifies about benign misbehaviour.
 fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, _block: BlockNumber) {}
 /// Allows blockchain state access.
-fn register_client(&self, _client: Weak<EngineClient>) {}
+fn register_client(&self, _client: Weak<dyn EngineClient>) {}
 }
@@ -30,15 +30,15 @@ use client::EngineClient;
 use machine::{AuxiliaryData, Call, EthereumMachine};
 use super::{SystemCall, ValidatorSet};

-type BlockNumberLookup = Box<Fn(BlockId) -> Result<BlockNumber, String> + Send + Sync + 'static>;
+type BlockNumberLookup = Box<dyn Fn(BlockId) -> Result<BlockNumber, String> + Send + Sync + 'static>;

 pub struct Multi {
-sets: BTreeMap<BlockNumber, Box<ValidatorSet>>,
+sets: BTreeMap<BlockNumber, Box<dyn ValidatorSet>>,
 block_number: RwLock<BlockNumberLookup>,
 }

 impl Multi {
-pub fn new(set_map: BTreeMap<BlockNumber, Box<ValidatorSet>>) -> Self {
+pub fn new(set_map: BTreeMap<BlockNumber, Box<dyn ValidatorSet>>) -> Self {
 assert!(set_map.get(&0u64).is_some(), "ValidatorSet has to be specified from block 0.");
 Multi {
 sets: set_map,
@@ -46,7 +46,7 @@ impl Multi {
 }
 }

-fn correct_set(&self, id: BlockId) -> Option<&ValidatorSet> {
+fn correct_set(&self, id: BlockId) -> Option<&dyn ValidatorSet> {
 match self.block_number.read()(id).map(|parent_block| self.correct_set_by_number(parent_block)) {
 Ok((_, set)) => Some(set),
 Err(e) => {
@@ -58,7 +58,7 @@ impl Multi {

 // get correct set by block number, along with block number at which
 // this set was activated.
-fn correct_set_by_number(&self, parent_block: BlockNumber) -> (BlockNumber, &ValidatorSet) {
+fn correct_set_by_number(&self, parent_block: BlockNumber) -> (BlockNumber, &dyn ValidatorSet) {
 let (block, set) = self.sets.iter()
 .rev()
 .find(|&(block, _)| *block <= parent_block + 1)
@@ -134,7 +134,7 @@ impl ValidatorSet for Multi {
 self.correct_set_by_number(set_block).1.report_benign(validator, set_block, block);
 }

-fn register_client(&self, client: Weak<EngineClient>) {
+fn register_client(&self, client: Weak<dyn EngineClient>) {
 for set in self.sets.values() {
 set.register_client(client.clone());
 }
@@ -215,7 +215,7 @@ mod tests {
 fn transition_to_fixed_list_instant() {
 use super::super::SimpleList;

-let mut map: BTreeMap<_, Box<ValidatorSet>> = BTreeMap::new();
+let mut map: BTreeMap<_, Box<dyn ValidatorSet>> = BTreeMap::new();
 let list1: Vec<_> = (0..10).map(|_| Address::random()).collect();
 let list2 = {
 let mut list = list1.clone();
@@ -75,7 +75,7 @@ impl ::engines::StateDependentProof<EthereumMachine> for StateProof {
 pub struct ValidatorSafeContract {
 contract_address: Address,
 validators: RwLock<MemoryLruCache<H256, SimpleList>>,
-client: RwLock<Option<Weak<EngineClient>>>, // TODO [keorn]: remove
+client: RwLock<Option<Weak<dyn EngineClient>>>, // TODO [keorn]: remove
 }

 // first proof is just a state proof call of `getValidators` at header's state.
@@ -431,7 +431,7 @@ impl ValidatorSet for ValidatorSafeContract {
 }))
 }

-fn register_client(&self, client: Weak<EngineClient>) {
+fn register_client(&self, client: Weak<dyn EngineClient>) {
 trace!(target: "engine", "Setting up contract caller.");
 *self.client.write() = Some(client);
 }
@@ -105,8 +105,8 @@ impl ValidatorSet for SimpleList {
 }
 }

-impl AsRef<ValidatorSet> for SimpleList {
-fn as_ref(&self) -> &ValidatorSet {
+impl AsRef<dyn ValidatorSet> for SimpleList {
+fn as_ref(&self) -> &dyn ValidatorSet {
 self
 }
 }
@@ -162,7 +162,7 @@ pub enum QueueError {
 }

 impl error::Error for QueueError {
-fn source(&self) -> Option<&(error::Error + 'static)> {
+fn source(&self) -> Option<&(dyn error::Error + 'static)> {
 match self {
 QueueError::Channel(e) => Some(e),
 _ => None,
@@ -264,7 +264,7 @@ pub enum Error {
 }

 impl error::Error for Error {
-fn source(&self) -> Option<&(error::Error + 'static)> {
+fn source(&self) -> Option<&(dyn error::Error + 'static)> {
 match self {
 Error::Io(e) => Some(e),
 Error::StdIo(e) => Some(e),
@@ -377,7 +377,7 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
 engines::ConstructedVerifier::Trusted(Box::new(self.clone()))
 }

-fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
+fn snapshot_components(&self) -> Option<Box<dyn (::snapshot::SnapshotComponents)>> {
 Some(Box::new(::snapshot::PowSnapshot::new(SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS)))
 }
@@ -201,8 +201,8 @@ enum CallCreateExecutiveKind {
 CallBuiltin(ActionParams),
 ExecCall(ActionParams, Substate),
 ExecCreate(ActionParams, Substate),
-ResumeCall(OriginInfo, Box<ResumeCall>, Substate),
-ResumeCreate(OriginInfo, Box<ResumeCreate>, Substate),
+ResumeCall(OriginInfo, Box<dyn ResumeCall>, Substate),
+ResumeCreate(OriginInfo, Box<dyn ResumeCreate>, Substate),
 }

 /// Executive for a raw call/create action.
@@ -31,7 +31,7 @@ pub struct VmFactory {
 }

 impl VmFactory {
-pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box<Exec> {
+pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box<dyn Exec> {
 if schedule.wasm.is_some() && params.code.as_ref().map_or(false, |code| code.len() > 4 && &code[0..4] == WASM_MAGIC_NUMBER) {
 Box::new(WasmInterpreter::new(params))
 } else {
@@ -82,7 +82,11 @@ extern crate journaldb;
 extern crate keccak_hash as hash;
 extern crate keccak_hasher;
 extern crate kvdb;
+// Note: in `ethcore` this is only used by tests, so without `#[cfg(test)]` there's a warning.
+// However, when building `parity-ethereum` this is needed. So there's something funny going on
+// here.
 extern crate kvdb_memorydb;

 extern crate len_caching_lock;
 extern crate lru_cache;
 extern crate memory_cache;
@@ -67,7 +67,7 @@ impl From<::ethjson::spec::EthashParams> for EthashExtensions {
 }

 /// Special rules to be applied to the schedule.
-pub type ScheduleCreationRules = Fn(&mut Schedule, BlockNumber) + Sync + Send;
+pub type ScheduleCreationRules = dyn Fn(&mut Schedule, BlockNumber) + Sync + Send;

 /// An ethereum-like state machine.
 pub struct EthereumMachine {
@@ -415,7 +415,7 @@ pub struct AuxiliaryData<'a> {

 /// Type alias for a function we can make calls through synchronously.
 /// Returns the call result and state proof for each call.
-pub type Call<'a> = Fn(Address, Vec<u8>) -> Result<(Vec<u8>, Vec<Vec<u8>>), String> + 'a;
+pub type Call<'a> = dyn Fn(Address, Vec<u8>) -> Result<(Vec<u8>, Vec<Vec<u8>>), String> + 'a;

 /// Request for auxiliary data of a block.
 #[derive(Debug, Clone, Copy, PartialEq)]
@@ -429,7 +429,7 @@ pub enum AuxiliaryRequest {
 }

 impl super::Machine for EthereumMachine {
-type EngineClient = ::client::EngineClient;
+type EngineClient = dyn (::client::EngineClient);

 type Error = Error;
@@ -203,7 +203,7 @@ pub enum Author {
 /// Sealing block is external and we only need a reward beneficiary (i.e. PoW)
 External(Address),
 /// Sealing is done internally, we need a way to create signatures to seal block (i.e. PoA)
-Sealer(Box<EngineSigner>),
+Sealer(Box<dyn EngineSigner>),
 }

 impl Author {
@@ -245,8 +245,8 @@ pub struct Miner {
 options: MinerOptions,
 // TODO [ToDr] Arc is only required because of price updater
 transaction_queue: Arc<TransactionQueue>,
-engine: Arc<EthEngine>,
-accounts: Arc<LocalAccounts>,
+engine: Arc<dyn EthEngine>,
+accounts: Arc<dyn LocalAccounts>,
 io_channel: RwLock<Option<IoChannel<ClientIoMessage>>>,
 service_transaction_checker: Option<ServiceTransactionChecker>,
 }
@@ -72,8 +72,8 @@ impl NonceCache {
 pub struct PoolClient<'a, C: 'a> {
 chain: &'a C,
 cached_nonces: CachedNonceClient<'a, C>,
-engine: &'a EthEngine,
-accounts: &'a LocalAccounts,
+engine: &'a dyn EthEngine,
+accounts: &'a dyn LocalAccounts,
 best_block_header: Header,
 service_transaction_checker: Option<&'a ServiceTransactionChecker>,
 }
@@ -98,8 +98,8 @@ impl<'a, C: 'a> PoolClient<'a, C> where
 pub fn new(
 chain: &'a C,
 cache: &'a NonceCache,
-engine: &'a EthEngine,
-accounts: &'a LocalAccounts,
+engine: &'a dyn EthEngine,
+accounts: &'a dyn LocalAccounts,
 service_transaction_checker: Option<&'a ServiceTransactionChecker>,
 ) -> Self {
 let best_block_header = chain.best_block_header();
@@ -79,7 +79,7 @@ impl PodAccount {
 }

 /// Place additional data into given hash DB.
-pub fn insert_additional(&self, db: &mut HashDB<KeccakHasher, DBValue>, factory: &TrieFactory<KeccakHasher, RlpCodec>) {
+pub fn insert_additional(&self, db: &mut dyn HashDB<KeccakHasher, DBValue>, factory: &TrieFactory<KeccakHasher, RlpCodec>) {
 match self.code {
 Some(ref c) if !c.is_empty() => { db.insert(c); }
 _ => {}
@@ -66,7 +66,7 @@ impl CodeState {
 // account address hash, account properties and the storage. Each item contains at most `max_storage_items`
 // storage records split according to snapshot format definition.
 pub fn to_fat_rlps(account_hash: &H256, acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, first_chunk_size: usize, max_chunk_size: usize) -> Result<Vec<Bytes>, Error> {
-let db = &(acct_db as &HashDB<_,_>);
+let db = &(acct_db as &dyn HashDB<_,_>);
 let db = TrieDB::new(db, &acc.storage_root)?;
 let mut chunks = Vec::new();
 let mut db_iter = db.iter()?;
@@ -127,9 +127,9 @@ impl SnapshotComponents for PoaSnapshot {
 fn rebuilder(
 &self,
 chain: BlockChain,
-db: Arc<BlockChainDB>,
+db: Arc<dyn BlockChainDB>,
 manifest: &ManifestData,
-) -> Result<Box<Rebuilder>, ::error::Error> {
+) -> Result<Box<dyn Rebuilder>, ::error::Error> {
 Ok(Box::new(ChunkRebuilder {
 manifest: manifest.clone(),
 warp_target: None,
@@ -164,14 +164,14 @@ struct ChunkRebuilder {
 manifest: ManifestData,
 warp_target: Option<Header>,
 chain: BlockChain,
-db: Arc<KeyValueDB>,
+db: Arc<dyn KeyValueDB>,
 had_genesis: bool,

 // sorted vectors of unverified first blocks in a chunk
 // and epoch data from last blocks in chunks.
 // verification for these will be done at the end.
 unverified_firsts: Vec<(Header, Bytes, H256)>,
-last_epochs: Vec<(Header, Box<EpochVerifier<EthereumMachine>>)>,
+last_epochs: Vec<(Header, Box<dyn EpochVerifier<EthereumMachine>>)>,
 }

 // verified data.
@@ -183,9 +183,9 @@ struct Verified {
 impl ChunkRebuilder {
 fn verify_transition(
 &mut self,
-last_verifier: &mut Option<Box<EpochVerifier<EthereumMachine>>>,
+last_verifier: &mut Option<Box<dyn EpochVerifier<EthereumMachine>>>,
 transition_rlp: Rlp,
-engine: &EthEngine,
+engine: &dyn EthEngine,
 ) -> Result<Verified, ::error::Error> {
 use engines::ConstructedVerifier;

@@ -241,7 +241,7 @@ impl Rebuilder for ChunkRebuilder {
 fn feed(
 &mut self,
 chunk: &[u8],
-engine: &EthEngine,
+engine: &dyn EthEngine,
 abort_flag: &AtomicBool,
 ) -> Result<(), ::error::Error> {
 let rlp = Rlp::new(chunk);
@@ -349,7 +349,7 @@ impl Rebuilder for ChunkRebuilder {
 Ok(())
 }

-fn finalize(&mut self, _engine: &EthEngine) -> Result<(), ::error::Error> {
+fn finalize(&mut self, _engine: &dyn EthEngine) -> Result<(), ::error::Error> {
 if !self.had_genesis {
 return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into());
 }
@@ -33,7 +33,7 @@ pub use self::authority::*;
 pub use self::work::*;

 /// A sink for produced chunks.
-pub type ChunkSink<'a> = FnMut(&[u8]) -> ::std::io::Result<()> + 'a;
+pub type ChunkSink<'a> = dyn FnMut(&[u8]) -> ::std::io::Result<()> + 'a;

 /// Components necessary for snapshot creation and restoration.
 pub trait SnapshotComponents: Send {
@@ -63,9 +63,9 @@ pub trait SnapshotComponents: Send {
 fn rebuilder(
 &self,
 chain: BlockChain,
-db: Arc<BlockChainDB>,
+db: Arc<dyn BlockChainDB>,
 manifest: &ManifestData,
-) -> Result<Box<Rebuilder>, ::error::Error>;
+) -> Result<Box<dyn Rebuilder>, ::error::Error>;

 /// Minimum supported snapshot version number.
 fn min_supported_version(&self) -> u64;
@@ -83,7 +83,7 @@ pub trait Rebuilder: Send {
 fn feed(
 &mut self,
 chunk: &[u8],
-engine: &EthEngine,
+engine: &dyn EthEngine,
 abort_flag: &AtomicBool,
 ) -> Result<(), ::error::Error>;

@@ -92,5 +92,5 @@ pub trait Rebuilder: Send {
 ///
 /// This should apply the necessary "glue" between chunks,
 /// and verify against the restored state.
-fn finalize(&mut self, engine: &EthEngine) -> Result<(), ::error::Error>;
+fn finalize(&mut self, engine: &dyn EthEngine) -> Result<(), ::error::Error>;
 }
@@ -81,9 +81,9 @@ impl SnapshotComponents for PowSnapshot {
 fn rebuilder(
 &self,
 chain: BlockChain,
-db: Arc<BlockChainDB>,
+db: Arc<dyn BlockChainDB>,
 manifest: &ManifestData,
-) -> Result<Box<Rebuilder>, ::error::Error> {
+) -> Result<Box<dyn Rebuilder>, ::error::Error> {
 PowRebuilder::new(chain, db.key_value().clone(), manifest, self.max_restore_blocks).map(|r| Box::new(r) as Box<_>)
 }

@@ -194,7 +194,7 @@ impl<'a> PowWorker<'a> {
 /// After all chunks have been submitted, we "glue" the chunks together.
 pub struct PowRebuilder {
 chain: BlockChain,
-db: Arc<KeyValueDB>,
+db: Arc<dyn KeyValueDB>,
 rng: OsRng,
 disconnected: Vec<(u64, H256)>,
 best_number: u64,
@@ -206,7 +206,7 @@ pub struct PowRebuilder {

 impl PowRebuilder {
 /// Create a new PowRebuilder.
-fn new(chain: BlockChain, db: Arc<KeyValueDB>, manifest: &ManifestData, snapshot_blocks: u64) -> Result<Self, ::error::Error> {
+fn new(chain: BlockChain, db: Arc<dyn KeyValueDB>, manifest: &ManifestData, snapshot_blocks: u64) -> Result<Self, ::error::Error> {
 Ok(PowRebuilder {
 chain: chain,
 db: db,
@@ -224,7 +224,7 @@ impl PowRebuilder {
 impl Rebuilder for PowRebuilder {
 /// Feed the rebuilder an uncompressed block chunk.
 /// Returns the number of blocks fed or any errors.
-fn feed(&mut self, chunk: &[u8], engine: &EthEngine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> {
+fn feed(&mut self, chunk: &[u8], engine: &dyn EthEngine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> {
 use snapshot::verify_old_block;
 use ethereum_types::U256;
 use triehash::ordered_trie_root;
@@ -298,7 +298,7 @@ impl Rebuilder for PowRebuilder {
 }

 /// Glue together any disconnected chunks and check that the chain is complete.
-fn finalize(&mut self, _: &EthEngine) -> Result<(), ::error::Error> {
+fn finalize(&mut self, _: &dyn EthEngine) -> Result<(), ::error::Error> {
 let mut batch = self.db.transaction();

 for (first_num, first_hash) in self.disconnected.drain(..) {
@@ -71,7 +71,7 @@ pub enum Error {
 }

 impl error::Error for Error {
-fn source(&self) -> Option<&(error::Error + 'static)> {
+fn source(&self) -> Option<&(dyn error::Error + 'static)> {
 match self {
 Error::Trie(e) => Some(e),
 Error::Decoder(e) => Some(e),
@@ -148,10 +148,10 @@ impl Progress {
 }
 /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer.
 pub fn take_snapshot<W: SnapshotWriter + Send>(
-engine: &EthEngine,
+engine: &dyn EthEngine,
 chain: &BlockChain,
 block_at: H256,
-state_db: &HashDB<KeccakHasher, DBValue>,
+state_db: &dyn HashDB<KeccakHasher, DBValue>,
 writer: W,
 p: &Progress,
 processing_threads: usize,
@@ -228,7 +228,7 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(
 /// Secondary chunks are engine-specific, but they intend to corroborate the state data
 /// in the state chunks.
 /// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis.
-pub fn chunk_secondary<'a>(mut chunker: Box<SnapshotComponents>, chain: &'a BlockChain, start_hash: H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
+pub fn chunk_secondary<'a>(mut chunker: Box<dyn SnapshotComponents>, chain: &'a BlockChain, start_hash: H256, writer: &Mutex<dyn SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
 let mut chunk_hashes = Vec::new();
 let mut snappy_buffer = vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)];

@@ -266,7 +266,7 @@ struct StateChunker<'a> {
 rlps: Vec<Bytes>,
 cur_size: usize,
 snappy_buffer: Vec<u8>,
-writer: &'a Mutex<SnapshotWriter + 'a>,
+writer: &'a Mutex<dyn SnapshotWriter + 'a>,
 progress: &'a Progress,
 }

@@ -321,7 +321,7 @@ impl<'a> StateChunker<'a> {
 ///
 /// Returns a list of hashes of chunks created, or any error it may
 /// have encountered.
-pub fn chunk_state<'a>(db: &HashDB<KeccakHasher, DBValue>, root: &H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress, part: Option<usize>) -> Result<Vec<H256>, Error> {
+pub fn chunk_state<'a>(db: &dyn HashDB<KeccakHasher, DBValue>, root: &H256, writer: &Mutex<dyn SnapshotWriter + 'a>, progress: &'a Progress, part: Option<usize>) -> Result<Vec<H256>, Error> {
 let account_trie = TrieDB::new(&db, &root)?;

 let mut chunker = StateChunker {
@@ -383,7 +383,7 @@ pub fn chunk_state<'a>(db: &HashDB<KeccakHasher, DBValue>, root: &H256, writer:

 /// Used to rebuild the state trie piece by piece.
 pub struct StateRebuilder {
-db: Box<JournalDB>,
+db: Box<dyn JournalDB>,
 state_root: H256,
 known_code: HashMap<H256, H256>, // code hashes mapped to first account with this code.
 missing_code: HashMap<H256, Vec<H256>>, // maps code hashes to lists of accounts missing that code.
@@ -393,7 +393,7 @@ pub struct StateRebuilder {

 impl StateRebuilder {
 /// Create a new state rebuilder to write into the given backing DB.
-pub fn new(db: Arc<KeyValueDB>, pruning: Algorithm) -> Self {
+pub fn new(db: Arc<dyn KeyValueDB>, pruning: Algorithm) -> Self {
 StateRebuilder {
 db: journaldb::new(db.clone(), pruning, ::db::COL_STATE),
 state_root: KECCAK_NULL_RLP,
@@ -468,7 +468,7 @@ impl StateRebuilder {
 /// Finalize the restoration. Check for accounts missing code and make a dummy
 /// journal entry.
 /// Once all chunks have been fed, there should be nothing missing.
-pub fn finalize(mut self, era: u64, id: H256) -> Result<Box<JournalDB>, ::error::Error> {
+pub fn finalize(mut self, era: u64, id: H256) -> Result<Box<dyn JournalDB>, ::error::Error> {
 let missing = self.missing_code.keys().cloned().collect::<Vec<_>>();
 if !missing.is_empty() { return Err(Error::MissingCode(missing).into()) }

@@ -493,7 +493,7 @@ struct RebuiltStatus {
 // rebuild a set of accounts and their storage.
 // returns a status detailing newly-loaded code and accounts missing code.
 fn rebuild_accounts(
-db: &mut HashDB<KeccakHasher, DBValue>,
+db: &mut dyn HashDB<KeccakHasher, DBValue>,
 account_fat_rlps: Rlp,
 out_chunk: &mut [(H256, Bytes)],
 known_code: &HashMap<H256, H256>,
@@ -560,7 +560,7 @@ const POW_VERIFY_RATE: f32 = 0.02;
 /// Verify an old block with the given header, engine, blockchain, body. If `always` is set, it will perform
 /// the fullest verification possible. If not, it will take a random sample to determine whether it will
 /// do heavy or light verification.
-pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &EthEngine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> {
+pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &dyn EthEngine, chain: &BlockChain, always: bool) -> Result<(), ::error::Error> {
 engine.verify_block_basic(header)?;

 if always || rng.gen::<f32>() <= POW_VERIFY_RATE {
@@ -76,22 +76,22 @@ struct Restoration {
 state_chunks_left: HashSet<H256>,
 block_chunks_left: HashSet<H256>,
 state: StateRebuilder,
-secondary: Box<Rebuilder>,
+secondary: Box<dyn Rebuilder>,
 writer: Option<LooseWriter>,
 snappy_buffer: Bytes,
 final_state_root: H256,
 guard: Guard,
-db: Arc<BlockChainDB>,
+db: Arc<dyn BlockChainDB>,
 }

 struct RestorationParams<'a> {
 manifest: ManifestData, // manifest to base restoration on.
 pruning: Algorithm, // pruning algorithm for the database.
-db: Arc<BlockChainDB>, // database
+db: Arc<dyn BlockChainDB>, // database
 writer: Option<LooseWriter>, // writer for recovered snapshot.
 genesis: &'a [u8], // genesis block of the chain.
 guard: Guard, // guard for the restoration directory.
-engine: &'a EthEngine,
+engine: &'a dyn EthEngine,
 }

 impl Restoration {
@@ -149,7 +149,7 @@ impl Restoration {
 }

 // feeds a block chunk
-fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &EthEngine, flag: &AtomicBool) -> Result<(), Error> {
+fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &dyn EthEngine, flag: &AtomicBool) -> Result<(), Error> {
 if self.block_chunks_left.contains(&hash) {
 let expected_len = snappy::decompressed_len(chunk)?;
 if expected_len > MAX_CHUNK_SIZE {
@@ -170,7 +170,7 @@ impl Restoration {
 }

 // finish up restoration.
-fn finalize(mut self, engine: &EthEngine) -> Result<(), Error> {
+fn finalize(mut self, engine: &dyn EthEngine) -> Result<(), Error> {
 use trie::TrieError;

 if !self.is_done() { return Ok(()) }
@@ -211,37 +211,37 @@ pub trait SnapshotClient: BlockChainClient + BlockInfo + DatabaseRestore {}
 /// Snapshot service parameters.
 pub struct ServiceParams {
 /// The consensus engine this is built on.
-pub engine: Arc<EthEngine>,
+pub engine: Arc<dyn EthEngine>,
 /// The chain's genesis block.
 pub genesis_block: Bytes,
 /// State pruning algorithm.
 pub pruning: Algorithm,
 /// Handler for opening a restoration DB.
-pub restoration_db_handler: Box<BlockChainDBHandler>,
+pub restoration_db_handler: Box<dyn BlockChainDBHandler>,
 /// Async IO channel for sending messages.
 pub channel: Channel,
 /// The directory to put snapshots in.
 /// Usually "<chain hash>/snapshot"
 pub snapshot_root: PathBuf,
 /// A handle for database restoration.
-pub client: Arc<SnapshotClient>,
+pub client: Arc<dyn SnapshotClient>,
 }

 /// `SnapshotService` implementation.
 /// This controls taking snapshots and restoring from them.
 pub struct Service {
 restoration: Mutex<Option<Restoration>>,
-restoration_db_handler: Box<BlockChainDBHandler>,
+restoration_db_handler: Box<dyn BlockChainDBHandler>,
 snapshot_root: PathBuf,
 io_channel: Mutex<Channel>,
 pruning: Algorithm,
 status: Mutex<RestorationStatus>,
 reader: RwLock<Option<LooseReader>>,
-engine: Arc<EthEngine>,
+engine: Arc<dyn EthEngine>,
 genesis_block: Bytes,
 state_chunks: AtomicUsize,
 block_chunks: AtomicUsize,
-client: Arc<SnapshotClient>,
+client: Arc<dyn SnapshotClient>,
 progress: super::Progress,
 taking_snapshot: AtomicBool,
 restoring_snapshot: AtomicBool,
@@ -62,7 +62,7 @@ impl StateProducer {

 /// Tick the state producer. This alters the state, writing new data into
 /// the database.
-pub fn tick<R: Rng>(&mut self, rng: &mut R, db: &mut HashDB<KeccakHasher, DBValue>) {
+pub fn tick<R: Rng>(&mut self, rng: &mut R, db: &mut dyn HashDB<KeccakHasher, DBValue>) {
 // modify existing accounts.
 let mut accounts_to_modify: Vec<_> = {
 let trie = TrieDB::new(&db, &self.state_root).unwrap();
@@ -132,7 +132,7 @@ pub fn fill_storage(mut db: AccountDBMut, root: &mut H256, seed: &mut H256) {

 /// Take a snapshot from the given client into a temporary file.
 /// Return a snapshot reader for it.
-pub fn snap(client: &Client) -> (Box<SnapshotReader>, TempDir) {
+pub fn snap(client: &Client) -> (Box<dyn SnapshotReader>, TempDir) {
 use types::ids::BlockId;

 let tempdir = TempDir::new("").unwrap();
@@ -151,9 +151,9 @@ pub fn snap(client: &Client) -> (Box<SnapshotReader>, TempDir) {
 /// Restore a snapshot into a given database. This will read chunks from the given reader
 /// write into the given database.
 pub fn restore(
-db: Arc<BlockChainDB>,
-engine: &EthEngine,
-reader: &SnapshotReader,
+db: Arc<dyn BlockChainDB>,
+engine: &dyn EthEngine,
+reader: &dyn SnapshotReader,
 genesis: &[u8],
 ) -> Result<(), ::error::Error> {
 use std::sync::atomic::AtomicBool;
@@ -72,8 +72,8 @@ impl Broadcast for Mutex<IoChannel<ClientIoMessage>> {
 /// A `ChainNotify` implementation which will trigger a snapshot event
 /// at certain block numbers.
 pub struct Watcher {
-oracle: Box<Oracle>,
-broadcast: Box<Broadcast>,
+oracle: Box<dyn Oracle>,
+broadcast: Box<dyn Broadcast>,
 period: u64,
 history: u64,
 }
@@ -382,7 +382,7 @@ pub struct Spec {
 /// User friendly spec name
 pub name: String,
 /// What engine are we using for this?
-pub engine: Arc<EthEngine>,
+pub engine: Arc<dyn EthEngine>,
 /// Name of the subdir inside the main data dir to use for chain data and settings.
 pub data_dir: String,

@@ -601,7 +601,7 @@ impl Spec {
 engine_spec: ethjson::spec::Engine,
 params: CommonParams,
 builtins: BTreeMap<Address, Builtin>,
-) -> Arc<EthEngine> {
+) -> Arc<dyn EthEngine> {
 let machine = Self::machine(&engine_spec, params, builtins);

 match engine_spec {
@@ -217,7 +217,7 @@ impl Account {

 /// Get (and cache) the contents of the trie's storage at `key`.
 /// Takes modified storage into account.
-pub fn storage_at(&self, db: &HashDB<KeccakHasher, DBValue>, key: &H256) -> TrieResult<H256> {
+pub fn storage_at(&self, db: &dyn HashDB<KeccakHasher, DBValue>, key: &H256) -> TrieResult<H256> {
 if let Some(value) = self.cached_storage_at(key) {
 return Ok(value);
 }
@@ -230,7 +230,7 @@ impl Account {

 /// Get (and cache) the contents of the trie's storage at `key`.
 /// Does not take modified storage into account.
-pub fn original_storage_at(&self, db: &HashDB<KeccakHasher, DBValue>, key: &H256) -> TrieResult<H256> {
+pub fn original_storage_at(&self, db: &dyn HashDB<KeccakHasher, DBValue>, key: &H256) -> TrieResult<H256> {
 if let Some(value) = self.cached_original_storage_at(key) {
 return Ok(value);
 }
@@ -252,7 +252,7 @@ impl Account {
 }
 }

-fn get_and_cache_storage(storage_root: &H256, storage_cache: &mut LruCache<H256, H256>, db: &HashDB<KeccakHasher, DBValue>, key: &H256) -> TrieResult<H256> {
+fn get_and_cache_storage(storage_root: &H256, storage_cache: &mut LruCache<H256, H256>, db: &dyn HashDB<KeccakHasher, DBValue>, key: &H256) -> TrieResult<H256> {
 let db = SecTrieDB::new(&db, storage_root)?;
 let panicky_decoder = |bytes:&[u8]| ::rlp::decode(&bytes).expect("decoding db value failed");
 let item: U256 = db.get_with(key.as_bytes(), panicky_decoder)?.unwrap_or_else(U256::zero);
@@ -358,7 +358,7 @@ impl Account {

 /// Provide a database to get `code_hash`. Should not be called if it is a contract without code. Returns the cached code, if successful.
 #[must_use]
-pub fn cache_code(&mut self, db: &HashDB<KeccakHasher, DBValue>) -> Option<Arc<Bytes>> {
+pub fn cache_code(&mut self, db: &dyn HashDB<KeccakHasher, DBValue>) -> Option<Arc<Bytes>> {
 // TODO: fill out self.code_cache;
 trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty());

@@ -388,7 +388,7 @@ impl Account {
 /// Provide a database to get `code_size`. Should not be called if it is a contract without code. Returns whether
 /// the cache succeeds.
 #[must_use]
-pub fn cache_code_size(&mut self, db: &HashDB<KeccakHasher, DBValue>) -> bool {
+pub fn cache_code_size(&mut self, db: &dyn HashDB<KeccakHasher, DBValue>) -> bool {
 // TODO: fill out self.code_cache;
 trace!("Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty());
 self.code_size.is_some() ||
@@ -482,7 +482,7 @@ impl Account {
 }

 /// Commit the `storage_changes` to the backing DB and update `storage_root`.
-pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB<KeccakHasher, DBValue>) -> TrieResult<()> {
+pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut dyn HashDB<KeccakHasher, DBValue>) -> TrieResult<()> {
 let mut t = trie_factory.from_existing(db, &mut self.storage_root)?;
 for (k, v) in self.storage_changes.drain() {
 // cast key and value to trait type,
@@ -499,7 +499,7 @@ impl Account {
 }

 /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this.
-pub fn commit_code(&mut self, db: &mut HashDB<KeccakHasher, DBValue>) {
+pub fn commit_code(&mut self, db: &mut dyn HashDB<KeccakHasher, DBValue>) {
 trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_filth == Filth::Dirty, self.code_cache.is_empty());
 match (self.code_filth == Filth::Dirty, self.code_cache.is_empty()) {
 (true, true) => {
@@ -588,7 +588,7 @@ impl Account {
 /// trie.
 /// `storage_key` is the hash of the desired storage key, meaning
 /// this will only work correctly under a secure trie.
-pub fn prove_storage(&self, db: &HashDB<KeccakHasher, DBValue>, storage_key: H256) -> TrieResult<(Vec<Bytes>, H256)> {
+pub fn prove_storage(&self, db: &dyn HashDB<KeccakHasher, DBValue>, storage_key: H256) -> TrieResult<(Vec<Bytes>, H256)> {
 let mut recorder = Recorder::new();

 let trie = TrieDB::new(&db, &self.storage_root)?;
@@ -36,10 +36,10 @@ use journaldb::AsKeyedHashDB;
/// State backend. See module docs for more details.
pub trait Backend: Send {
/// Treat the backend as a read-only hashdb.
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue>;
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue>;

/// Treat the backend as a writeable hashdb.
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue>;
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue>;

/// Add an account entry to the cache.
fn add_to_account_cache(&mut self, addr: Address, data: Option<Account>, modified: bool);
@@ -114,13 +114,13 @@ impl HashDB<KeccakHasher, DBValue> for ProofCheck {
}

impl AsHashDB<KeccakHasher, DBValue> for ProofCheck {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
}

impl Backend for ProofCheck {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
fn add_to_account_cache(&mut self, _addr: Address, _data: Option<Account>, _modified: bool) {}
fn cache_code(&self, _hash: H256, _code: Arc<Vec<u8>>) {}
fn get_cached_account(&self, _addr: &Address) -> Option<Option<Account>> { None }
@@ -146,12 +146,12 @@ pub struct Proving<H> {
}

impl<AH: AsKeyedHashDB + Send + Sync> AsKeyedHashDB for Proving<AH> {
-fn as_keyed_hash_db(&self) -> &journaldb::KeyedHashDB { self }
+fn as_keyed_hash_db(&self) -> &dyn journaldb::KeyedHashDB { self }
}

impl<AH: AsHashDB<KeccakHasher, DBValue> + Send + Sync> AsHashDB<KeccakHasher, DBValue> for Proving<AH> {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }
-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }
}

impl<H: AsKeyedHashDB + Send + Sync> journaldb::KeyedHashDB for Proving<H> {
@@ -194,9 +194,9 @@ impl<H: AsHashDB<KeccakHasher, DBValue> + Send + Sync> HashDB<KeccakHasher, DBVa
}

impl<H: AsHashDB<KeccakHasher, DBValue> + Send + Sync> Backend for Proving<H> {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self }

-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> { self }
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> { self }

fn add_to_account_cache(&mut self, _: Address, _: Option<Account>, _: bool) { }

@@ -248,11 +248,11 @@ impl<H: AsHashDB<KeccakHasher, DBValue> + Clone> Clone for Proving<H> {
pub struct Basic<H>(pub H);

impl<H: AsHashDB<KeccakHasher, DBValue> + Send + Sync> Backend for Basic<H> {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> {
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> {
self.0.as_hash_db()
}

-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> {
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> {
self.0.as_hash_db_mut()
}

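The `Backend` and `AsHashDB` hunks above all rewrite trait objects in return position. As a hedged, standalone sketch of that pattern (the `KeyValue` and `AsKeyValue` traits and the `MemoryStore` type below are invented for illustration and are not part of ethcore), the old spelling `&KeyValue` trips the `bare_trait_objects` lint on nightly, while `&dyn KeyValue` compiles without warnings:

use std::collections::HashMap;

trait KeyValue {
    fn get(&self, key: &str) -> Option<&String>;
}

trait AsKeyValue {
    // `fn as_key_value(&self) -> &KeyValue` would warn on nightly;
    // the trait object has to be written `&dyn KeyValue`.
    fn as_key_value(&self) -> &dyn KeyValue;
}

struct MemoryStore(HashMap<String, String>);

impl KeyValue for MemoryStore {
    fn get(&self, key: &str) -> Option<&String> {
        self.0.get(key)
    }
}

impl AsKeyValue for MemoryStore {
    // Same shape as `fn as_hash_db(&self) -> &dyn HashDB<_, _> { self }` above:
    // the concrete type hands itself out as a trait object.
    fn as_key_value(&self) -> &dyn KeyValue {
        self
    }
}

fn main() {
    let mut store = MemoryStore(HashMap::new());
    store.0.insert("parent".to_string(), "0x00".to_string());
    println!("{:?}", store.as_key_value().get("parent"));
}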
@@ -638,7 +638,7 @@ impl<B: Backend> State<B> {
&self, address: &Address, key: &H256, f_cached_at: FCachedStorageAt, f_at: FStorageAt,
) -> TrieResult<H256> where
FCachedStorageAt: Fn(&Account, &H256) -> Option<H256>,
-FStorageAt: Fn(&Account, &HashDB<KeccakHasher, DBValue>, &H256) -> TrieResult<H256>
+FStorageAt: Fn(&Account, &dyn HashDB<KeccakHasher, DBValue>, &H256) -> TrieResult<H256>
{
// Storage key search and update works like this:
// 1. If there's an entry for the account in the local cache check for the key and return it if found.
@@ -1101,7 +1101,7 @@ impl<B: Backend> State<B> {

/// Load required account data from the databases. Returns whether the cache succeeds.
#[must_use]
-fn update_account_cache(require: RequireCache, account: &mut Account, state_db: &B, db: &HashDB<KeccakHasher, DBValue>) -> bool {
+fn update_account_cache(require: RequireCache, account: &mut Account, state_db: &B, db: &dyn HashDB<KeccakHasher, DBValue>) -> bool {
if let RequireCache::None = require {
return true;
}
@@ -107,7 +107,7 @@ struct BlockChanges {
/// `StateDB` is propagated into the global cache.
pub struct StateDB {
/// Backing database.
-db: Box<JournalDB>,
+db: Box<dyn JournalDB>,
/// Shared canonical state cache.
account_cache: Arc<Mutex<AccountCache>>,
/// DB Code cache. Maps code hashes to shared bytes.
@@ -132,7 +132,7 @@ impl StateDB {
/// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping.
// TODO: make the cache size actually accurate by moving the account storage cache
// into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`.
-pub fn new(db: Box<JournalDB>, cache_size: usize) -> StateDB {
+pub fn new(db: Box<dyn JournalDB>, cache_size: usize) -> StateDB {
let bloom = Self::load_bloom(&**db.backing());
let acc_cache_size = cache_size * ACCOUNT_CACHE_RATIO / 100;
let code_cache_size = cache_size - acc_cache_size;
@@ -156,7 +156,7 @@ impl StateDB {

/// Loads accounts bloom from the database
/// This bloom is used to handle request for the non-existant account fast
-pub fn load_bloom(db: &KeyValueDB) -> Bloom {
+pub fn load_bloom(db: &dyn KeyValueDB) -> Bloom {
let hash_count_entry = db.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY)
.expect("Low-level database error");

@@ -313,12 +313,12 @@ impl StateDB {
}

/// Conversion method to interpret self as `HashDB` reference
-pub fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> {
+pub fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> {
self.db.as_hash_db()
}

/// Conversion method to interpret self as mutable `HashDB` reference
-pub fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> {
+pub fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> {
self.db.as_hash_db_mut()
}

@@ -368,7 +368,7 @@ impl StateDB {
}

/// Returns underlying `JournalDB`.
-pub fn journal_db(&self) -> &JournalDB {
+pub fn journal_db(&self) -> &dyn JournalDB {
&*self.db
}

@@ -407,9 +407,9 @@ impl StateDB {
}

impl state::Backend for StateDB {
-fn as_hash_db(&self) -> &HashDB<KeccakHasher, DBValue> { self.db.as_hash_db() }
+fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> { self.db.as_hash_db() }

-fn as_hash_db_mut(&mut self) -> &mut HashDB<KeccakHasher, DBValue> {
+fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> {
self.db.as_hash_db_mut()
}

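The `StateDB` hunks are the owned-pointer variant of the same lint fix: `Box<JournalDB>` and `Arc<KeyValueDB>` become `Box<dyn JournalDB>` and `Arc<dyn KeyValueDB>`. A minimal sketch, assuming a made-up `Journal` trait and `Store` struct rather than the real ethcore types:

use std::sync::Arc;

trait Journal {
    fn latest_era(&self) -> Option<u64>;
}

struct InMemoryJournal {
    era: u64,
}

impl Journal for InMemoryJournal {
    fn latest_era(&self) -> Option<u64> {
        Some(self.era)
    }
}

// Before the fix these fields would be written `Box<Journal>` / `Arc<Journal>`,
// which nightly flags with the `bare_trait_objects` lint.
struct Store {
    db: Box<dyn Journal>,
    shared: Arc<dyn Journal>,
}

fn main() {
    let store = Store {
        db: Box::new(InMemoryJournal { era: 1 }),
        shared: Arc::new(InMemoryJournal { era: 2 }),
    };
    println!("{:?} {:?}", store.db.latest_era(), store.shared.latest_era());
}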
@@ -267,11 +267,11 @@ struct TestBlockChainDB {
_trace_blooms_dir: TempDir,
blooms: blooms_db::Database,
trace_blooms: blooms_db::Database,
-key_value: Arc<KeyValueDB>,
+key_value: Arc<dyn KeyValueDB>,
}

impl BlockChainDB for TestBlockChainDB {
-fn key_value(&self) -> &Arc<KeyValueDB> {
+fn key_value(&self) -> &Arc<dyn KeyValueDB> {
&self.key_value
}

@@ -285,7 +285,7 @@ impl BlockChainDB for TestBlockChainDB {
}

/// Creates new test instance of `BlockChainDB`
-pub fn new_db() -> Arc<BlockChainDB> {
+pub fn new_db() -> Arc<dyn BlockChainDB> {
let blooms_dir = TempDir::new("").unwrap();
let trace_blooms_dir = TempDir::new("").unwrap();

@@ -301,7 +301,7 @@ pub fn new_db() -> Arc<BlockChainDB> {
}

/// Creates a new temporary `BlockChainDB` on FS
-pub fn new_temp_db(tempdir: &Path) -> Arc<BlockChainDB> {
+pub fn new_temp_db(tempdir: &Path) -> Arc<dyn BlockChainDB> {
let blooms_dir = TempDir::new("").unwrap();
let trace_blooms_dir = TempDir::new("").unwrap();
let key_value_dir = tempdir.join("key_value");
@@ -321,7 +321,7 @@ pub fn new_temp_db(tempdir: &Path) -> Arc<BlockChainDB> {
}

/// Creates new instance of KeyValueDBHandler
-pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box<BlockChainDBHandler> {
+pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box<dyn BlockChainDBHandler> {
struct RestorationDBHandler {
config: kvdb_rocksdb::DatabaseConfig,
}
@@ -329,11 +329,11 @@ pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box<Block
struct RestorationDB {
blooms: blooms_db::Database,
trace_blooms: blooms_db::Database,
-key_value: Arc<KeyValueDB>,
+key_value: Arc<dyn KeyValueDB>,
}

impl BlockChainDB for RestorationDB {
-fn key_value(&self) -> &Arc<KeyValueDB> {
+fn key_value(&self) -> &Arc<dyn KeyValueDB> {
&self.key_value
}

@@ -347,7 +347,7 @@ pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box<Block
}

impl BlockChainDBHandler for RestorationDBHandler {
-fn open(&self, db_path: &Path) -> io::Result<Arc<BlockChainDB>> {
+fn open(&self, db_path: &Path) -> io::Result<Arc<dyn BlockChainDB>> {
let key_value = Arc::new(kvdb_rocksdb::Database::open(&self.config, &db_path.to_string_lossy())?);
let blooms_path = db_path.join("blooms");
let trace_blooms_path = db_path.join("trace_blooms");
@@ -64,7 +64,7 @@ pub struct TraceDB<T> where T: DatabaseExtras {
/// hashes of cached traces
cache_manager: RwLock<CacheManager<H256>>,
/// db
-db: Arc<BlockChainDB>,
+db: Arc<dyn BlockChainDB>,
/// tracing enabled
enabled: bool,
/// extras
@@ -73,7 +73,7 @@ pub struct TraceDB<T> where T: DatabaseExtras {

impl<T> TraceDB<T> where T: DatabaseExtras {
/// Creates new instance of `TraceDB`.
-pub fn new(config: Config, db: Arc<BlockChainDB>, extras: Arc<T>) -> Self {
+pub fn new(config: Config, db: Arc<dyn BlockChainDB>, extras: Arc<T>) -> Self {
let mut batch = DBTransaction::new();
let genesis = extras.block_hash(0)
.expect("Genesis block is always inserted upon extras db creation qed");
@@ -32,7 +32,7 @@ impl<C: BlockInfo + CallContract> Verifier<C> for CanonVerifier {
&self,
header: &Header,
parent: &Header,
-engine: &EthEngine,
+engine: &dyn EthEngine,
do_full: Option<verification::FullFamilyParams<C>>,
) -> Result<(), Error> {
verification::verify_block_family(header, parent, engine, do_full)
@@ -42,7 +42,7 @@ impl<C: BlockInfo + CallContract> Verifier<C> for CanonVerifier {
verification::verify_block_final(expected, got)
}

-fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error> {
+fn verify_block_external(&self, header: &Header, engine: &dyn EthEngine) -> Result<(), Error> {
engine.verify_block_external(header)
}
}
@@ -44,7 +44,7 @@ pub enum VerifierType {
}

/// Create a new verifier based on type.
-pub fn new<C: BlockInfo + CallContract>(v: VerifierType) -> Box<Verifier<C>> {
+pub fn new<C: BlockInfo + CallContract>(v: VerifierType) -> Box<dyn Verifier<C>> {
match v {
VerifierType::Canon | VerifierType::CanonNoSeal => Box::new(CanonVerifier),
VerifierType::Noop => Box::new(NoopVerifier),
@@ -32,7 +32,7 @@ impl<C: BlockInfo + CallContract> Verifier<C> for NoopVerifier {
&self,
_: &Header,
_t: &Header,
-_: &EthEngine,
+_: &dyn EthEngine,
_: Option<verification::FullFamilyParams<C>>
) -> Result<(), Error> {
Ok(())
@@ -42,7 +42,7 @@ impl<C: BlockInfo + CallContract> Verifier<C> for NoopVerifier {
Ok(())
}

-fn verify_block_external(&self, _header: &Header, _engine: &EthEngine) -> Result<(), Error> {
+fn verify_block_external(&self, _header: &Header, _engine: &dyn EthEngine) -> Result<(), Error> {
Ok(())
}
}
@@ -58,10 +58,10 @@ pub trait Kind: 'static + Sized + Send + Sync {
type Verified: Sized + Send + BlockLike + HeapSizeOf;

/// Attempt to create the `Unverified` item from the input.
-fn create(input: Self::Input, engine: &EthEngine, check_seal: bool) -> Result<Self::Unverified, (Self::Input, Error)>;
+fn create(input: Self::Input, engine: &dyn EthEngine, check_seal: bool) -> Result<Self::Unverified, (Self::Input, Error)>;

/// Attempt to verify the `Unverified` item using the given engine.
-fn verify(unverified: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result<Self::Verified, Error>;
+fn verify(unverified: Self::Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result<Self::Verified, Error>;
}

/// The blocks verification module.
@@ -86,7 +86,7 @@ pub mod blocks {
type Unverified = Unverified;
type Verified = PreverifiedBlock;

-fn create(input: Self::Input, engine: &EthEngine, check_seal: bool) -> Result<Self::Unverified, (Self::Input, Error)> {
+fn create(input: Self::Input, engine: &dyn EthEngine, check_seal: bool) -> Result<Self::Unverified, (Self::Input, Error)> {
match verify_block_basic(&input, engine, check_seal) {
Ok(()) => Ok(input),
Err(Error::Block(BlockError::TemporarilyInvalid(oob))) => {
@@ -100,7 +100,7 @@ pub mod blocks {
}
}

-fn verify(un: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result<Self::Verified, Error> {
+fn verify(un: Self::Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result<Self::Verified, Error> {
let hash = un.hash();
match verify_block_unordered(un, engine, check_seal) {
Ok(verified) => Ok(verified),
@@ -209,14 +209,14 @@ pub mod headers {
type Unverified = Header;
type Verified = Header;

-fn create(input: Self::Input, engine: &EthEngine, check_seal: bool) -> Result<Self::Unverified, (Self::Input, Error)> {
+fn create(input: Self::Input, engine: &dyn EthEngine, check_seal: bool) -> Result<Self::Unverified, (Self::Input, Error)> {
match verify_header_params(&input, engine, true, check_seal) {
Ok(_) => Ok(input),
Err(err) => Err((input, err))
}
}

-fn verify(unverified: Self::Unverified, engine: &EthEngine, check_seal: bool) -> Result<Self::Verified, Error> {
+fn verify(unverified: Self::Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result<Self::Verified, Error> {
match check_seal {
true => engine.verify_block_unordered(&unverified,).map(|_| unverified),
false => Ok(unverified),
@@ -138,7 +138,7 @@ struct Sizes {
/// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`.
/// Keeps them in the same order as inserted, minus invalid items.
pub struct VerificationQueue<K: Kind> {
-engine: Arc<EthEngine>,
+engine: Arc<dyn EthEngine>,
more_to_verify: Arc<Condvar>,
verification: Arc<Verification<K>>,
deleting: Arc<AtomicBool>,
@@ -206,7 +206,7 @@ struct Verification<K: Kind> {

impl<K: Kind> VerificationQueue<K> {
/// Creates a new queue instance.
-pub fn new(config: Config, engine: Arc<EthEngine>, message_channel: IoChannel<ClientIoMessage>, check_seal: bool) -> Self {
+pub fn new(config: Config, engine: Arc<dyn EthEngine>, message_channel: IoChannel<ClientIoMessage>, check_seal: bool) -> Self {
let verification = Arc::new(Verification {
unverified: LenCachingMutex::new(VecDeque::new()),
verifying: LenCachingMutex::new(VecDeque::new()),
@@ -293,7 +293,7 @@ impl<K: Kind> VerificationQueue<K> {

fn verify(
verification: Arc<Verification<K>>,
-engine: Arc<EthEngine>,
+engine: Arc<dyn EthEngine>,
wait: Arc<Condvar>,
ready: Arc<QueueSignal>,
empty: Arc<Condvar>,
@@ -64,7 +64,7 @@ impl HeapSizeOf for PreverifiedBlock {
}

/// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block
-pub fn verify_block_basic(block: &Unverified, engine: &EthEngine, check_seal: bool) -> Result<(), Error> {
+pub fn verify_block_basic(block: &Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result<(), Error> {
verify_header_params(&block.header, engine, true, check_seal)?;
verify_block_integrity(block)?;

@@ -89,7 +89,7 @@ pub fn verify_block_basic(block: &Unverified, engine: &EthEngine, check_seal: bo
/// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
/// Still operates on a individual block
/// Returns a `PreverifiedBlock` structure populated with transactions
-pub fn verify_block_unordered(block: Unverified, engine: &EthEngine, check_seal: bool) -> Result<PreverifiedBlock, Error> {
+pub fn verify_block_unordered(block: Unverified, engine: &dyn EthEngine, check_seal: bool) -> Result<PreverifiedBlock, Error> {
let header = block.header;
if check_seal {
engine.verify_block_unordered(&header)?;
@@ -131,14 +131,14 @@ pub struct FullFamilyParams<'a, C: BlockInfo + CallContract + 'a> {
pub block: &'a PreverifiedBlock,

/// Block provider to use during verification
-pub block_provider: &'a BlockProvider,
+pub block_provider: &'a dyn BlockProvider,

/// Engine client to use during verification
pub client: &'a C,
}

/// Phase 3 verification. Check block information against parent and uncles.
-pub fn verify_block_family<C: BlockInfo + CallContract>(header: &Header, parent: &Header, engine: &EthEngine, do_full: Option<FullFamilyParams<C>>) -> Result<(), Error> {
+pub fn verify_block_family<C: BlockInfo + CallContract>(header: &Header, parent: &Header, engine: &dyn EthEngine, do_full: Option<FullFamilyParams<C>>) -> Result<(), Error> {
// TODO: verify timestamp
verify_parent(&header, &parent, engine)?;
engine.verify_block_family(&header, &parent)?;
@@ -159,7 +159,7 @@ pub fn verify_block_family<C: BlockInfo + CallContract>(header: &Header, parent:
Ok(())
}

-fn verify_uncles(block: &PreverifiedBlock, bc: &BlockProvider, engine: &EthEngine) -> Result<(), Error> {
+fn verify_uncles(block: &PreverifiedBlock, bc: &dyn BlockProvider, engine: &dyn EthEngine) -> Result<(), Error> {
let header = &block.header;
let num_uncles = block.uncles.len();
let max_uncles = engine.maximum_uncle_count(header.number());
@@ -267,7 +267,7 @@ pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error>
}

/// Check basic header parameters.
-pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool, check_seal: bool) -> Result<(), Error> {
+pub fn verify_header_params(header: &Header, engine: &dyn EthEngine, is_full: bool, check_seal: bool) -> Result<(), Error> {
if check_seal {
let expected_seal_fields = engine.seal_fields(header);
if header.seal().len() != expected_seal_fields {
@@ -326,7 +326,7 @@ pub fn verify_header_params(header: &Header, engine: &EthEngine, is_full: bool,
}

/// Check header parameters agains parent header.
-fn verify_parent(header: &Header, parent: &Header, engine: &EthEngine) -> Result<(), Error> {
+fn verify_parent(header: &Header, parent: &Header, engine: &dyn EthEngine) -> Result<(), Error> {
assert!(header.parent_hash().is_zero() || &parent.hash() == header.parent_hash(),
"Parent hash should already have been verified; qed");

@@ -516,12 +516,12 @@ mod tests {
}
}

-fn basic_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> {
+fn basic_test(bytes: &[u8], engine: &dyn EthEngine) -> Result<(), Error> {
let unverified = Unverified::from_rlp(bytes.to_vec())?;
verify_block_basic(&unverified, engine, true)
}

-fn family_test<BC>(bytes: &[u8], engine: &EthEngine, bc: &BC) -> Result<(), Error> where BC: BlockProvider {
+fn family_test<BC>(bytes: &[u8], engine: &dyn EthEngine, bc: &BC) -> Result<(), Error> where BC: BlockProvider {
let block = Unverified::from_rlp(bytes.to_vec()).unwrap();
let header = block.header;
let transactions: Vec<_> = block.transactions
@@ -547,13 +547,13 @@ mod tests {

let full_params = FullFamilyParams {
block: &block,
-block_provider: bc as &BlockProvider,
+block_provider: bc as &dyn BlockProvider,
client: &client,
};
verify_block_family(&block.header, &parent, engine, Some(full_params))
}

-fn unordered_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> {
+fn unordered_test(bytes: &[u8], engine: &dyn EthEngine) -> Result<(), Error> {
let un = Unverified::from_rlp(bytes.to_vec())?;
verify_block_unordered(un, engine, false)?;
Ok(())
@@ -32,12 +32,12 @@ pub trait Verifier<C>: Send + Sync
&self,
header: &Header,
parent: &Header,
-engine: &EthEngine,
+engine: &dyn EthEngine,
do_full: Option<verification::FullFamilyParams<C>>
) -> Result<(), Error>;

/// Do a final verification check for an enacted header vs its expected counterpart.
fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>;
/// Verify a block, inspecting external state.
-fn verify_block_external(&self, header: &Header, engine: &EthEngine) -> Result<(), Error>;
+fn verify_block_external(&self, header: &Header, engine: &dyn EthEngine) -> Result<(), Error>;
}
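The verification and verifier hunks apply the same change to borrowed parameters (`engine: &EthEngine` becomes `engine: &dyn EthEngine`) and to explicit casts (`bc as &BlockProvider` becomes `bc as &dyn BlockProvider`). A standalone sketch with an invented `Engine` trait, outside this codebase:

trait Engine {
    fn seal_fields(&self) -> usize;
}

struct InstantSeal;

impl Engine for InstantSeal {
    fn seal_fields(&self) -> usize { 0 }
}

// Old parameter spelling `engine: &Engine` warns on nightly; `&dyn Engine` does not.
fn verify_header(seal_len: usize, engine: &dyn Engine) -> Result<(), String> {
    if seal_len == engine.seal_fields() {
        Ok(())
    } else {
        Err(format!("expected {} seal fields, got {}", engine.seal_fields(), seal_len))
    }
}

fn main() {
    let engine = InstantSeal;
    // The cast form changes the same way: `&engine as &Engine` -> `&engine as &dyn Engine`.
    let as_object = &engine as &dyn Engine;
    println!("{:?}", verify_header(0, as_object));
}

The same rewrite appears throughout the diff for `Arc<dyn EthEngine>`, `Box<dyn Verifier<C>>`, and the `&dyn HashDB` signatures; the change is purely syntactic and does not alter behaviour.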