2020-09-22 14:53:52 +02:00
|
|
|
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
|
|
|
|
// This file is part of OpenEthereum.
|
2016-09-27 18:02:11 +02:00
|
|
|
|
2020-09-22 14:53:52 +02:00
|
|
|
// OpenEthereum is free software: you can redistribute it and/or modify
|
2016-09-27 18:02:11 +02:00
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
2020-09-22 14:53:52 +02:00
|
|
|
// OpenEthereum is distributed in the hope that it will be useful,
|
2016-09-27 18:02:11 +02:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
2020-09-22 14:53:52 +02:00
|
|
|
// along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
|
2016-09-27 18:02:11 +02:00
|
|
|
|
2018-03-03 18:42:13 +01:00
|
|
|
//! State database abstraction. For more info, see the doc for `StateDB`
|
2018-01-17 22:11:13 +01:00
|
|
|
|
2018-07-06 15:09:39 +02:00
|
|
|
use std::{
|
2020-09-14 16:08:57 +02:00
|
|
|
collections::{BTreeMap, HashSet, VecDeque},
|
2018-07-06 15:09:39 +02:00
|
|
|
io,
|
|
|
|
sync::Arc,
|
|
|
|
};
|
|
|
|
|
2018-01-10 13:35:18 +01:00
|
|
|
use ethereum_types::{Address, H256};
|
2019-02-20 19:09:34 +01:00
|
|
|
use hash_db::HashDB;
|
2018-07-02 18:50:05 +02:00
|
|
|
use journaldb::JournalDB;
|
2019-01-04 14:05:46 +01:00
|
|
|
use keccak_hasher::KeccakHasher;
|
2020-09-22 12:41:04 +02:00
|
|
|
use kvdb::{DBTransaction, DBValue};
|
2018-07-02 18:50:05 +02:00
|
|
|
use lru_cache::LruCache;
|
|
|
|
use memory_cache::MemoryLruCache;
|
2017-09-02 20:09:13 +02:00
|
|
|
use parking_lot::Mutex;
|
2019-01-04 14:05:46 +01:00
|
|
|
use types::BlockNumber;
|
|
|
|
|
2018-07-02 18:50:05 +02:00
|
|
|
use state::{self, Account};
|
2016-09-27 18:02:11 +02:00
|
|
|
|
2016-10-14 14:44:11 +02:00
|
|
|
/// Number of recent blocks whose per-block modification records are kept
/// in the shared account cache (see `AccountCache::modifications`).
const STATE_CACHE_BLOCKS: usize = 12;

// The percentage of supplied cache size to go to accounts.
// The remainder of the byte budget is given to the code cache (see `StateDB::new`).
const ACCOUNT_CACHE_RATIO: usize = 90;
|
|
|
|
|
2016-10-11 19:37:31 +02:00
|
|
|
/// Shared canonical state cache.
///
/// Shared between all `StateDB` clones via `Arc<Mutex<_>>`; all entries are
/// clean (reflect on-disk canonical state).
struct AccountCache {
    /// DB Account cache. `None` indicates that account is known to be missing.
    // When changing the type of the values here, be sure to update `mem_used` and
    // `new`.
    accounts: LruCache<Address, Option<Account>>,
    /// Information on the modifications in recently committed blocks; specifically which addresses
    /// changed in which block. Ordered by block number.
    modifications: VecDeque<BlockChanges>,
}
|
|
|
|
|
|
|
|
/// Buffered account cache item.
///
/// Queued in `StateDB::local_cache` and propagated to the shared
/// `AccountCache` by `sync_cache`.
struct CacheQueueItem {
    /// Account address.
    address: Address,
    /// Account data or `None` if account does not exist.
    account: SyncAccount,
    /// Indicates that the account was modified before being
    /// added to the cache.
    modified: bool,
}
|
|
|
|
|
|
|
|
#[derive(Debug)]
/// Accumulates a list of accounts changed in a block.
struct BlockChanges {
    /// Block number.
    number: BlockNumber,
    /// Block hash.
    hash: H256,
    /// Parent block hash.
    parent: H256,
    /// A set of modified account addresses.
    accounts: HashSet<Address>,
    /// Block is part of the canonical chain.
    is_canon: bool,
}
|
|
|
|
|
|
|
|
/// State database abstraction.
/// Manages shared global state cache which reflects the canonical
/// state as it is on the disk. All the entries in the cache are clean.
/// A clone of `StateDB` may be created as canonical or not.
/// For canonical clones local cache is accumulated and applied
/// in `sync_cache`.
/// For non-canonical clones local cache is dropped.
///
/// Global cache propagation.
/// After a `State` object has been committed to the trie it
/// propagates its local cache into the `StateDB` local cache
/// using `add_to_account_cache` function.
/// Then, after the block has been added to the chain the local cache in the
/// `StateDB` is propagated into the global cache.
pub struct StateDB {
    /// Backing database.
    db: Box<dyn JournalDB>,
    /// Shared canonical state cache.
    account_cache: Arc<Mutex<AccountCache>>,
    /// DB Code cache. Maps code hashes to shared bytes.
    code_cache: Arc<Mutex<MemoryLruCache<H256, Arc<Vec<u8>>>>>,
    /// Local dirty cache.
    local_cache: Vec<CacheQueueItem>,
    /// Total cache budget in bytes, as passed to `new`.
    cache_size: usize,
    /// Hash of the block on top of which this instance was created or
    /// `None` if cache is disabled
    parent_hash: Option<H256>,
    /// Hash of the committing block or `None` if not committed yet.
    commit_hash: Option<H256>,
    /// Number of the committing block or `None` if not committed yet.
    commit_number: Option<BlockNumber>,
}
|
|
|
|
|
|
|
|
impl StateDB {
|
2016-10-07 00:28:42 +02:00
|
|
|
/// Create a new instance wrapping `JournalDB` and the maximum allowed size
|
|
|
|
/// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping.
|
|
|
|
// TODO: make the cache size actually accurate by moving the account storage cache
|
|
|
|
// into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`.
|
2020-07-29 10:36:15 +02:00
|
|
|
pub fn new(db: Box<dyn JournalDB>, cache_size: usize) -> StateDB {
|
2016-10-28 16:04:44 +02:00
|
|
|
let acc_cache_size = cache_size * ACCOUNT_CACHE_RATIO / 100;
|
|
|
|
let code_cache_size = cache_size - acc_cache_size;
|
|
|
|
let cache_items = acc_cache_size / ::std::mem::size_of::<Option<Account>>();
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2016-09-27 18:02:11 +02:00
|
|
|
StateDB {
|
|
|
|
db: db,
|
2016-10-07 13:55:20 +02:00
|
|
|
account_cache: Arc::new(Mutex::new(AccountCache {
|
|
|
|
accounts: LruCache::new(cache_items),
|
|
|
|
modifications: VecDeque::new(),
|
|
|
|
})),
|
2016-10-29 21:27:53 +02:00
|
|
|
code_cache: Arc::new(Mutex::new(MemoryLruCache::new(code_cache_size))),
|
2016-10-07 13:55:20 +02:00
|
|
|
local_cache: Vec::new(),
|
2016-10-07 00:28:42 +02:00
|
|
|
cache_size: cache_size,
|
2016-10-07 13:55:20 +02:00
|
|
|
parent_hash: None,
|
|
|
|
commit_hash: None,
|
|
|
|
commit_number: None,
|
2020-08-05 06:08:03 +02:00
|
|
|
}
|
2016-10-03 12:02:43 +02:00
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2016-10-13 12:59:32 +02:00
|
|
|
/// Journal all recent operations under the given era and ID.
|
2018-07-06 15:09:39 +02:00
|
|
|
pub fn journal_under(
|
|
|
|
&mut self,
|
|
|
|
batch: &mut DBTransaction,
|
|
|
|
now: u64,
|
|
|
|
id: &H256,
|
|
|
|
) -> io::Result<u32> {
|
2016-12-27 12:53:56 +01:00
|
|
|
let records = self.db.journal_under(batch, now, id)?;
|
2016-10-11 19:37:31 +02:00
|
|
|
self.commit_hash = Some(id.clone());
|
|
|
|
self.commit_number = Some(now);
|
2016-09-27 18:02:11 +02:00
|
|
|
Ok(records)
|
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2020-12-02 11:31:11 +01:00
|
|
|
// t_nb 9.15
/// Mark a given candidate from an ancient era as canonical, enacting its removals from the
/// backing database and reverting any non-canonical historical commit's insertions.
pub fn mark_canonical(
    &mut self,
    batch: &mut DBTransaction,
    end_era: u64,
    canon_id: &H256,
) -> io::Result<u32> {
    // Pure delegation to the backing journal DB; no cache state is touched here.
    self.db.mark_canonical(batch, end_era, canon_id)
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2020-12-02 11:31:11 +01:00
|
|
|
// t_nb 9.10 Propagate local cache into the global cache and synchronize
/// the global cache with the best block state.
/// This function updates the global cache by removing entries
/// that are invalidated by chain reorganization. `sync_cache`
/// should be called after the block has been committed and the
/// blockchain route has been calculated.
pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) {
    trace!(
        "sync_cache id = (#{:?}, {:?}), parent={:?}, best={}",
        self.commit_number,
        self.commit_hash,
        self.parent_hash,
        is_best
    );
    let mut cache = self.account_cache.lock();
    // Reborrow so `accounts` and `modifications` can be borrowed independently below.
    let cache = &mut *cache;

    // Purge changes from re-enacted and retracted blocks.
    // Filter out committing block if any.
    let mut clear = false;
    for block in enacted
        .iter()
        .filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h != p))
    {
        clear = clear || {
            if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) {
                trace!("Reverting enacted block {:?}", block);
                m.is_canon = true;
                // Drop cached values for every account the block touched; they
                // will be re-read from the now-canonical state on next access.
                for a in &m.accounts {
                    trace!("Reverting enacted address {:?}", a);
                    cache.accounts.remove(a);
                }
                false
            } else {
                // Unknown block in the route — the cache can't be repaired incrementally.
                true
            }
        };
    }

    for block in retracted {
        clear = clear || {
            if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) {
                trace!("Retracting block {:?}", block);
                m.is_canon = false;
                for a in &m.accounts {
                    trace!("Retracted address {:?}", a);
                    cache.accounts.remove(a);
                }
                false
            } else {
                true
            }
        };
    }
    if clear {
        // We don't know anything about the block; clear everything
        trace!("Wiping cache");
        cache.accounts.clear();
        cache.modifications.clear();
    }

    // Propagate cache only if committing on top of the latest canonical state
    // blocks are ordered by number and only one block with a given number is marked as canonical
    // (contributed to canonical state cache)
    if let (Some(ref number), Some(ref hash), Some(ref parent)) =
        (self.commit_number, self.commit_hash, self.parent_hash)
    {
        // Keep at most STATE_CACHE_BLOCKS modification records; evict the oldest.
        if cache.modifications.len() == STATE_CACHE_BLOCKS {
            cache.modifications.pop_back();
        }
        let mut modifications = HashSet::new();
        trace!("committing {} cache entries", self.local_cache.len());
        for account in self.local_cache.drain(..) {
            if account.modified {
                modifications.insert(account.address.clone());
            }
            // Only the canonical (best) commit contributes entries to the shared cache.
            if is_best {
                let acc = account.account.0;
                if let Some(&mut Some(ref mut existing)) =
                    cache.accounts.get_mut(&account.address)
                {
                    if let Some(new) = acc {
                        // Merge into the existing cached account rather than replacing,
                        // but only if the local entry actually carried modifications.
                        if account.modified {
                            existing.overwrite_with(new);
                        }
                        continue;
                    }
                }
                cache.accounts.insert(account.address, acc);
            }
        }

        // Save modified accounts. These are ordered by the block number.
        let block_changes = BlockChanges {
            accounts: modifications,
            number: *number,
            hash: hash.clone(),
            is_canon: is_best,
            parent: parent.clone(),
        };
        // Insert before the first record with a smaller number to keep the
        // deque ordered newest-first.
        let insert_at = cache
            .modifications
            .iter()
            .enumerate()
            .find(|&(_, m)| m.number < *number)
            .map(|(i, _)| i);
        trace!("inserting modifications at {:?}", insert_at);
        if let Some(insert_at) = insert_at {
            cache.modifications.insert(insert_at, block_changes);
        } else {
            cache.modifications.push_back(block_changes);
        }
    }
}
|
|
|
|
|
2018-03-03 18:42:13 +01:00
|
|
|
/// Conversion method to interpret self as `HashDB` reference
pub fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> {
    // Delegates straight to the backing journal DB.
    self.db.as_hash_db()
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-03-03 18:42:13 +01:00
|
|
|
/// Conversion method to interpret self as mutable `HashDB` reference
pub fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> {
    // Delegates straight to the backing journal DB.
    self.db.as_hash_db_mut()
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2016-09-27 18:02:11 +02:00
|
|
|
/// Clone the database.
///
/// The clone shares the global account/code caches, but starts with an
/// empty local cache and no parent hash (i.e. caching disabled — see the
/// `parent_hash` field docs).
pub fn boxed_clone(&self) -> StateDB {
    StateDB {
        db: self.db.boxed_clone(),
        // Shared caches: cloning the `Arc`s, not the contents.
        account_cache: self.account_cache.clone(),
        code_cache: self.code_cache.clone(),
        local_cache: Vec::new(),
        cache_size: self.cache_size,
        parent_hash: None,
        commit_hash: None,
        commit_number: None,
    }
}
|
|
|
|
|
2016-09-27 18:02:11 +02:00
|
|
|
/// Clone the database for a canonical state.
///
/// Unlike `boxed_clone`, the clone records `parent` as the block it sits on
/// top of, enabling cache lookups (see `is_allowed`) and cache propagation
/// via `sync_cache`.
pub fn boxed_clone_canon(&self, parent: &H256) -> StateDB {
    StateDB {
        db: self.db.boxed_clone(),
        // Shared caches: cloning the `Arc`s, not the contents.
        account_cache: self.account_cache.clone(),
        code_cache: self.code_cache.clone(),
        local_cache: Vec::new(),
        cache_size: self.cache_size,
        parent_hash: Some(parent.clone()),
        commit_hash: None,
        commit_number: None,
    }
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2016-09-27 18:02:11 +02:00
|
|
|
/// Check if pruning is enabled on the database.
pub fn is_pruned(&self) -> bool {
    self.db.is_pruned()
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2016-09-27 18:02:11 +02:00
|
|
|
/// Heap size used.
|
2020-09-14 16:08:57 +02:00
|
|
|
pub fn get_sizes(&self, sizes: &mut BTreeMap<String, usize>) {
|
|
|
|
self.db.get_sizes(sizes);
|
|
|
|
|
|
|
|
sizes.insert(
|
|
|
|
String::from("account_cache_len"),
|
|
|
|
self.account_cache.lock().accounts.len(),
|
|
|
|
);
|
|
|
|
sizes.insert(
|
|
|
|
String::from("code_cache_size"),
|
|
|
|
self.code_cache.lock().current_size(),
|
|
|
|
);
|
2016-10-28 16:04:44 +02:00
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2016-09-27 18:02:11 +02:00
|
|
|
/// Returns underlying `JournalDB`.
pub fn journal_db(&self) -> &dyn JournalDB {
    &*self.db
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2016-10-07 00:28:42 +02:00
|
|
|
/// Query how much memory is set aside for the accounts cache (in bytes).
pub fn cache_size(&self) -> usize {
    self.cache_size
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2016-10-11 19:37:31 +02:00
|
|
|
/// Check if the account can be returned from cache by matching current block parent hash against canonical
/// state and filtering out account modified in later blocks.
fn is_allowed(
    addr: &Address,
    parent_hash: &H256,
    modifications: &VecDeque<BlockChanges>,
) -> bool {
    if modifications.is_empty() {
        // No recent modifications tracked: the cache reflects canonical state.
        return true;
    }
    // Ignore all accounts modified in later blocks.
    // Modifications contains blocks ordered by the number.
    // We search for our parent in that list first and then for
    // all its parents until we hit the canonical block,
    // checking against all the intermediate modifications.
    let mut parent = parent_hash;
    for m in modifications {
        if &m.hash == parent {
            if m.is_canon {
                // Reached a canonical ancestor without seeing `addr` modified
                // in any newer block: the cached value is safe to use.
                return true;
            }
            parent = &m.parent;
        }
        if m.accounts.contains(addr) {
            trace!(
                "Cache lookup skipped for {:?}: modified in a later block",
                addr
            );
            return false;
        }
    }
    // Walked the whole list without reaching a canonical ancestor: the
    // cache's relation to this state is unknown, so disallow the lookup.
    trace!(
        "Cache lookup skipped for {:?}: parent hash is unknown",
        addr
    );
    false
}
|
|
|
|
}
|
|
|
|
|
2017-02-21 12:35:21 +01:00
|
|
|
impl state::Backend for StateDB {
|
2020-07-29 10:36:15 +02:00
|
|
|
// Delegates to the backing journal DB.
fn as_hash_db(&self) -> &dyn HashDB<KeccakHasher, DBValue> {
    self.db.as_hash_db()
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2020-07-29 10:36:15 +02:00
|
|
|
// Delegates to the backing journal DB.
fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue> {
    self.db.as_hash_db_mut()
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2017-02-21 12:35:21 +01:00
|
|
|
fn add_to_account_cache(&mut self, addr: Address, data: Option<Account>, modified: bool) {
|
|
|
|
self.local_cache.push(CacheQueueItem {
|
|
|
|
address: addr,
|
2018-01-09 16:10:39 +01:00
|
|
|
account: SyncAccount(data),
|
2017-02-21 12:35:21 +01:00
|
|
|
modified: modified,
|
|
|
|
})
|
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2017-02-21 12:35:21 +01:00
|
|
|
// Insert compiled-code bytes into the shared code cache, keyed by code hash.
fn cache_code(&self, hash: H256, code: Arc<Vec<u8>>) {
    // The lock guard is a statement temporary: it is released as soon as
    // the insert completes, exactly as with an explicit `let` binding.
    self.code_cache.lock().insert(hash, code);
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2017-02-21 12:35:21 +01:00
|
|
|
// Returns a basic clone of the cached account, or `None` when caching is
// disabled, the cache entry is stale relative to `parent_hash`, or the
// account is simply not cached. `Some(None)` means "known to be missing".
fn get_cached_account(&self, addr: &Address) -> Option<Option<Account>> {
    self.parent_hash.as_ref().and_then(|parent_hash| {
        let mut cache = self.account_cache.lock();
        // Reject lookups that recent non-canonical modifications could invalidate.
        if !Self::is_allowed(addr, parent_hash, &cache.modifications) {
            return None;
        }
        cache
            .accounts
            .get_mut(addr)
            .map(|a| a.as_ref().map(|a| a.clone_basic()))
    })
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2017-02-21 12:35:21 +01:00
|
|
|
// Runs `f` against the cached account entry (if present and valid for this
// state) while holding the cache lock, returning `f`'s result.
fn get_cached<F, U>(&self, a: &Address, f: F) -> Option<U>
where
    F: FnOnce(Option<&mut Account>) -> U,
{
    self.parent_hash.as_ref().and_then(|parent_hash| {
        let mut cache = self.account_cache.lock();
        // Reject lookups that recent non-canonical modifications could invalidate.
        if !Self::is_allowed(a, parent_hash, &cache.modifications) {
            return None;
        }
        cache.accounts.get_mut(a).map(|c| f(c.as_mut()))
    })
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-03-20 15:46:03 +01:00
|
|
|
// Look up cached code bytes by code hash.
// `get_mut` (rather than an immutable get) is required to refresh the
// entry's LRU position.
fn get_cached_code(&self, hash: &H256) -> Option<Arc<Vec<u8>>> {
    // `Option::cloned` replaces the manual `map(|code| code.clone())`
    // (clippy::map_clone); cloning the `Arc` is a cheap refcount bump,
    // not a copy of the code bytes.
    self.code_cache.lock().get_mut(hash).cloned()
}
|
2017-02-21 12:35:21 +01:00
|
|
|
}
|
|
|
|
|
2018-01-09 16:10:39 +01:00
|
|
|
/// Sync wrapper for the account.
struct SyncAccount(Option<Account>);
/// That implementation is safe because account is never modified or accessed in any way.
/// We only need `Sync` here to allow `StateDB` to be kept in a `RwLock`.
/// `Account` is `!Sync` by default because of `RefCell`s inside it.
// SAFETY: the wrapped `Account` is only moved through the local cache queue
// (see `CacheQueueItem`) and unwrapped under the account-cache mutex.
unsafe impl Sync for SyncAccount {}
|
|
|
|
|
2016-10-11 19:37:31 +02:00
|
|
|
#[cfg(test)]
mod tests {
    use ethereum_types::{Address, H256, U256};
    use kvdb::DBTransaction;
    use state::{Account, Backend};
    use test_helpers::get_temp_state_db;

    /// Exercises cache propagation and invalidation across two competing
    /// branches and a reorg.
    #[test]
    fn state_db_smoke() {
        let _ = ::env_logger::try_init();

        let state_db = get_temp_state_db();
        let root_parent = H256::random();
        let address = Address::random();
        let h0 = H256::random();
        let h1a = H256::random();
        let h1b = H256::random();
        let h2a = H256::random();
        let h2b = H256::random();
        let h3a = H256::random();
        let h3b = H256::random();
        let mut batch = DBTransaction::new();

        // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ]
        // balance [ 5 5 4 3 2 2 ]
        let mut s = state_db.boxed_clone_canon(&root_parent);
        s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false);
        s.journal_under(&mut batch, 0, &h0).unwrap();
        s.sync_cache(&[], &[], true);

        let mut s = state_db.boxed_clone_canon(&h0);
        s.journal_under(&mut batch, 1, &h1a).unwrap();
        s.sync_cache(&[], &[], true);

        // Side branch 1b: modified entry, committed as non-canonical.
        let mut s = state_db.boxed_clone_canon(&h0);
        s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true);
        s.journal_under(&mut batch, 1, &h1b).unwrap();
        s.sync_cache(&[], &[], false);

        let mut s = state_db.boxed_clone_canon(&h1b);
        s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true);
        s.journal_under(&mut batch, 2, &h2b).unwrap();
        s.sync_cache(&[], &[], false);

        // Canonical branch writes balance 5 at 2a.
        let mut s = state_db.boxed_clone_canon(&h1a);
        s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true);
        s.journal_under(&mut batch, 2, &h2a).unwrap();
        s.sync_cache(&[], &[], true);

        let mut s = state_db.boxed_clone_canon(&h2a);
        s.journal_under(&mut batch, 3, &h3a).unwrap();
        s.sync_cache(&[], &[], true);

        // The canonical tip sees the cached balance written at 2a.
        let s = state_db.boxed_clone_canon(&h3a);
        assert_eq!(
            s.get_cached_account(&address).unwrap().unwrap().balance(),
            &U256::from(5)
        );

        // States on top of older or non-canonical blocks must miss the cache.
        let s = state_db.boxed_clone_canon(&h1a);
        assert!(s.get_cached_account(&address).is_none());

        let s = state_db.boxed_clone_canon(&h2b);
        assert!(s.get_cached_account(&address).is_none());

        let s = state_db.boxed_clone_canon(&h1b);
        assert!(s.get_cached_account(&address).is_none());

        // reorg to 3b
        // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ]
        let mut s = state_db.boxed_clone_canon(&h2b);
        s.journal_under(&mut batch, 3, &h3b).unwrap();
        s.sync_cache(
            &[h1b.clone(), h2b.clone(), h3b.clone()],
            &[h1a.clone(), h2a.clone(), h3a.clone()],
            true,
        );
        // 3a was retracted; its cache contribution must have been purged.
        let s = state_db.boxed_clone_canon(&h3a);
        assert!(s.get_cached_account(&address).is_none());
    }
}
|