Remove accounts bloom (#33)
parent 4fb4ef6d24
commit d17ee979b8

Cargo.lock (generated): 1058 changes
File diff suppressed because it is too large
@@ -9,7 +9,6 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ansi_term = "0.10"
blooms-db = { path = "../util/blooms-db", optional = true }
byteorder = "1.0"
common-types = { path = "types" }
crossbeam-utils = "0.6"
eip-152 = { version = "0.1", path = "../util/EIP-152" }

@@ -33,12 +33,13 @@ pub const COL_BODIES: Option<u32> = Some(2);
pub const COL_EXTRA: Option<u32> = Some(3);
/// Column for Traces
pub const COL_TRACE: Option<u32> = Some(4);
/// Column for the empty accounts bloom filter.
/// Column for the accounts existence bloom filter.
#[deprecated(since = "3.0.0", note = "Accounts bloom column is deprecated")]
pub const COL_ACCOUNT_BLOOM: Option<u32> = Some(5);
/// Column for general information from the local node which can persist.
pub const COL_NODE_INFO: Option<u32> = Some(6);
/// Number of columns in DB
pub const NUM_COLUMNS: Option<u32> = Some(8);
pub const NUM_COLUMNS: Option<u32> = Some(7);

/// Modes for updating caches.
#[derive(Clone, Copy)]
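The hunk above keeps the column handle but marks it deprecated and changes NUM_COLUMNS from Some(8) to Some(7). A standalone sketch of that layout, plus a hypothetical cleanup helper (not from this commit) showing how remaining code can still reference the deprecated constant without tripping the lint:

```rust
// Standalone sketch (not this repo's file) of the column constants after the
// change, and how a leftover code path such as a cleanup migration might
// reference the deprecated handle without a `deprecated` warning.
pub const COL_TRACE: Option<u32> = Some(4);

/// Column for the accounts existence bloom filter.
#[deprecated(since = "3.0.0", note = "Accounts bloom column is deprecated")]
pub const COL_ACCOUNT_BLOOM: Option<u32> = Some(5);

pub const COL_NODE_INFO: Option<u32> = Some(6);
pub const NUM_COLUMNS: Option<u32> = Some(7);

// Hypothetical helper that still needs the old column (e.g. to delete its data).
#[allow(deprecated)]
fn column_to_wipe() -> Option<u32> {
    COL_ACCOUNT_BLOOM
}

fn main() {
    println!(
        "wiping column {:?} of {:?} total (trace = {:?}, node info = {:?})",
        column_to_wipe(),
        NUM_COLUMNS,
        COL_TRACE,
        COL_NODE_INFO
    );
}
```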
@@ -59,13 +59,11 @@
#![recursion_limit = "128"]

extern crate ansi_term;
extern crate byteorder;
extern crate common_types as types;
extern crate crossbeam_utils;
extern crate ethabi;
extern crate ethash;
extern crate ethcore_blockchain as blockchain;
extern crate ethcore_bloom_journal as bloom_journal;
extern crate ethcore_builtin as builtin;
extern crate ethcore_call_contract as call_contract;
extern crate ethcore_db as db;
@@ -34,9 +34,8 @@ use blockchain::{BlockChain, BlockProvider};
use engines::EthEngine;
use types::{header::Header, ids::BlockId};

use bloom_journal::Bloom;
use bytes::Bytes;
use ethereum_types::{H256, U256};
use ethereum_types::H256;
use ethtrie::{TrieDB, TrieDBMut};
use hash_db::HashDB;
use journaldb::{self, Algorithm, JournalDB};

@@ -50,8 +49,6 @@ use trie::{Trie, TrieMut};

use self::io::SnapshotWriter;

use super::{state::Account as StateAccount, state_db::StateDB};

use crossbeam_utils::thread;
use rand::{OsRng, Rng};
@@ -433,7 +430,6 @@ pub struct StateRebuilder {
state_root: H256,
known_code: HashMap<H256, H256>, // code hashes mapped to first account with this code.
missing_code: HashMap<H256, Vec<H256>>, // maps code hashes to lists of accounts missing that code.
bloom: Bloom,
known_storage_roots: HashMap<H256, H256>, // maps account hashes to last known storage root. Only filled for last account per chunk.
}

@@ -445,7 +441,6 @@ impl StateRebuilder {
state_root: KECCAK_NULL_RLP,
known_code: HashMap::new(),
missing_code: HashMap::new(),
bloom: StateDB::load_bloom(&*db),
known_storage_roots: HashMap::new(),
}
}

@@ -453,7 +448,6 @@ impl StateRebuilder {
/// Feed an uncompressed state chunk into the rebuilder.
pub fn feed(&mut self, chunk: &[u8], flag: &AtomicBool) -> Result<(), ::error::Error> {
let rlp = Rlp::new(chunk);
let empty_rlp = StateAccount::new_basic(U256::zero(), U256::zero()).rlp();
let mut pairs = Vec::with_capacity(rlp.item_count()?);

// initialize the pairs vector with empty values so we have slots to write into.

@@ -489,8 +483,6 @@ impl StateRebuilder {
self.known_code.insert(code_hash, first_with);
}

let backing = self.db.backing().clone();

// batch trie writes
{
let mut account_trie = if self.state_root != KECCAK_NULL_RLP {

@@ -504,18 +496,10 @@
return Err(Error::RestorationAborted.into());
}

if &thin_rlp[..] != &empty_rlp[..] {
self.bloom.set(&*hash);
}
account_trie.insert(&hash, &thin_rlp)?;
}
}

let bloom_journal = self.bloom.drain_journal();
let mut batch = backing.transaction();
StateDB::commit_bloom(&mut batch, bloom_journal)?;
self.db.inject(&mut batch)?;
backing.write_buffered(batch);
trace!(target: "snapshot", "current state root: {:?}", self.state_root);
Ok(())
}
@@ -707,7 +707,6 @@ impl Spec {
}

for (address, account) in self.genesis_state.get().iter() {
db.note_non_null_account(address);
account.insert_additional(
&mut *factories
.accountdb
@@ -65,13 +65,6 @@ pub trait Backend: Send {

/// Get cached code based on hash.
fn get_cached_code(&self, hash: &H256) -> Option<Arc<Vec<u8>>>;

/// Note that an account with the given address is non-null.
fn note_non_null_account(&self, address: &Address);

/// Check whether an account is known to be empty. Returns true if known to be
/// empty, false otherwise.
fn is_known_null(&self, address: &Address) -> bool;
}

/// A raw backend used to check proofs of execution.

@@ -150,10 +143,6 @@ impl Backend for ProofCheck {
fn get_cached_code(&self, _hash: &H256) -> Option<Arc<Vec<u8>>> {
None
}
fn note_non_null_account(&self, _address: &Address) {}
fn is_known_null(&self, _address: &Address) -> bool {
false
}
}

/// Proving state backend.

@@ -252,10 +241,6 @@ impl<H: AsHashDB<KeccakHasher, DBValue> + Send + Sync> Backend for Proving<H> {
fn get_cached_code(&self, _: &H256) -> Option<Arc<Vec<u8>>> {
None
}
fn note_non_null_account(&self, _: &Address) {}
fn is_known_null(&self, _: &Address) -> bool {
false
}
}

impl<H: AsHashDB<KeccakHasher, DBValue>> Proving<H> {

@@ -317,8 +302,4 @@ impl<H: AsHashDB<KeccakHasher, DBValue> + Send + Sync> Backend for Basic<H> {
fn get_cached_code(&self, _: &H256) -> Option<Arc<Vec<u8>>> {
None
}
fn note_non_null_account(&self, _: &Address) {}
fn is_known_null(&self, _: &Address) -> bool {
false
}
}
@@ -554,19 +554,17 @@ impl<B: Backend> State<B> {
pub fn exists(&self, a: &Address) -> TrieResult<bool> {
// Bloom filter does not contain empty accounts, so it is important here to
// check if account exists in the database directly before EIP-161 is in effect.
self.ensure_cached(a, RequireCache::None, false, |a| a.is_some())
self.ensure_cached(a, RequireCache::None, |a| a.is_some())
}

/// Determine whether an account exists and if not empty.
pub fn exists_and_not_null(&self, a: &Address) -> TrieResult<bool> {
self.ensure_cached(a, RequireCache::None, false, |a| {
a.map_or(false, |a| !a.is_null())
})
self.ensure_cached(a, RequireCache::None, |a| a.map_or(false, |a| !a.is_null()))
}

/// Determine whether an account exists and has code or non-zero nonce.
pub fn exists_and_has_code_or_nonce(&self, a: &Address) -> TrieResult<bool> {
self.ensure_cached(a, RequireCache::CodeSize, false, |a| {
self.ensure_cached(a, RequireCache::CodeSize, |a| {
a.map_or(false, |a| {
a.code_hash() != KECCAK_EMPTY || *a.nonce() != self.account_start_nonce
})
@@ -575,7 +573,7 @@ impl<B: Backend> State<B> {

/// Get the balance of account `a`.
pub fn balance(&self, a: &Address) -> TrieResult<U256> {
self.ensure_cached(a, RequireCache::None, true, |a| {
self.ensure_cached(a, RequireCache::None, |a| {
a.as_ref()
.map_or(U256::zero(), |account| *account.balance())
})

@@ -583,7 +581,7 @@ impl<B: Backend> State<B> {

/// Get the nonce of account `a`.
pub fn nonce(&self, a: &Address) -> TrieResult<U256> {
self.ensure_cached(a, RequireCache::None, true, |a| {
self.ensure_cached(a, RequireCache::None, |a| {
a.as_ref()
.map_or(self.account_start_nonce, |account| *account.nonce())
})

@@ -592,7 +590,7 @@ impl<B: Backend> State<B> {
/// Whether the base storage root of an account remains unchanged.
pub fn is_base_storage_root_unchanged(&self, a: &Address) -> TrieResult<bool> {
Ok(self
.ensure_cached(a, RequireCache::None, true, |a| {
.ensure_cached(a, RequireCache::None, |a| {
a.as_ref()
.map(|account| account.is_base_storage_root_unchanged())
})?

@@ -601,7 +599,7 @@ impl<B: Backend> State<B> {

/// Get the storage root of account `a`.
pub fn storage_root(&self, a: &Address) -> TrieResult<Option<H256>> {
self.ensure_cached(a, RequireCache::None, true, |a| {
self.ensure_cached(a, RequireCache::None, |a| {
a.as_ref().and_then(|account| account.storage_root())
})
}

@@ -609,7 +607,7 @@ impl<B: Backend> State<B> {
/// Get the original storage root since last commit of account `a`.
pub fn original_storage_root(&self, a: &Address) -> TrieResult<H256> {
Ok(self
.ensure_cached(a, RequireCache::None, true, |a| {
.ensure_cached(a, RequireCache::None, |a| {
a.as_ref().map(|account| account.original_storage_root())
})?
.unwrap_or(KECCAK_NULL_RLP))
@@ -755,11 +753,6 @@ impl<B: Backend> State<B> {
}
}

// check if the account could exist before any requests to trie
if self.db.is_known_null(address) {
return Ok(H256::zero());
}

// account is not found in the global cache, get from the DB and insert into local
let db = &self.db.as_hash_db();
let db = self

@@ -802,21 +795,19 @@ impl<B: Backend> State<B> {

/// Get accounts' code.
pub fn code(&self, a: &Address) -> TrieResult<Option<Arc<Bytes>>> {
self.ensure_cached(a, RequireCache::Code, true, |a| {
self.ensure_cached(a, RequireCache::Code, |a| {
a.as_ref().map_or(None, |a| a.code().clone())
})
}

/// Get an account's code hash.
pub fn code_hash(&self, a: &Address) -> TrieResult<Option<H256>> {
self.ensure_cached(a, RequireCache::None, true, |a| {
a.as_ref().map(|a| a.code_hash())
})
self.ensure_cached(a, RequireCache::None, |a| a.as_ref().map(|a| a.code_hash()))
}

/// Get accounts' code size.
pub fn code_size(&self, a: &Address) -> TrieResult<Option<usize>> {
self.ensure_cached(a, RequireCache::CodeSize, true, |a| {
self.ensure_cached(a, RequireCache::CodeSize, |a| {
a.as_ref().and_then(|a| a.code_size())
})
}
@@ -1021,9 +1012,6 @@ impl<B: Backend> State<B> {
account.commit_storage(&self.factories.trie, account_db.as_hash_db_mut())?;
account.commit_code(account_db.as_hash_db_mut());
}
if !account.is_empty() {
self.db.note_non_null_account(address);
}
}
}

@@ -1208,7 +1196,7 @@ impl<B: Backend> State<B> {
|m: TrieResult<_>, address| {
let mut m = m?;

let account = self.ensure_cached(&address, RequireCache::Code, true, |acc| {
let account = self.ensure_cached(&address, RequireCache::Code, |acc| {
acc.map(|acc| {
// Merge all modified storage keys.
let all_keys = {

@@ -1324,13 +1312,7 @@ impl<B: Backend> State<B> {
/// Check caches for required data
/// First searches for account in the local, then the shared cache.
/// Populates local cache if nothing found.
fn ensure_cached<F, U>(
&self,
a: &Address,
require: RequireCache,
check_null: bool,
f: F,
) -> TrieResult<U>
fn ensure_cached<F, U>(&self, a: &Address, require: RequireCache, f: F) -> TrieResult<U>
where
F: Fn(Option<&Account>) -> U,
{
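With the `check_null` flag gone, `ensure_cached` takes only the address, the cache requirement, and the closure, and every caller above drops the extra boolean. A toy sketch of the closure-based lookup shape that remains (stand-in types, not ethcore's):

```rust
// Toy sketch of the surviving `ensure_cached(a, require, f)` shape: no bloom
// short-circuit, just look the account up and hand Option<&Account> to the
// closure. Types here are stand-ins, not the project's.
use std::collections::HashMap;

struct Account {
    nonce: u64,
}

struct State {
    cache: HashMap<String, Account>,
}

impl State {
    fn ensure_cached<F, U>(&self, addr: &str, f: F) -> U
    where
        F: Fn(Option<&Account>) -> U,
    {
        // The real code consults the local cache, the shared cache and finally
        // the state trie; a single map stands in for all of that here.
        f(self.cache.get(addr))
    }

    fn exists(&self, addr: &str) -> bool {
        self.ensure_cached(addr, |a| a.is_some())
    }

    fn nonce(&self, addr: &str) -> u64 {
        self.ensure_cached(addr, |a| a.map_or(0, |acc| acc.nonce))
    }
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert("alice".to_string(), Account { nonce: 3 });
    let state = State { cache };
    assert!(state.exists("alice"));
    assert_eq!(state.nonce("missing"), 0);
}
```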
@@ -1365,11 +1347,6 @@ impl<B: Backend> State<B> {
match result {
Some(r) => Ok(r?),
None => {
// first check if it is not in database for sure
if check_null && self.db.is_known_null(a) {
return Ok(f(None));
}

// not found in the global cache, get from the DB and insert into local
let db = &self.db.as_hash_db();
let db = self.factories.trie.readonly(db, &self.root)?;

@@ -1424,15 +1401,11 @@ impl<B: Backend> State<B> {
match self.db.get_cached_account(a) {
Some(acc) => self.insert_cache(a, AccountEntry::new_clean_cached(acc)),
None => {
let maybe_acc = if !self.db.is_known_null(a) {
let db = &self.db.as_hash_db();
let db = self.factories.trie.readonly(db, &self.root)?;
let from_rlp =
|b: &[u8]| Account::from_rlp(b).expect("decoding db value failed");
AccountEntry::new_clean(db.get_with(a, from_rlp)?)
} else {
AccountEntry::new_clean(None)
};
let maybe_acc = AccountEntry::new_clean(db.get_with(a, from_rlp)?);
self.insert_cache(a, maybe_acc);
}
}
@@ -22,15 +22,11 @@ use std::{
sync::Arc,
};

use bloom_journal::{Bloom, BloomJournal};
use byteorder::{ByteOrder, LittleEndian};
use db::COL_ACCOUNT_BLOOM;
use ethereum_types::{Address, H256};
use hash::keccak;
use hash_db::HashDB;
use journaldb::JournalDB;
use keccak_hasher::KeccakHasher;
use kvdb::{DBTransaction, DBValue, KeyValueDB};
use kvdb::{DBTransaction, DBValue};
use lru_cache::LruCache;
use memory_cache::MemoryLruCache;
use parking_lot::Mutex;

@@ -38,19 +34,6 @@ use types::BlockNumber;

use state::{self, Account};

/// Value used to initialize bloom bitmap size.
///
/// Bitmap size is the size in bytes (not bits) that will be allocated in memory.
pub const ACCOUNT_BLOOM_SPACE: usize = 1048576;

/// Value used to initialize bloom items count.
///
/// Items count is an estimation of the maximum number of items to store.
pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000;

/// Key for a value storing amount of hashes
pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count";

const STATE_CACHE_BLOCKS: usize = 12;

// The percentage of supplied cache size to go to accounts.
@@ -116,8 +99,6 @@ pub struct StateDB {
code_cache: Arc<Mutex<MemoryLruCache<H256, Arc<Vec<u8>>>>>,
/// Local dirty cache.
local_cache: Vec<CacheQueueItem>,
/// Shared account bloom. Does not handle chain reorganizations.
account_bloom: Arc<Mutex<Bloom>>,
cache_size: usize,
/// Hash of the block on top of which this instance was created or
/// `None` if cache is disabled

@@ -134,7 +115,6 @@ impl StateDB {
// TODO: make the cache size actually accurate by moving the account storage cache
// into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`.
pub fn new(db: Box<dyn JournalDB>, cache_size: usize) -> StateDB {
let bloom = Self::load_bloom(&**db.backing());
let acc_cache_size = cache_size * ACCOUNT_CACHE_RATIO / 100;
let code_cache_size = cache_size - acc_cache_size;
let cache_items = acc_cache_size / ::std::mem::size_of::<Option<Account>>();

@@ -147,7 +127,6 @@ impl StateDB {
})),
code_cache: Arc::new(Mutex::new(MemoryLruCache::new(code_cache_size))),
local_cache: Vec::new(),
account_bloom: Arc::new(Mutex::new(bloom)),
cache_size: cache_size,
parent_hash: None,
commit_hash: None,
@@ -155,56 +134,6 @@ impl StateDB {
}
}

/// Loads accounts bloom from the database
/// This bloom is used to handle request for the non-existant account fast
pub fn load_bloom(db: &dyn KeyValueDB) -> Bloom {
let hash_count_entry = db
.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY)
.expect("Low-level database error");

let hash_count_bytes = match hash_count_entry {
Some(bytes) => bytes,
None => return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET),
};

assert_eq!(hash_count_bytes.len(), 1);
let hash_count = hash_count_bytes[0];

let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8];
let mut key = [0u8; 8];
for i in 0..ACCOUNT_BLOOM_SPACE / 8 {
LittleEndian::write_u64(&mut key, i as u64);
bloom_parts[i] = db
.get(COL_ACCOUNT_BLOOM, &key)
.expect("low-level database error")
.and_then(|val| Some(LittleEndian::read_u64(&val[..])))
.unwrap_or(0u64);
}

let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32);
trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.saturation(), hash_count);
bloom
}

/// Commit blooms journal to the database transaction
pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> io::Result<()> {
assert!(journal.hash_functions <= 255);
batch.put(
COL_ACCOUNT_BLOOM,
ACCOUNT_BLOOM_HASHCOUNT_KEY,
&[journal.hash_functions as u8],
);
let mut key = [0u8; 8];
let mut val = [0u8; 8];

for (bloom_part_index, bloom_part_value) in journal.entries {
LittleEndian::write_u64(&mut key, bloom_part_index as u64);
LittleEndian::write_u64(&mut val, bloom_part_value);
batch.put(COL_ACCOUNT_BLOOM, &key, &val);
}
Ok(())
}

/// Journal all recent operations under the given era and ID.
pub fn journal_under(
&mut self,
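For reference, the removed `load_bloom`/`commit_bloom` pair persisted the filter as ACCOUNT_BLOOM_SPACE / 8 little-endian 64-bit words keyed by their little-endian index, plus a one-byte hash-function count under ACCOUNT_BLOOM_HASHCOUNT_KEY. A std-only sketch of that key/value layout (no kvdb or byteorder):

```rust
// Std-only sketch of the on-disk layout used by the removed load_bloom /
// commit_bloom pair: each 64-bit bloom word sits under its little-endian u64
// index. (The real code also writes the one-byte hash-function count and goes
// through kvdb + byteorder; a HashMap stands in for the column here.)
use std::collections::HashMap;

fn write_parts(parts: &[u64]) -> HashMap<[u8; 8], [u8; 8]> {
    let mut column = HashMap::new();
    for (i, part) in parts.iter().enumerate() {
        column.insert((i as u64).to_le_bytes(), part.to_le_bytes());
    }
    column
}

fn read_part(column: &HashMap<[u8; 8], [u8; 8]>, i: u64) -> u64 {
    column
        .get(&i.to_le_bytes())
        .map(|v| u64::from_le_bytes(*v))
        // Missing keys decode to an empty word, mirroring the unwrap_or(0u64).
        .unwrap_or(0)
}

fn main() {
    let column = write_parts(&[0, 42, u64::MAX]);
    assert_eq!(read_part(&column, 1), 42);
    assert_eq!(read_part(&column, 2), u64::MAX);
    assert_eq!(read_part(&column, 7), 0);
}
```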
@@ -212,10 +141,6 @@ impl StateDB {
now: u64,
id: &H256,
) -> io::Result<u32> {
{
let mut bloom_lock = self.account_bloom.lock();
Self::commit_bloom(batch, bloom_lock.drain_journal())?;
}
let records = self.db.journal_under(batch, now, id)?;
self.commit_hash = Some(id.clone());
self.commit_number = Some(now);

@@ -365,7 +290,6 @@ impl StateDB {
account_cache: self.account_cache.clone(),
code_cache: self.code_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
cache_size: self.cache_size,
parent_hash: None,
commit_hash: None,

@@ -380,7 +304,6 @@ impl StateDB {
account_cache: self.account_cache.clone(),
code_cache: self.code_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
cache_size: self.cache_size,
parent_hash: Some(parent.clone()),
commit_hash: None,

@@ -510,19 +433,6 @@ impl state::Backend for StateDB {

cache.get_mut(hash).map(|code| code.clone())
}

fn note_non_null_account(&self, address: &Address) {
trace!(target: "account_bloom", "Note account bloom: {:?}", address);
let mut bloom = self.account_bloom.lock();
bloom.set(&*keccak(address));
}

fn is_known_null(&self, address: &Address) -> bool {
trace!(target: "account_bloom", "Check account bloom: {:?}", address);
let bloom = self.account_bloom.lock();
let is_null = !bloom.check(&*keccak(address));
is_null
}
}

/// Sync wrapper for the account.
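The deleted `is_known_null` fast path was sound because a Bloom filter has no false negatives: a failed membership check proves the address was never noted as non-null, while a positive check only means "maybe present" and still requires a trie lookup. A toy filter (not ethcore-bloom-journal) illustrating that property:

```rust
// Toy bloom filter (not ethcore-bloom-journal) illustrating the guarantee the
// removed fast path relied on: no false negatives. If check() returns false,
// the item was never set, so the account is definitely absent.
struct Bloom {
    bits: Vec<bool>,
}

impl Bloom {
    fn new(size: usize) -> Self {
        Bloom { bits: vec![false; size] }
    }

    // Two ad-hoc index functions stand in for the real hash functions.
    fn indices(&self, item: &str) -> [usize; 2] {
        let sum: usize = item.bytes().map(usize::from).sum();
        let mix = item
            .bytes()
            .fold(7usize, |acc, b| acc.wrapping_mul(31).wrapping_add(b as usize));
        [sum % self.bits.len(), mix % self.bits.len()]
    }

    fn set(&mut self, item: &str) {
        for i in self.indices(item) {
            self.bits[i] = true;
        }
    }

    fn check(&self, item: &str) -> bool {
        self.indices(item).iter().all(|&i| self.bits[i])
    }
}

fn main() {
    let mut bloom = Bloom::new(1024);
    bloom.set("0xdeadbeef");
    // Set items always check true...
    assert!(bloom.check("0xdeadbeef"));
    // ...while a false check is proof of absence ("known null").
    let is_known_null = !bloom.check("0xcafebabe");
    println!("0xcafebabe known null: {}", is_known_null);
}
```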
@@ -47,7 +47,7 @@ pub const TO_V12: ChangeColumns = ChangeColumns {
/// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
const CURRENT_VERSION: u32 = 13;
const CURRENT_VERSION: u32 = 16;
/// A version of database at which blooms-db was introduced
const BLOOMS_DB_VERSION: u32 = 13;
/// Defines how many items are migrated to the new version of database at once.
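The bump of CURRENT_VERSION from 13 to 16 implies a migration for databases recorded at an older version. An illustrative sketch, not the crate's actual migration driver, of how such version constants are typically compared:

```rust
// Illustrative sketch (not the crate's actual migration code) of how the
// constants from the hunk above are typically used: a database with no
// version file is treated as DEFAULT_VERSION, and anything below
// CURRENT_VERSION gets migrated; BLOOMS_DB_VERSION marks where blooms-db
// appeared.
const DEFAULT_VERSION: u32 = 5;
const CURRENT_VERSION: u32 = 16;
const BLOOMS_DB_VERSION: u32 = 13;

fn effective_version(found: Option<u32>) -> u32 {
    found.unwrap_or(DEFAULT_VERSION)
}

fn migration_required(found: Option<u32>) -> bool {
    effective_version(found) < CURRENT_VERSION
}

fn predates_blooms_db(found: Option<u32>) -> bool {
    effective_version(found) < BLOOMS_DB_VERSION
}

fn main() {
    assert!(migration_required(None)); // no version file: assume v5
    assert!(migration_required(Some(13))); // older than the new version
    assert!(!migration_required(Some(CURRENT_VERSION)));
    assert!(predates_blooms_db(Some(12)));
    assert!(!predates_blooms_db(Some(14)));
}
```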