configurable state cache size

parent 0c7a28779d
commit 745a50dfdf
@@ -177,7 +177,7 @@ impl Client {
 		};

 		let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
-		let mut state_db = StateDB::new(journal_db);
+		let mut state_db = StateDB::new(journal_db, config.state_cache_size);
 		if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db)) {
 			let mut batch = DBTransaction::new(&db);
 			try!(state_db.commit(&mut batch, 0, &spec.genesis_header().hash(), None));
@@ -691,7 +691,8 @@ impl snapshot::DatabaseRestore for Client {
 		let db = self.db.write();
 		try!(db.restore(new_db));

-		*state_db = StateDB::new(journaldb::new(db.clone(), self.pruning, ::db::COL_STATE));
+		let cache_size = state_db.cache_size();
+		*state_db = StateDB::new(journaldb::new(db.clone(), self.pruning, ::db::COL_STATE), cache_size);
 		*chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone()));
 		*tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone());
 		Ok(())
@@ -96,7 +96,7 @@ pub struct ClientConfig {
 	pub pruning: journaldb::Algorithm,
 	/// The name of the client instance.
 	pub name: String,
-	/// State db cache-size if not default
+	/// RocksDB state column cache-size if not default
 	pub db_cache_size: Option<usize>,
 	/// State db compaction profile
 	pub db_compaction: DatabaseCompactionProfile,
@@ -106,6 +106,8 @@ pub struct ClientConfig {
 	pub mode: Mode,
 	/// Type of block verifier used by client.
 	pub verifier_type: VerifierType,
+	/// State db cache-size.
+	pub state_cache_size: usize,
 }

 #[cfg(test)]
@@ -289,7 +289,7 @@ pub fn get_temp_state_db() -> GuardedTempResult<StateDB> {
 	let temp = RandomTempPath::new();
 	let db = Database::open(&DatabaseConfig::with_columns(NUM_COLUMNS), temp.as_str()).unwrap();
 	let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE);
-	let state_db = StateDB::new(journal_db);
+	let state_db = StateDB::new(journal_db, 1024 * 1024);
 	GuardedTempResult {
 		_temp: temp,
 		result: Some(state_db)
@@ -23,13 +23,13 @@ use super::super::instructions;

 const CACHE_CODE_ITEMS: usize = 4096;

-/// GLobal cache for EVM interpreter
+/// Global cache for EVM interpreter
 pub struct SharedCache {
 	jump_destinations: Mutex<LruCache<H256, Arc<BitSet>>>
 }

 impl SharedCache {
-	/// Get jump destincations bitmap for a contract.
+	/// Get jump destinations bitmap for a contract.
 	pub fn jump_destinations(&self, code_hash: &H256, code: &[u8]) -> Arc<BitSet> {
 		if code_hash == &SHA3_EMPTY {
 			return Self::find_jump_destinations(code);
@@ -24,8 +24,6 @@ use bloom_journal::{Bloom, BloomJournal};
 use db::COL_ACCOUNT_BLOOM;
 use byteorder::{LittleEndian, ByteOrder};

-const STATE_CACHE_ITEMS: usize = 65536;
-
 pub const ACCOUNT_BLOOM_SPACE: usize = 1048576;
 pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000;

@@ -33,6 +31,8 @@ pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count";

 struct AccountCache {
 	/// DB Account cache. `None` indicates that account is known to be missing.
+	// When changing the type of the values here, be sure to update `mem_used` and
+	// `new`.
 	accounts: LruCache<Address, Option<Account>>,
 }

@@ -48,19 +48,26 @@ pub struct StateDB {
 	cache_overlay: Vec<(Address, Option<Account>)>,
 	is_canon: bool,
 	account_bloom: Arc<Mutex<Bloom>>,
+	cache_size: usize,
 }

 impl StateDB {

-	/// Create a new instance wrapping `JournalDB`
-	pub fn new(db: Box<JournalDB>) -> StateDB {
+	/// Create a new instance wrapping `JournalDB` and the maximum allowed size
+	/// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping.
+	// TODO: make the cache size actually accurate by moving the account storage cache
+	// into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`.
+	pub fn new(db: Box<JournalDB>, cache_size: usize) -> StateDB {
 		let bloom = Self::load_bloom(db.backing());
+		let cache_items = cache_size / ::std::mem::size_of::<Option<Account>>();
+
 		StateDB {
 			db: db,
-			account_cache: Arc::new(Mutex::new(AccountCache { accounts: LruCache::new(STATE_CACHE_ITEMS) })),
+			account_cache: Arc::new(Mutex::new(AccountCache { accounts: LruCache::new(cache_items) })),
 			cache_overlay: Vec::new(),
 			is_canon: false,
 			account_bloom: Arc::new(Mutex::new(bloom)),
+			cache_size: cache_size,
 		}
 	}

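For orientation, here is a small standalone sketch (not Parity code) of the sizing arithmetic introduced above: the byte budget is divided by the size of one cached slot to get the LRU capacity, and the same constant gives the rough footprint that the reworked mem_used reports further down. The AccountStub type and the 25 MB budget are illustrative assumptions only.

use std::mem;

// Hypothetical stand-in for the cached value type (`Option<Account>` in the diff above).
#[allow(dead_code)]
struct AccountStub {
    nonce: u64,
    balance: [u64; 4],
    code_hash: [u8; 32],
    storage_root: [u8; 32],
}

/// Number of LRU slots a byte budget buys, mirroring `cache_size / size_of::<Option<Account>>()`.
fn cache_items_for_budget(cache_size_bytes: usize) -> usize {
    cache_size_bytes / mem::size_of::<Option<AccountStub>>()
}

/// Rough memory estimate for `len` occupied slots, mirroring the updated `mem_used`.
fn approx_mem_used(len: usize) -> usize {
    len * mem::size_of::<Option<AccountStub>>()
}

fn main() {
    let budget = 25 * 1024 * 1024; // e.g. a 25 MB state cache budget
    let items = cache_items_for_budget(budget);
    println!("{} bytes -> {} cache slots", budget, items);
    println!("full cache ~ {} bytes", approx_mem_used(items));
}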
@@ -151,6 +158,7 @@ impl StateDB {
 			cache_overlay: Vec::new(),
 			is_canon: false,
 			account_bloom: self.account_bloom.clone(),
+			cache_size: self.cache_size,
 		}
 	}

@@ -162,6 +170,7 @@ impl StateDB {
 			cache_overlay: Vec::new(),
 			is_canon: true,
 			account_bloom: self.account_bloom.clone(),
+			cache_size: self.cache_size,
 		}
 	}

@@ -172,7 +181,8 @@ impl StateDB {

 	/// Heap size used.
 	pub fn mem_used(&self) -> usize {
-		self.db.mem_used() //TODO: + self.account_cache.lock().heap_size_of_children()
+		// TODO: account for LRU-cache overhead; this is a close approximation.
+		self.db.mem_used() + self.account_cache.lock().accounts.len() * ::std::mem::size_of::<Option<Account>>()
 	}

 	/// Returns underlying `JournalDB`.
@@ -228,5 +238,10 @@ impl StateDB {
 		let mut cache = self.account_cache.lock();
 		cache.accounts.get_mut(a).map(|c| f(c.as_mut()))
 	}
+
+	/// Query how much memory is set aside for the accounts cache (in bytes).
+	pub fn cache_size(&self) -> usize {
+		self.cache_size
+	}
 }

@@ -346,7 +346,7 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
 pub fn get_temp_state_db_in(path: &Path) -> StateDB {
 	let db = new_db(path.to_str().expect("Only valid utf8 paths for tests."));
 	let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, COL_STATE);
-	StateDB::new(journal_db)
+	StateDB::new(journal_db, 5 * 1024 * 1024)
 }

 pub fn get_temp_state_in(path: &Path) -> State {
@@ -21,15 +21,13 @@ const MIN_DB_CACHE_MB: u32 = 2;
 const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16;
 const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 50;
 const DEFAULT_TRACE_CACHE_SIZE: u32 = 20;
+const DEFAULT_STATE_CACHE_SIZE: u32 = 25;

 /// Configuration for application cache sizes.
 /// All values are represented in MB.
 #[derive(Debug, PartialEq)]
 pub struct CacheConfig {
-	/// Size of database cache set using option `set_block_cache_size_mb`
-	/// 50% is blockchain
-	/// 25% is tracing
-	/// 25% is state
+	/// Size of rocksDB cache. Almost all goes to the state column.
 	db: u32,
 	/// Size of blockchain cache.
 	blockchain: u32,
@@ -37,11 +35,13 @@ pub struct CacheConfig {
 	queue: u32,
 	/// Size of traces cache.
 	traces: u32,
+	/// Size of the state cache.
+	state: u32,
 }

 impl Default for CacheConfig {
 	fn default() -> Self {
-		CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB)
+		CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, DEFAULT_STATE_CACHE_SIZE)
 	}
 }

@@ -49,26 +49,28 @@ impl CacheConfig {
 	/// Creates new cache config with cumulative size equal `total`.
 	pub fn new_with_total_cache_size(total: u32) -> Self {
 		CacheConfig {
-			db: total * 7 / 8,
-			blockchain: total / 8,
+			db: total * 7 / 10,
+			blockchain: total / 10,
 			queue: DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB,
 			traces: DEFAULT_TRACE_CACHE_SIZE,
+			state: total * 2 / 10,
 		}
 	}

 	/// Creates new cache config with gitven details.
-	pub fn new(db: u32, blockchain: u32, queue: u32) -> Self {
+	pub fn new(db: u32, blockchain: u32, queue: u32, state: u32) -> Self {
 		CacheConfig {
 			db: db,
 			blockchain: blockchain,
 			queue: queue,
 			traces: DEFAULT_TRACE_CACHE_SIZE,
+			state: state,
 		}
 	}

 	/// Size of db cache for blockchain.
 	pub fn db_blockchain_cache_size(&self) -> u32 {
-		max(MIN_DB_CACHE_MB, self.blockchain / 4)
+		max(MIN_DB_CACHE_MB, self.db / 4)
 	}

 	/// Size of db cache for state.
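As a quick check of the new 70/10/20 split of a single total (the updated tests below exercise the same numbers), here is a minimal standalone sketch of the arithmetic; the SplitSketch struct is an illustrative stand-in, not Parity's CacheConfig.

// Illustrative re-implementation of the total-size split; field names mirror the diff,
// but this is a stand-in, not parity's CacheConfig.
struct SplitSketch {
    db: u32,
    blockchain: u32,
    state: u32,
}

fn split(total: u32) -> SplitSketch {
    SplitSketch {
        db: total * 7 / 10,     // rocksdb cache, mostly the state column
        blockchain: total / 10, // in-memory blockchain cache
        state: total * 2 / 10,  // in-memory state (account) cache
    }
}

fn main() {
    let s = split(200);
    assert_eq!((s.db, s.blockchain, s.state), (140, 20, 40)); // matches the updated test below
    println!("200 MB -> db {} MB, blockchain {} MB, state {} MB", s.db, s.blockchain, s.state);
}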
@@ -90,6 +92,11 @@ impl CacheConfig {
 	pub fn traces(&self) -> u32 {
 		self.traces
 	}
+
+	/// Size of the state cache.
+	pub fn state(&self) -> u32 {
+		self.state
+	}
 }

 #[cfg(test)]
@@ -99,21 +106,23 @@ mod tests {
 	#[test]
 	fn test_cache_config_constructor() {
 		let config = CacheConfig::new_with_total_cache_size(200);
-		assert_eq!(config.db, 175);
-		assert_eq!(config.blockchain(), 25);
+		assert_eq!(config.db, 140);
+		assert_eq!(config.blockchain(), 20);
 		assert_eq!(config.queue(), 50);
+		assert_eq!(config.state(), 40);
 	}

 	#[test]
 	fn test_cache_config_db_cache_sizes() {
 		let config = CacheConfig::new_with_total_cache_size(400);
-		assert_eq!(config.db, 350);
-		assert_eq!(config.db_blockchain_cache_size(), 12);
-		assert_eq!(config.db_state_cache_size(), 262);
+		assert_eq!(config.db, 280);
+		assert_eq!(config.db_blockchain_cache_size(), 70);
+		assert_eq!(config.db_state_cache_size(), 210);
 	}

 	#[test]
 	fn test_cache_config_default() {
-		assert_eq!(CacheConfig::default(), CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB));
+		assert_eq!(CacheConfig::default(),
+			CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, super::DEFAULT_STATE_CACHE_SIZE));
 	}
 }
@@ -79,6 +79,7 @@ pruning = "auto"
 cache_size_db = 64
 cache_size_blocks = 8
 cache_size_queue = 50
+cache_size_state = 25
 cache_size = 128 # Overrides above caches with total size
 fast_and_loose = false
 db_compaction = "ssd"
@@ -48,6 +48,7 @@ pruning = "fast"
 cache_size_db = 128
 cache_size_blocks = 16
 cache_size_queue = 100
+cache_size_state = 25
 db_compaction = "ssd"
 fat_db = "off"

@@ -211,6 +211,8 @@ usage! {
 			or |c: &Config| otry!(c.footprint).cache_size_blocks.clone(),
 		flag_cache_size_queue: u32 = 50u32,
 			or |c: &Config| otry!(c.footprint).cache_size_queue.clone(),
+		flag_cache_size_state: u32 = 25u32,
+			or |c: &Config| otry!(c.footprint).cache_size_state.clone(),
 		flag_cache_size: Option<u32> = None,
 			or |c: &Config| otry!(c.footprint).cache_size.clone().map(Some),
 		flag_fast_and_loose: bool = false,
@@ -361,6 +363,7 @@ struct Footprint {
 	cache_size_db: Option<u32>,
 	cache_size_blocks: Option<u32>,
 	cache_size_queue: Option<u32>,
+	cache_size_state: Option<u32>,
 	db_compaction: Option<String>,
 	fat_db: Option<String>,
 }
@@ -532,6 +535,7 @@ mod tests {
 			flag_cache_size_db: 64u32,
 			flag_cache_size_blocks: 8u32,
 			flag_cache_size_queue: 50u32,
+			flag_cache_size_state: 25u32,
 			flag_cache_size: Some(128),
 			flag_fast_and_loose: false,
 			flag_db_compaction: "ssd".into(),
@@ -686,6 +690,7 @@ mod tests {
 				cache_size_db: Some(128),
 				cache_size_blocks: Some(16),
 				cache_size_queue: Some(100),
+				cache_size_state: Some(25),
 				db_compaction: Some("ssd".into()),
 				fat_db: Some("off".into()),
 			}),
@@ -209,6 +209,8 @@ Footprint Options:
                                 megabytes (default: {flag_cache_size_blocks}).
   --cache-size-queue MB         Specify the maximum size of memory to use for block
                                 queue (default: {flag_cache_size_queue}).
+  --cache-size-state MB         Specify the maximum size of memory to use for
+                                the state cache (default: {flag_cache_size_state}).
   --cache-size MB               Set total amount of discretionary memory to use for
                                 the entire system, overrides other cache and queue
                                 options.a (default: {flag_cache_size:?})
@@ -291,7 +291,12 @@ impl Configuration {
 	fn cache_config(&self) -> CacheConfig {
 		match self.args.flag_cache_size.or(self.args.flag_cache) {
 			Some(size) => CacheConfig::new_with_total_cache_size(size),
-			None => CacheConfig::new(self.args.flag_cache_size_db, self.args.flag_cache_size_blocks, self.args.flag_cache_size_queue),
+			None => CacheConfig::new(
+				self.args.flag_cache_size_db,
+				self.args.flag_cache_size_blocks,
+				self.args.flag_cache_size_queue,
+				self.args.flag_cache_size_state,
+			),
 		}
 	}

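A minimal sketch of the precedence implemented above, assuming made-up names (CacheChoice, cache_choice): an explicit --cache-size total (or the legacy --cache flag) wins; otherwise the four per-cache flags, including the new state flag, are used individually.

// Stand-in for the selection logic; the enum and function are illustrative, not parity's code.
#[derive(Debug, PartialEq)]
enum CacheChoice {
    Total(u32),
    Parts { db: u32, blockchain: u32, queue: u32, state: u32 },
}

fn cache_choice(cache_size: Option<u32>, legacy_cache: Option<u32>,
                db: u32, blockchain: u32, queue: u32, state: u32) -> CacheChoice {
    match cache_size.or(legacy_cache) {
        // A single total overrides the individual cache flags.
        Some(size) => CacheChoice::Total(size),
        // Otherwise the per-cache flags are taken as-is.
        None => CacheChoice::Parts { db, blockchain, queue, state },
    }
}

fn main() {
    assert_eq!(cache_choice(Some(128), None, 64, 8, 50, 25), CacheChoice::Total(128));
    assert_eq!(cache_choice(None, None, 64, 8, 50, 25),
        CacheChoice::Parts { db: 64, blockchain: 8, queue: 50, state: 25 });
}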
@@ -215,6 +215,7 @@ pub fn to_client_config(
 	client_config.tracing.max_cache_size = cache_config.traces() as usize * mb;
 	// in bytes
 	client_config.tracing.pref_cache_size = cache_config.traces() as usize * 3 / 4 * mb;
+	client_config.state_cache_size = cache_config.state() as usize;

 	client_config.mode = mode;
 	client_config.tracing.enabled = tracing;