Memory usage optimization (#5526)

This commit is contained in:
Arkadiy Paronyan 2017-05-02 11:40:03 +02:00 committed by Gav Wood
parent c39da9643e
commit da2f684f18
6 changed files with 18 additions and 16 deletions

4
Cargo.lock generated
View File

@@ -2044,7 +2044,7 @@ dependencies = [
[[package]]
name = "rocksdb"
version = "0.4.5"
source = "git+https://github.com/paritytech/rust-rocksdb#8579e896a98cdeff086392236d411dd4aa141774"
source = "git+https://github.com/paritytech/rust-rocksdb#acd192f6ee017a3e8be704958617349d20ee783b"
dependencies = [
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb-sys 0.3.0 (git+https://github.com/paritytech/rust-rocksdb)",
@@ -2053,7 +2053,7 @@ dependencies = [
[[package]]
name = "rocksdb-sys"
version = "0.3.0"
source = "git+https://github.com/paritytech/rust-rocksdb#8579e896a98cdeff086392236d411dd4aa141774"
source = "git+https://github.com/paritytech/rust-rocksdb#acd192f6ee017a3e8be704958617349d20ee783b"
dependencies = [
"gcc 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@@ -123,7 +123,7 @@ impl Light {
return Err(io::Error::new(io::ErrorKind::Other, "Cache file size mismatch"));
}
let num_nodes = cache_size / NODE_BYTES;
let mut nodes: Vec<Node> = Vec::new();
let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
nodes.resize(num_nodes, unsafe { mem::uninitialized() });
let buf = unsafe { slice::from_raw_parts_mut(nodes.as_mut_ptr() as *mut u8, cache_size) };
file.read_exact(buf)?;
@@ -342,7 +342,6 @@ fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node {
}
fn light_new(block_number: u64) -> Light {
let seed_compute = SeedHashCompute::new();
let seedhash = seed_compute.get_seedhash(block_number);
let cache_size = get_cache_size(block_number);

View File

@@ -19,7 +19,7 @@ use std::cmp::max;
const MIN_BC_CACHE_MB: u32 = 4;
const MIN_DB_CACHE_MB: u32 = 2;
const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16;
const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 50;
const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 40;
const DEFAULT_TRACE_CACHE_SIZE: u32 = 20;
const DEFAULT_STATE_CACHE_SIZE: u32 = 25;
@@ -41,7 +41,7 @@ pub struct CacheConfig {
impl Default for CacheConfig {
fn default() -> Self {
CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, DEFAULT_STATE_CACHE_SIZE)
CacheConfig::new(32, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, DEFAULT_STATE_CACHE_SIZE)
}
}
@@ -113,7 +113,7 @@ mod tests {
let config = CacheConfig::new_with_total_cache_size(200);
assert_eq!(config.db, 140);
assert_eq!(config.blockchain(), 20);
assert_eq!(config.queue(), 50);
assert_eq!(config.queue(), 40);
assert_eq!(config.state(), 30);
assert_eq!(config.jump_tables(), 10);
}
@@ -129,6 +129,6 @@ mod tests {
#[test]
fn test_cache_config_default() {
assert_eq!(CacheConfig::default(),
CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, super::DEFAULT_STATE_CACHE_SIZE));
CacheConfig::new(32, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, super::DEFAULT_STATE_CACHE_SIZE));
}
}

View File

@@ -294,13 +294,13 @@ usage! {
or |c: &Config| otry!(c.footprint).pruning.clone(),
flag_pruning_history: u64 = 64u64,
or |c: &Config| otry!(c.footprint).pruning_history.clone(),
flag_pruning_memory: usize = 75usize,
flag_pruning_memory: usize = 32usize,
or |c: &Config| otry!(c.footprint).pruning_memory.clone(),
flag_cache_size_db: u32 = 64u32,
flag_cache_size_db: u32 = 32u32,
or |c: &Config| otry!(c.footprint).cache_size_db.clone(),
flag_cache_size_blocks: u32 = 8u32,
or |c: &Config| otry!(c.footprint).cache_size_blocks.clone(),
flag_cache_size_queue: u32 = 50u32,
flag_cache_size_queue: u32 = 40u32,
or |c: &Config| otry!(c.footprint).cache_size_queue.clone(),
flag_cache_size_state: u32 = 25u32,
or |c: &Config| otry!(c.footprint).cache_size_state.clone(),

View File

@@ -1124,7 +1124,7 @@ mod tests {
format: Default::default(),
pruning: Default::default(),
pruning_history: 64,
pruning_memory: 75,
pruning_memory: 32,
compaction: Default::default(),
wal: true,
tracing: Default::default(),
@@ -1147,7 +1147,7 @@ mod tests {
file_path: Some("blockchain.json".into()),
pruning: Default::default(),
pruning_history: 64,
pruning_memory: 75,
pruning_memory: 32,
format: Default::default(),
compaction: Default::default(),
wal: true,
@@ -1170,7 +1170,7 @@ mod tests {
file_path: Some("state.json".into()),
pruning: Default::default(),
pruning_history: 64,
pruning_memory: 75,
pruning_memory: 32,
format: Default::default(),
compaction: Default::default(),
wal: true,
@@ -1195,7 +1195,7 @@ mod tests {
file_path: Some("blockchain.json".into()),
pruning: Default::default(),
pruning_history: 64,
pruning_memory: 75,
pruning_memory: 32,
format: Some(DataFormat::Hex),
compaction: Default::default(),
wal: true,
@@ -1231,7 +1231,7 @@ mod tests {
spec: Default::default(),
pruning: Default::default(),
pruning_history: 64,
pruning_memory: 75,
pruning_memory: 32,
daemon: None,
logger_config: Default::default(),
miner_options: Default::default(),

View File

@@ -35,6 +35,7 @@ use std::fs::File;
const DB_BACKGROUND_FLUSHES: i32 = 2;
const DB_BACKGROUND_COMPACTIONS: i32 = 2;
const DB_WRITE_BUFFER_SIZE: usize = 2048 * 1000;
/// Required length of prefixes.
pub const PREFIX_LEN: usize = 12;
@@ -440,6 +441,7 @@ fn col_config(col: u32, config: &DatabaseConfig) -> Options {
opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction);
opts.set_target_file_size_base(config.compaction.initial_file_size);
opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier);
opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);
let col_opt = config.columns.map(|_| col);
@@ -487,6 +489,7 @@ impl Database {
opts.set_max_open_files(config.max_open_files);
opts.create_if_missing(true);
opts.set_use_fsync(false);
opts.set_db_write_buffer_size(DB_WRITE_BUFFER_SIZE);
opts.set_max_background_flushes(DB_BACKGROUND_FLUSHES);
opts.set_max_background_compactions(DB_BACKGROUND_COMPACTIONS);