From b0a9c1e0fa87eb25a729d47af4ae5d1ae3b838b5 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 25 Mar 2017 15:25:20 +0100 Subject: [PATCH] Limit by entry count --- ethcore/src/snapshot/account.rs | 7 +++---- ethcore/src/snapshot/mod.rs | 5 ++++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index 4f80d61ea..b06c220e7 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -63,15 +63,14 @@ impl CodeState { // walk the account's storage trie, returning an RLP item containing the // account properties and the storage. -pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, preferred_size: usize) -> Result<Vec<Bytes>, Error> { - const AVERAGE_BYTES_PER_STORAGE_ENTRY: usize = 47; +pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, max_storage_items: usize) -> Result<Vec<Bytes>, Error> { if acc == &ACC_EMPTY { return Ok(vec![::rlp::NULL_RLP.to_vec()]); } let db = TrieDB::new(acct_db, &acc.storage_root)?; - let chunks = db.iter()?.chunks(preferred_size / AVERAGE_BYTES_PER_STORAGE_ENTRY); + let chunks = db.iter()?.chunks(max_storage_items); let pair_chunks = chunks.into_iter().map(|chunk| chunk.collect()); pair_chunks.pad_using(1, |_| Vec::new(), ).map(|pairs| { let mut stream = RlpStream::new_list(pairs.len()); @@ -253,7 +252,7 @@ mod tests { let thin_rlp = ::rlp::encode(&account); assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account); - let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 1000).unwrap(); + let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 100).unwrap(); let mut root = SHA3_NULL_RLP; let mut restored_account = None; for rlp in fat_rlps { diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 57d044a47..1241152d0 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ 
-83,6 +83,9 @@ mod traits { // Try to have chunks be around 4MB (before compression) const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024; +// Maximum number of storage entries to include in a single account record when chunking state. +const MAX_STORAGE_ENTRIES_PER_ACCOUNT_RECORD: usize = 80_000; + // How many blocks to include in a snapshot, starting from the head of the chain. const SNAPSHOT_BLOCKS: u64 = 30000; @@ -374,7 +377,7 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<ChunkSink<'a>>) -> Result<Vec<H256>, Error> { let account_db = AccountDB::from_hash(db, account_key_hash); -		let fat_rlps = account::to_fat_rlps(&account, &account_db, &mut used_code, PREFERRED_CHUNK_SIZE)?; +		let fat_rlps = account::to_fat_rlps(&account, &account_db, &mut used_code, MAX_STORAGE_ENTRIES_PER_ACCOUNT_RECORD)?; for (i, fat_rlp) in fat_rlps.into_iter().enumerate() { chunker.push(account_key.clone(), fat_rlp, i > 0)?; }