Limit by entry count

arkpar 2017-03-25 15:25:20 +01:00
parent b52c7bba4b
commit b0a9c1e0fa
2 changed files with 7 additions and 5 deletions
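
Summary: account records in state snapshots were previously split by an estimated byte budget (a preferred size divided by an assumed average of 47 bytes per storage entry); this commit replaces that estimate with an explicit cap on the number of storage entries per record, passed in by the caller.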

View File

@@ -63,15 +63,14 @@ impl CodeState {
 // walk the account's storage trie, returning an RLP item containing the
 // account properties and the storage.
-pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, preferred_size: usize) -> Result<Vec<Bytes>, Error> {
-	const AVERAGE_BYTES_PER_STORAGE_ENTRY: usize = 47;
+pub fn to_fat_rlps(acc: &BasicAccount, acct_db: &AccountDB, used_code: &mut HashSet<H256>, max_storage_items: usize) -> Result<Vec<Bytes>, Error> {
 	if acc == &ACC_EMPTY {
 		return Ok(vec![::rlp::NULL_RLP.to_vec()]);
 	}
 
 	let db = TrieDB::new(acct_db, &acc.storage_root)?;
 
-	let chunks = db.iter()?.chunks(preferred_size / AVERAGE_BYTES_PER_STORAGE_ENTRY);
+	let chunks = db.iter()?.chunks(max_storage_items);
 	let pair_chunks = chunks.into_iter().map(|chunk| chunk.collect());
 	pair_chunks.pad_using(1, |_| Vec::new(), ).map(|pairs| {
 		let mut stream = RlpStream::new_list(pairs.len());
@@ -253,7 +252,7 @@ mod tests {
 		let thin_rlp = ::rlp::encode(&account);
 		assert_eq!(::rlp::decode::<BasicAccount>(&thin_rlp), account);
-		let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 1000).unwrap();
+		let fat_rlps = to_fat_rlps(&account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 100).unwrap();
 		let mut root = SHA3_NULL_RLP;
 		let mut restored_account = None;
 		for rlp in fat_rlps {
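
For illustration, here is a minimal standalone sketch of the chunking pattern used above (not the parity code; the function name, pair type, and values are made up, while the itertools calls mirror the diff): chunks splits the iterator into groups of at most max_storage_items, and pad_using(1, ...) guarantees at least one, possibly empty, group so that an account with no storage still produces a record.

// Minimal sketch (assumed names; requires the `itertools` crate).
use itertools::Itertools;

// Split key/value pairs into groups of at most `max_storage_items`,
// emitting at least one (possibly empty) group.
fn chunk_pairs(pairs: Vec<(u64, u64)>, max_storage_items: usize) -> Vec<Vec<(u64, u64)>> {
	// `chunks` groups the iterator lazily; the `IntoChunks` value must
	// stay alive while the groups are consumed, hence the binding.
	let chunks = pairs.into_iter().chunks(max_storage_items);
	let groups = chunks.into_iter().map(|chunk| chunk.collect());
	// `pad_using(1, ...)` appends empty groups until there is at least one.
	groups.pad_using(1, |_| Vec::new()).collect()
}

fn main() {
	// Five entries with a cap of two -> groups of sizes 2, 2, 1.
	let groups = chunk_pairs(vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], 2);
	assert_eq!(groups.len(), 3);
	assert_eq!(groups[2], vec![(5, 5)]);
	// No entries -> exactly one empty group, mirroring how an empty
	// storage trie still yields a single account record.
	assert_eq!(chunk_pairs(Vec::new(), 2), vec![Vec::<(u64, u64)>::new()]);
}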

View File

@@ -83,6 +83,9 @@ mod traits {
 // Try to have chunks be around 4MB (before compression)
 const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;
 
+// Try to have chunks be around 4MB (before compression)
+const MAX_STORAGE_ENTRIES_PER_ACCOUNT_RECORD: usize = 80_000;
+
 // How many blocks to include in a snapshot, starting from the head of the chain.
 const SNAPSHOT_BLOCKS: u64 = 30000;
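
If the old estimate of ~47 bytes per storage entry (the AVERAGE_BYTES_PER_STORAGE_ENTRY constant deleted above) is taken at face value, the new limit appears chosen to keep records near the same 4MB target: 80_000 entries × 47 bytes ≈ 3.76 MB, just under PREFERRED_CHUNK_SIZE = 4 × 1024 × 1024 ≈ 4.19 MB.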
@@ -374,7 +377,7 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter +
 		let account_db = AccountDB::from_hash(db, account_key_hash);
 
-		let fat_rlps = account::to_fat_rlps(&account, &account_db, &mut used_code, PREFERRED_CHUNK_SIZE)?;
+		let fat_rlps = account::to_fat_rlps(&account, &account_db, &mut used_code, MAX_STORAGE_ENTRIES_PER_ACCOUNT_RECORD)?;
 		for (i, fat_rlp) in fat_rlps.into_iter().enumerate() {
 			chunker.push(account_key.clone(), fat_rlp, i > 0)?;
 		}
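
Since one account's storage may now span several fat RLP records, the records are enumerated and `i > 0` is passed as the third argument to `chunker.push`; this flag presumably marks every record after the first as a continuation of the same account, so the restorer can merge them rather than treat each as a fresh account.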