bloomfilter reset_chain_head

This commit is contained in:
debris 2016-02-12 02:03:04 +01:00
parent 160c52a14b
commit b73d528365
2 changed files with 61 additions and 18 deletions

View File

@ -522,25 +522,13 @@ impl BlockChain {
}); });
} }
// update block blooms
let blooms: Vec<H2048> = receipts.iter().map(|r| r.log_bloom.clone()).collect();
let modified_blooms = {
let filter = ChainFilter::new(self, self.bloom_index_size, self.bloom_levels);
let bloom = blooms.iter().fold(H2048::new(), | ref acc, b | acc | b);
filter.add_bloom(&bloom, header.number() as usize)
};
for (bloom_index, bloom) in modified_blooms.into_iter() { // save blooms (is it really required?). maybe store receipt whole instead?
let location = self.blocks_bloom_location(&bloom_index); //let blooms: Vec<H2048> = receipts.iter().map(|r| r.log_bloom.clone()).collect();
let mut blocks_blooms = self.blocks_blooms(&location.hash).unwrap_or_else(BlocksBlooms::new); //batch.put_extras(&hash, &BlockLogBlooms {
blocks_blooms.blooms[location.index] = bloom; //blooms: blooms
batch.put_extras(&location.hash, &blocks_blooms); //});
}
batch.put_extras(&hash, &BlockLogBlooms {
blooms: blooms
});
// if it's not new best block, just return // if it's not new best block, just return
if !is_new_best { if !is_new_best {
@ -556,7 +544,21 @@ impl BlockChain {
match route.blocks.len() { match route.blocks.len() {
// its our parent // its our parent
1 => batch.put_extras(&header.number(), &hash), 1 => {
// update block blooms
let modified_blooms = ChainFilter::new(self, self.bloom_index_size, self.bloom_levels)
.add_bloom(&header.log_bloom(), header.number() as usize);
for (bloom_index, bloom) in modified_blooms.into_iter() {
let location = self.blocks_bloom_location(&bloom_index);
let mut blocks_blooms = self.blocks_blooms(&location.hash).unwrap_or_else(BlocksBlooms::new);
blocks_blooms.blooms[location.index] = bloom;
batch.put_extras(&location.hash, &blocks_blooms);
}
batch.put_extras(&header.number(), &hash)
},
// it is a fork // it is a fork
i if i > 1 => { i if i > 1 => {
let ancestor_number = self.block_number(&route.ancestor).unwrap(); let ancestor_number = self.block_number(&route.ancestor).unwrap();
@ -564,6 +566,8 @@ impl BlockChain {
for (index, hash) in route.blocks.iter().skip(route.index).enumerate() { for (index, hash) in route.blocks.iter().skip(route.index).enumerate() {
batch.put_extras(&(start_number + index as BlockNumber), hash); batch.put_extras(&(start_number + index as BlockNumber), hash);
} }
// TODO: replace blooms from start_number to current
}, },
// route.blocks.len() could be 0 only if inserted block is best block, // route.blocks.len() could be 0 only if inserted block is best block,
// and this is not possible at this stage // and this is not possible at this stage

View File

@ -307,6 +307,45 @@ impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource
result result
} }
/// Resets blooms at level 0 and forces rebuild on higher levels.
///
/// Level-0 entries for `block_number..block_number + blooms.len()` receive the
/// supplied blooms; level-0 entries from there up to (but excluding)
/// `old_highest_block` are zeroed. Every higher-level index covering the new
/// head range is then recomputed by OR-ing its lower-level blooms, preferring
/// freshly updated values over what the data source currently holds.
///
/// Returns the map of bloom indexes that must be persisted by the caller.
pub fn reset_chain_head(&self, blooms: &[H2048], block_number: usize, old_highest_block: usize) -> HashMap<BloomIndex, H2048> {
	let mut updates: HashMap<BloomIndex, H2048> = HashMap::new();

	// Write the new head blooms into level 0.
	for (offset, bloom) in blooms.iter().enumerate() {
		updates.insert(self.bloom_index(block_number + offset, 0), bloom.clone());
	}

	// Zero out level-0 slots left over from the old (longer) chain.
	// NOTE(review): the range excludes `old_highest_block` itself — confirm
	// this matches the caller's definition of "highest block".
	for stale_number in block_number + blooms.len()..old_highest_block {
		updates.insert(self.bloom_index(stale_number, 0), H2048::new());
	}

	// Rebuild the affected indexes on every higher level.
	// NOTE(review): only indexes covering the new blooms are rebuilt here;
	// higher-level indexes that cover only zeroed slots are presumably
	// refreshed because they share indexes with the new range — verify.
	for level in 1..self.levels() {
		for offset in 0..blooms.len() {
			let index = self.bloom_index(block_number + offset, level);
			// Prefer an already-updated bloom over the stored one.
			let bloom_at = |idx| updates.get(&idx).cloned().or_else(|| self.data_source.bloom_at_index(&idx));
			let rebuilt = self.lower_level_bloom_indexes(&index)
				.into_iter()
				// fetch each lower-level bloom, drop the missing ones
				.filter_map(bloom_at)
				// BitOr all of them together
				.fold(H2048::new(), |acc, bloom| acc | bloom);
			updates.insert(index, rebuilt);
		}
	}

	updates
}
/// Sets lowest level bloom to 0 and forces rebuild on higher levels. /// Sets lowest level bloom to 0 and forces rebuild on higher levels.
pub fn clear_bloom(&self, block_number: usize) -> HashMap<BloomIndex, H2048> { pub fn clear_bloom(&self, block_number: usize) -> HashMap<BloomIndex, H2048> {
self.reset_bloom(&H2048::new(), block_number) self.reset_bloom(&H2048::new(), block_number)