//! Multilevel blockchain bloom filter.
//!
//! ```
//! extern crate ethcore_util as util;
//! use std::str::FromStr;
//! use util::chainfilter::*;
//! use util::sha3::*;
//! use util::hash::*;
//!
//! fn main() {
//!     let (index_size, bloom_levels) = (16, 3);
//!     let mut cache = MemoryCache::new();
//!
//!     let address = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap();
//!
//!     // borrow cache for reading inside the scope
//!     let modified_blooms = {
//!         let filter = ChainFilter::new(&cache, index_size, bloom_levels);
//!         let block_number = 39;
//!         let mut bloom = H2048::new();
//!         bloom.shift_bloom(&address.sha3());
//!         filter.add_bloom(&bloom, block_number)
//!     };
//!
//!     // the number of updated blooms is equal to the number of levels
//!     assert_eq!(modified_blooms.len(), bloom_levels as usize);
//!
//!     // insert the modified blooms into the cache
//!     cache.insert_blooms(modified_blooms);
//!
//!     // borrow the cache for another read operation
//!     {
//!         let filter = ChainFilter::new(&cache, index_size, bloom_levels);
//!         let blocks = filter.blocks_with_address(&address, 10, 40);
//!         assert_eq!(blocks.len(), 1);
//!         assert_eq!(blocks[0], 39);
//!     }
//! }
//! ```
//!
use std::collections::HashMap;
use hash::*;
use sha3::*;

/// Represents a bloom index in the cache.
///
/// On cache level 0, every block bloom is represented by a different index.
/// On higher cache levels, multiple block blooms are represented by one
/// index. A `BloomIndex` can be created from a block number and a given level.
#[derive(Eq, PartialEq, Hash, Clone, Debug)]
pub struct BloomIndex {
    pub level: u8,
    pub index: usize,
}

impl BloomIndex {
    /// Default constructor for `BloomIndex`
    pub fn new(level: u8, index: usize) -> BloomIndex {
        BloomIndex {
            level: level,
            index: index,
        }
    }
}

/// Types implementing this trait provide read access to a bloom filter database.
pub trait FilterDataSource {
    /// Returns a reference to the bloom at the given index, if it exists.
    fn bloom_at_index(&self, index: &BloomIndex) -> Option<&H2048>;
}

/// In-memory cache for blooms.
///
/// Stores all blooms in a `HashMap` indexed by `BloomIndex`.
pub struct MemoryCache {
    blooms: HashMap<BloomIndex, H2048>,
}

impl MemoryCache {
    /// Default constructor for `MemoryCache`
    pub fn new() -> MemoryCache {
        MemoryCache { blooms: HashMap::new() }
    }

    /// Inserts all blooms into the cache.
    ///
    /// If a bloom at a given index already exists, it is overwritten.
    pub fn insert_blooms(&mut self, blooms: HashMap<BloomIndex, H2048>) {
        self.blooms.extend(blooms);
    }
}

impl FilterDataSource for MemoryCache {
    fn bloom_at_index(&self, index: &BloomIndex) -> Option<&H2048> {
        self.blooms.get(index)
    }
}

/// Should be used for search operations on the blockchain.
pub struct ChainFilter<'a, D>
    where D: FilterDataSource + 'a
{
    data_source: &'a D,
    index_size: usize,
    level_sizes: Vec<usize>,
}

impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource
{
    /// Creates a new filter instance.
    ///
    /// Borrows `FilterDataSource` for reading.
    pub fn new(data_source: &'a D, index_size: usize, levels: u8) -> Self {
        if levels == 0 {
            panic!("ChainFilter requires at least 1 level");
        }

        let mut filter = ChainFilter {
            data_source: data_source,
            index_size: index_size,
            // level 0 always has a size of 1
            level_sizes: vec![1]
        };

        // cache level sizes, so we do not have to calculate them all the time
        // e.g. if levels == 3 and index_size == 16,
        // level_sizes == [1, 16, 256]
        let additional: Vec<usize> = (1..).into_iter()
            .scan(1, |acc, _| {
                *acc = *acc * index_size;
                Some(*acc)
            })
            .take(levels as usize - 1)
            .collect();
        filter.level_sizes.extend(additional);

        filter
    }

    /// Returns the size of the given level; panics if the level is out of range.
    fn level_size(&self, level: u8) -> usize {
        self.level_sizes[level as usize]
    }

    /// Converts a block number and level to a `BloomIndex`.
    fn bloom_index(&self, block_number: usize, level: u8) -> BloomIndex {
        BloomIndex {
            level: level,
            index: block_number / self.level_size(level),
        }
    }
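
    // Worked example (illustrative, not from the original source): with
    // `index_size == 16` and 3 levels, as used in the tests below,
    //   bloom_index(39, 0) == BloomIndex { level: 0, index: 39 }  // 39 / 1
    //   bloom_index(39, 1) == BloomIndex { level: 1, index: 2 }   // 39 / 16
    //   bloom_index(39, 2) == BloomIndex { level: 2, index: 0 }   // 39 / 256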

    /// Returns the bloom indexes which are dependencies of the given index.
    ///
    /// Bloom indexes are ordered from lowest to highest.
    fn lower_level_bloom_indexes(&self, index: &BloomIndex) -> Vec<BloomIndex> {
        // this is the lowest level
        if index.level == 0 {
            return vec![];
        }

        let new_level = index.level - 1;
        let offset = self.index_size * index.index;

        (0..self.index_size).map(|i| BloomIndex::new(new_level, offset + i)).collect()
    }
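
    // For illustration (assuming `index_size == 16`): the dependencies of
    // `BloomIndex { level: 2, index: 1 }` are the level-1 indexes 16..=31,
    // as exercised by `test_lower_level_bloom_indexes` below.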

    /// Returns the number of levels.
    fn levels(&self) -> u8 {
        self.level_sizes.len() as u8
    }

    /// Returns the max filter level.
    fn max_level(&self) -> u8 {
        self.level_sizes.len() as u8 - 1
    }

    /// Internal function which performs the bloom search recursively.
    fn blocks(&self, bloom: &H2048, from_block: usize, to_block: usize, level: u8, offset: usize) -> Option<Vec<usize>> {
        let index = self.bloom_index(offset, level);

        match self.data_source.bloom_at_index(&index) {
            None => return None,
            Some(level_bloom) => match level {
                // if we are on the lowest level,
                // take the value, excluding to_block
                0 if offset < to_block => return Some(vec![offset]),
                // return None if it is equal to to_block
                0 => return None,
                // return None if the current level doesn't contain the given bloom
                _ if !level_bloom.contains(bloom) => return None,
                // otherwise continue processing && go down
                _ => ()
            }
        };

        let level_size = self.level_size(level - 1);
        let from_index = self.bloom_index(from_block, level - 1);
        let to_index = self.bloom_index(to_block, level - 1);
        let res: Vec<usize> = self.lower_level_bloom_indexes(&index).into_iter()
            // choose only blooms in range
            .filter(|li| li.index >= from_index.index && li.index <= to_index.index)
            // map them to offsets
            .map(|li| li.index * level_size)
            // get all blocks that may contain our bloom
            .map(|off| self.blocks(bloom, from_block, to_block, level - 1, off))
            // filter existing ones
            .filter_map(|x| x)
            // flatten nested structures
            .flat_map(|v| v)
            .collect();

        Some(res)
    }

    /// Adds a new bloom to all filter levels.
    pub fn add_bloom(&self, bloom: &H2048, block_number: usize) -> HashMap<BloomIndex, H2048> {
        let mut result: HashMap<BloomIndex, H2048> = HashMap::new();

        for level in 0..self.levels() {
            let bloom_index = self.bloom_index(block_number, level);
            let new_bloom = match self.data_source.bloom_at_index(&bloom_index) {
                Some(old_bloom) => old_bloom | bloom,
                None => bloom.clone(),
            };

            result.insert(bloom_index, new_bloom);
        }

        result
    }

    /// Adds new blooms, starting from the given block number.
    pub fn add_blooms(&self, blooms: &[H2048], block_number: usize) -> HashMap<BloomIndex, H2048> {
        let mut result: HashMap<BloomIndex, H2048> = HashMap::new();

        for level in 0..self.levels() {
            for i in 0..blooms.len() {
                let bloom_index = self.bloom_index(block_number + i, level);
                let is_new_bloom = match result.get_mut(&bloom_index) {
                    // this index was already modified in this call
                    Some(to_shift) => {
                        *to_shift = &blooms[i] | to_shift;
                        false
                    }
                    None => true,
                };

                // it hasn't been modified yet
                if is_new_bloom {
                    let new_bloom = match self.data_source.bloom_at_index(&bloom_index) {
                        Some(old_bloom) => old_bloom | &blooms[i],
                        None => blooms[i].clone(),
                    };
                    result.insert(bloom_index, new_bloom);
                }
            }
        }

        result
    }

    /// Resets the bloom at level 0 and forces a rebuild on higher levels.
    pub fn reset_bloom(&self, bloom: &H2048, block_number: usize) -> HashMap<BloomIndex, H2048> {
        let mut result: HashMap<BloomIndex, H2048> = HashMap::new();

        let mut reset_index = self.bloom_index(block_number, 0);
        result.insert(reset_index.clone(), bloom.clone());

        for level in 1..self.levels() {
            let index = self.bloom_index(block_number, level);
            // get all bloom indexes that were used to construct this bloom
            let lower_indexes = self.lower_level_bloom_indexes(&index);
            let new_bloom = lower_indexes.into_iter()
                // skip the reset one
                .filter(|li| li != &reset_index)
                // get blooms for these indexes
                .map(|li| self.data_source.bloom_at_index(&li))
                // filter existing ones
                .filter_map(|b| b)
                // BitOr all of them
                .fold(H2048::new(), |acc, bloom| &acc | bloom);

            reset_index = index.clone();
            result.insert(index, &new_bloom | bloom);
        }

        result
    }

    /// Sets the lowest level bloom to 0 and forces a rebuild on higher levels.
    pub fn clear_bloom(&self, block_number: usize) -> HashMap<BloomIndex, H2048> {
        self.reset_bloom(&H2048::new(), block_number)
    }

    /// Returns the numbers of blocks that may contain the given address.
    pub fn blocks_with_address(&self, address: &Address, from_block: usize, to_block: usize) -> Vec<usize> {
        let mut bloom = H2048::new();
        bloom.shift_bloom(&address.sha3());
        self.blocks_with_bloom(&bloom, from_block, to_block)
    }

    /// Returns the numbers of blocks that may contain the given topic.
    pub fn blocks_with_topic(&self, topic: &H256, from_block: usize, to_block: usize) -> Vec<usize> {
        let mut bloom = H2048::new();
        bloom.shift_bloom(&topic.sha3());
        self.blocks_with_bloom(&bloom, from_block, to_block)
    }

    /// Returns the numbers of blocks that may log the given bloom.
    pub fn blocks_with_bloom(&self, bloom: &H2048, from_block: usize, to_block: usize) -> Vec<usize> {
        let mut result = vec![];
        // let's start from the highest level
        let max_level = self.max_level();
        let level_size = self.level_size(max_level);
        let from_index = self.bloom_index(from_block, max_level);
        let to_index = self.bloom_index(to_block, max_level);

        for index in from_index.index..to_index.index + 1 {
            // offset will be used to calculate where we are right now
            let offset = level_size * index;

            // go down!
            match self.blocks(bloom, from_block, to_block, max_level, offset) {
                Some(blocks) => result.extend(blocks),
                None => ()
            };
        }

        result
    }
}

#[cfg(test)]
mod tests {
    use hash::*;
    use chainfilter::*;
    use sha3::*;
    use std::str::FromStr;

    #[test]
    fn test_level_size() {
        let cache = MemoryCache::new();
        let filter = ChainFilter::new(&cache, 16, 3);
        assert_eq!(filter.level_size(0), 1);
        assert_eq!(filter.level_size(1), 16);
        assert_eq!(filter.level_size(2), 256);
    }
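
    // A small additional check (not in the original tests, but it follows directly
    // from `level_sizes` as built by `ChainFilter::new`): a 3-level filter
    // reports 3 levels and a max level of 2.
    #[test]
    fn test_levels_and_max_level() {
        let cache = MemoryCache::new();
        let filter = ChainFilter::new(&cache, 16, 3);
        assert_eq!(filter.levels(), 3);
        assert_eq!(filter.max_level(), 2);
    }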

    #[test]
    fn test_bloom_index() {
        let cache = MemoryCache::new();
        let filter = ChainFilter::new(&cache, 16, 3);

        let bi0 = filter.bloom_index(0, 0);
        assert_eq!(bi0.level, 0);
        assert_eq!(bi0.index, 0);

        let bi1 = filter.bloom_index(1, 0);
        assert_eq!(bi1.level, 0);
        assert_eq!(bi1.index, 1);

        let bi2 = filter.bloom_index(2, 0);
        assert_eq!(bi2.level, 0);
        assert_eq!(bi2.index, 2);

        let bi3 = filter.bloom_index(3, 1);
        assert_eq!(bi3.level, 1);
        assert_eq!(bi3.index, 0);

        let bi4 = filter.bloom_index(15, 1);
        assert_eq!(bi4.level, 1);
        assert_eq!(bi4.index, 0);

        let bi5 = filter.bloom_index(16, 1);
        assert_eq!(bi5.level, 1);
        assert_eq!(bi5.index, 1);

        let bi6 = filter.bloom_index(255, 2);
        assert_eq!(bi6.level, 2);
        assert_eq!(bi6.index, 0);

        let bi7 = filter.bloom_index(256, 2);
        assert_eq!(bi7.level, 2);
        assert_eq!(bi7.index, 1);
    }

    #[test]
    fn test_lower_level_bloom_indexes() {
        let cache = MemoryCache::new();
        let filter = ChainFilter::new(&cache, 16, 3);

        let bi = filter.bloom_index(256, 2);
        assert_eq!(bi.level, 2);
        assert_eq!(bi.index, 1);

        let mut ebis = vec![];
        for i in 16..32 {
            ebis.push(BloomIndex::new(1, i));
        }

        let bis = filter.lower_level_bloom_indexes(&bi);
        assert_eq!(ebis, bis);
    }

    #[test]
    fn test_topic_basic_search() {
        let index_size = 16;
        let bloom_levels = 3;

        let mut cache = MemoryCache::new();
        let topic = H256::from_str("8d936b1bd3fc635710969ccfba471fb17d598d9d1971b538dd712e1e4b4f4dba").unwrap();

        let modified_blooms = {
            let filter = ChainFilter::new(&cache, index_size, bloom_levels);
            let block_number = 23;
            let mut bloom = H2048::new();
            bloom.shift_bloom(&topic.sha3());
            filter.add_bloom(&bloom, block_number)
        };

        // the number of modified blooms should always equal the number of levels
        assert_eq!(modified_blooms.len(), bloom_levels as usize);
        cache.insert_blooms(modified_blooms);

        {
            let filter = ChainFilter::new(&cache, index_size, bloom_levels);
            let blocks = filter.blocks_with_topic(&topic, 0, 100);
            assert_eq!(blocks.len(), 1);
            assert_eq!(blocks[0], 23);
        }

        {
            let filter = ChainFilter::new(&cache, index_size, bloom_levels);
            let blocks = filter.blocks_with_topic(&topic, 0, 23);
            assert_eq!(blocks.len(), 0);
        }

        {
            let filter = ChainFilter::new(&cache, index_size, bloom_levels);
            let blocks = filter.blocks_with_topic(&topic, 23, 24);
            assert_eq!(blocks.len(), 1);
            assert_eq!(blocks[0], 23);
        }

        {
            let filter = ChainFilter::new(&cache, index_size, bloom_levels);
            let blocks = filter.blocks_with_topic(&topic, 24, 100);
            assert_eq!(blocks.len(), 0);
        }
    }
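
    // A companion test mirroring the module-level doc example (address inserted
    // at block 39, then searched in the range 10..40); added for illustration.
    #[test]
    fn test_address_basic_search() {
        let index_size = 16;
        let bloom_levels = 3;

        let mut cache = MemoryCache::new();
        let address = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap();

        let modified_blooms = {
            let filter = ChainFilter::new(&cache, index_size, bloom_levels);
            let block_number = 39;
            let mut bloom = H2048::new();
            bloom.shift_bloom(&address.sha3());
            filter.add_bloom(&bloom, block_number)
        };

        cache.insert_blooms(modified_blooms);

        {
            let filter = ChainFilter::new(&cache, index_size, bloom_levels);
            let blocks = filter.blocks_with_address(&address, 10, 40);
            assert_eq!(blocks.len(), 1);
            assert_eq!(blocks[0], 39);
        }
    }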
}