Merge pull request #564 from ethcore/inclusive_bloom_ranges

chainfilter shouldn't exclude to_block from results
Gav Wood 2016-03-02 13:05:45 +01:00
commit 3b3399ccc3
2 changed files with 20 additions and 20 deletions
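
The change makes the `to_block` argument of the range searches inclusive. A minimal standalone sketch of that semantics (the function and data below are made up for illustration and are not part of ChainFilter):

// Illustration only: with an inclusive upper bound, a match at `to_block`
// itself is returned, which the old exclusive bound (`< to_block`) dropped.
fn matching_blocks(indexed: &[(u64, bool)], from_block: u64, to_block: u64) -> Vec<u64> {
    indexed
        .iter()
        // `<= to_block` rather than `< to_block`: the upper bound is part of the range
        .filter(|&&(number, has_match)| number >= from_block && number <= to_block && has_match)
        .map(|&(number, _)| number)
        .collect()
}

fn main() {
    // hypothetical data: only block 39 carries the bloom we search for
    let indexed = vec![(38, false), (39, true), (40, false)];
    // searching 10..=39 now reports block 39; an exclusive bound would miss it
    assert_eq!(matching_blocks(&indexed, 10, 39), vec![39]);
    assert!(matching_blocks(&indexed, 10, 38).is_empty());
}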


@@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Multilevel blockchain bloom filter.
//!
//! ```not_run
//! extern crate ethcore_util as util;
//! extern crate ethcore;
@@ -23,33 +23,33 @@
//! use util::sha3::*;
//! use util::hash::*;
//! use ethcore::chainfilter::*;
//!
//! fn main() {
//! let (index_size, bloom_levels) = (16, 3);
//! let mut cache = MemoryCache::new();
//!
//! let address = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap();
//!
//! // borrow cache for reading inside the scope
//! let modified_blooms = {
//! let filter = ChainFilter::new(&cache, index_size, bloom_levels);
//! let block_number = 39;
//! let mut bloom = H2048::new();
//! bloom.shift_bloomed(&address.sha3());
//! filter.add_bloom(&bloom, block_number)
//! };
//!
//! // number of updated blooms is equal number of levels
//! assert_eq!(modified_blooms.len(), bloom_levels as usize);
//!
//! // lets inserts modified blooms into the cache
//! cache.insert_blooms(modified_blooms);
//!
//! // borrow cache for another reading operations
//! {
//! let filter = ChainFilter::new(&cache, index_size, bloom_levels);
//! let blocks = filter.blocks_with_address(&address, 10, 40);
//! assert_eq!(blocks.len(), 1);
//! assert_eq!(blocks[0], 39);
//! }
//! }
@@ -71,7 +71,7 @@ pub struct ChainFilter<'a, D>
impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource
{
/// Creates new filter instance.
///
/// Borrows `FilterDataSource` for reading.
pub fn new(data_source: &'a D, index_size: usize, levels: u8) -> Self {
ChainFilter {
@@ -88,7 +88,7 @@ impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource
None => return None,
Some(level_bloom) => match level {
// if we are on the lowest level
- 0 => return match offset < to_block {
+ 0 => return match offset <= to_block {
// take the value if its smaller than to_block
true if level_bloom.contains(bloom) => Some(vec![offset]),
// return None if it is is equal to to_block
@@ -153,7 +153,7 @@ impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource
for i in 0..blooms.len() {
let index = self.indexer.bloom_index(block_number + i, level);
let new_bloom = {
// use new blooms before db blooms where necessary
let bloom_at = | index | { result.get(&index).cloned().or_else(|| self.data_source.bloom_at_index(&index)) };
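
The hunk at -88,7 above contains the functional change: at the lowest filter level, an offset equal to `to_block` now counts as a hit. A reduced sketch of just that arm (the shape is assumed for illustration; `bloom_matches` stands in for `level_bloom.contains(bloom)`, and the branch that descends into lower-level blooms is omitted):

// Reduced illustration of the lowest-level check, not the real method.
fn lowest_level_hit(offset: usize, to_block: usize, bloom_matches: bool) -> Option<Vec<usize>> {
    match offset <= to_block {
        // take the offset as long as it does not lie past to_block
        true if bloom_matches => Some(vec![offset]),
        // past to_block, or the bloom does not match at this offset
        _ => None,
    }
}

fn main() {
    // offset == to_block used to fall outside the searched range; now it is a hit
    assert_eq!(lowest_level_hit(39, 39, true), Some(vec![39]));
    assert_eq!(lowest_level_hit(40, 39, true), None);
}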


@@ -22,7 +22,7 @@ use util::sha3::*;
use chainfilter::{BloomIndex, FilterDataSource, ChainFilter};
/// In memory cache for blooms.
///
/// Stores all blooms in HashMap, which indexes them by `BloomIndex`.
pub struct MemoryCache {
blooms: HashMap<BloomIndex, H2048>,
@@ -35,7 +35,7 @@ impl MemoryCache {
}
/// inserts all blooms into cache
///
/// if bloom at given index already exists, overwrites it
pub fn insert_blooms(&mut self, blooms: HashMap<BloomIndex, H2048>) {
self.blooms.extend(blooms);
@@ -81,13 +81,13 @@ fn test_topic_basic_search() {
{
let filter = ChainFilter::new(&cache, index_size, bloom_levels);
- let blocks = filter.blocks_with_bloom(&to_bloom(&topic), 0, 23);
+ let blocks = filter.blocks_with_bloom(&to_bloom(&topic), 0, 22);
assert_eq!(blocks.len(), 0);
}
{
let filter = ChainFilter::new(&cache, index_size, bloom_levels);
- let blocks = filter.blocks_with_bloom(&to_bloom(&topic), 23, 24);
+ let blocks = filter.blocks_with_bloom(&to_bloom(&topic), 23, 23);
assert_eq!(blocks.len(), 1);
assert_eq!(blocks[0], 23);
}
@@ -144,7 +144,7 @@ fn test_reset_chain_head_simple() {
cache.insert_blooms(modified_blooms_3);
let reset_modified_blooms = {
let filter = ChainFilter::new(&cache, index_size, bloom_levels);
filter.reset_chain_head(&[to_bloom(&topic_4), to_bloom(&topic_5)], 15, 17)
@@ -183,7 +183,7 @@ fn for_each_bloom<F>(bytes: &[u8], mut f: F) where F: FnMut(usize, &H2048) {
}
fn for_each_log<F>(bytes: &[u8], mut f: F) where F: FnMut(usize, &Address, &[H256]) {
let mut reader = BufReader::new(bytes);
let mut line = String::new();
while reader.read_line(&mut line).unwrap() > 0 {
{
@@ -235,11 +235,11 @@ fn test_chainfilter_real_data_short_searches() {
for_each_log(include_bytes!("logs.txt"), | block_number, address, topics | {
println!("block_number: {:?}", block_number);
let filter = ChainFilter::new(&cache, index_size, bloom_levels);
- let blocks = filter.blocks_with_bloom(&to_bloom(address), block_number, block_number + 1);
+ let blocks = filter.blocks_with_bloom(&to_bloom(address), block_number, block_number);
assert_eq!(blocks.len(), 1);
for (i, topic) in topics.iter().enumerate() {
println!("topic: {:?}", i);
- let blocks = filter.blocks_with_bloom(&to_bloom(topic), block_number, block_number + 1);
+ let blocks = filter.blocks_with_bloom(&to_bloom(topic), block_number, block_number);
assert_eq!(blocks.len(), 1);
}
});