Merge branch 'master' into trie_recursion

debris
2017-08-29 12:23:37 +02:00
70 changed files with 1602 additions and 748 deletions

View File

@@ -13,6 +13,7 @@ rustc-hex = "1.0"
rand = "0.3.12"
libc = "0.2"
heapsize = { version = "0.4", optional = true }
plain_hasher = { path = "../plain_hasher" }
[features]
x64asm_arithmetic=[]

View File

@@ -16,6 +16,7 @@ use std::collections::{HashMap, HashSet};
use rand::{Rand, Rng};
use rand::os::OsRng;
use rustc_hex::{FromHex, FromHexError};
use plain_hasher::PlainHasher;
use bigint::U256;
use libc::{c_void, memcmp};
@@ -446,41 +447,6 @@ impl_hash!(H2048, 256);
known_heap_size!(0, H32, H64, H128, H160, H256, H264, H512, H520, H1024, H2048);
// Specialized HashMap and HashSet
/// Hasher that just takes 8 bytes of the provided value.
/// May only be used for keys which are 32 bytes.
pub struct PlainHasher {
prefix: [u8; 8],
_marker: [u64; 0], // for alignment
}
impl Default for PlainHasher {
#[inline]
fn default() -> PlainHasher {
PlainHasher {
prefix: [0; 8],
_marker: [0; 0],
}
}
}
impl Hasher for PlainHasher {
#[inline]
fn finish(&self) -> u64 {
unsafe { ::std::mem::transmute(self.prefix) }
}
#[inline]
fn write(&mut self, bytes: &[u8]) {
debug_assert!(bytes.len() == 32);
for quarter in bytes.chunks(8) {
for (x, y) in self.prefix.iter_mut().zip(quarter) {
*x ^= *y
}
}
}
}
/// Specialized version of `HashMap` with H256 keys and fast hashing function.
pub type H256FastMap<T> = HashMap<H256, T, BuildHasherDefault<PlainHasher>>;
/// Specialized version of `HashSet` with H256 keys and fast hashing function.

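This hunk moves PlainHasher out of the hash module and into the new plain_hasher crate (added below); the H256FastMap/H256FastSet aliases stay behind. A minimal usage sketch, assuming only the H256FastMap alias above and the H256::from_slice constructor that appears elsewhere in this commit:

    use std::collections::HashMap;

    // PlainHasher is sound here only because H256 keys are already
    // uniformly distributed hashes, so 8 bytes of the key make a fine u64 hash.
    let mut map: H256FastMap<u32> = HashMap::default();
    let key = H256::from_slice(&[0xab; 32]);
    map.insert(key, 1);
    assert_eq!(map.get(&key), Some(&1));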
View File

@@ -14,6 +14,7 @@ extern crate rand;
extern crate rustc_hex;
extern crate bigint;
extern crate libc;
extern crate plain_hasher;
#[cfg(feature="heapsizeof")]
#[macro_use]

View File

@@ -37,9 +37,9 @@ struct BitVecJournal {
impl BitVecJournal {
pub fn new(size: usize) -> BitVecJournal {
let extra = if size % 8 > 0 { 1 } else { 0 };
let extra = if size % 64 > 0 { 1 } else { 0 };
BitVecJournal {
elems: vec![0u64; size / 8 + extra],
elems: vec![0u64; size / 64 + extra],
journal: HashSet::new(),
}
}

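The fix here: elems is a Vec<u64>, so each element stores 64 bits, but the old arithmetic sized it at 8 bits per element, over-allocating by a factor of eight. The new size / 64 + extra is just ceiling division; an equivalent one-liner, for illustration:

    let words = (size + 63) / 64; // ceil(size / 64): one u64 word per 64 bloom bits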
View File

@@ -0,0 +1,12 @@
[package]
name = "plain_hasher"
description = "Hasher for 32-byte keys."
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
keywords = ["hash", "hasher"]
categories = ["no-std"]
homepage = "https://github.com/paritytech/plain_hasher"
[dependencies]
crunchy = "0.1.6"

View File

@@ -0,0 +1,33 @@
#![feature(test)]
extern crate test;
extern crate plain_hasher;
use std::hash::Hasher;
use std::collections::hash_map::DefaultHasher;
use test::{Bencher, black_box};
use plain_hasher::PlainHasher;
#[bench]
fn write_plain_hasher(b: &mut Bencher) {
b.iter(|| {
let n: u8 = black_box(100);
(0..n).fold(PlainHasher::default(), |mut old, new| {
let bb = black_box([new; 32]);
old.write(&bb as &[u8]);
old
});
});
}
#[bench]
fn write_default_hasher(b: &mut Bencher) {
b.iter(|| {
let n: u8 = black_box(100);
(0..n).fold(DefaultHasher::default(), |mut old, new| {
let bb = black_box([new; 32]);
old.write(&bb as &[u8]);
old
});
});
}
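Both benchmarks hash one hundred 32-byte arrays per iteration, exactly the workload PlainHasher is specialized for; DefaultHasher (SipHash) does genuine mixing and should be markedly slower here. Because of #![feature(test)], this file only compiles on a nightly toolchain, e.g. via cargo +nightly bench under rustup.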

View File

@@ -0,0 +1,55 @@
#![no_std]
#[macro_use]
extern crate crunchy;
use core::{hash, mem};
/// Hasher that just takes 8 bytes of the provided value.
/// May only be used for keys which are 32 bytes.
#[derive(Default)]
pub struct PlainHasher {
prefix: u64,
}
impl hash::Hasher for PlainHasher {
#[inline]
fn finish(&self) -> u64 {
self.prefix
}
#[inline]
#[allow(unused_assignments)]
fn write(&mut self, bytes: &[u8]) {
debug_assert!(bytes.len() == 32);
unsafe {
let mut bytes_ptr = bytes.as_ptr();
let prefix_u8: &mut [u8; 8] = mem::transmute(&mut self.prefix);
let mut prefix_ptr = prefix_u8.as_mut_ptr();
unroll! {
for _i in 0..8 {
*prefix_ptr ^= (*bytes_ptr ^ *bytes_ptr.offset(8)) ^ (*bytes_ptr.offset(16) ^ *bytes_ptr.offset(24));
bytes_ptr = bytes_ptr.offset(1);
prefix_ptr = prefix_ptr.offset(1);
}
}
}
}
}
#[cfg(test)]
mod tests {
use core::hash::Hasher;
use super::PlainHasher;
#[test]
fn it_works() {
let mut bytes = [32u8; 32];
bytes[0] = 15;
let mut hasher = PlainHasher::default();
hasher.write(&bytes);
assert_eq!(hasher.prefix, 47);
}
}
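The unsafe write above views the u64 prefix as 8 bytes and, in unrolled lane i, XORs in input bytes i, i+8, i+16 and i+24 — it folds the four 8-byte quarters of the 32-byte key together, exactly like the safe loop it replaces. A safe sketch of the same fold (from_ne_bytes is a later std helper, used here only for illustration):

    fn xor_fold(bytes: &[u8; 32]) -> u64 {
        let mut prefix = [0u8; 8];
        for quarter in bytes.chunks(8) {
            // XOR each 8-byte quarter into the running prefix, lane by lane.
            for (p, b) in prefix.iter_mut().zip(quarter) {
                *p ^= *b;
            }
        }
        u64::from_ne_bytes(prefix) // native-endian, matching the transmute
    }

This also explains the test constant: with [32u8; 32] and bytes[0] = 15, every lane cancels to 0 except lane 0, which is 15 ^ 32 ^ 32 ^ 32 = 47.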

View File

@@ -20,7 +20,7 @@
extern crate futures;
extern crate tokio_core;
use std::thread;
use std::{fmt, thread};
use std::sync::mpsc;
use std::time::Duration;
use futures::{Future, IntoFuture};
@@ -81,7 +81,19 @@ enum Mode {
ThreadPerFuture,
}
#[derive(Clone)]
impl fmt::Debug for Mode {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::Mode::*;
match *self {
Tokio(_) => write!(fmt, "tokio"),
Sync => write!(fmt, "synchronous"),
ThreadPerFuture => write!(fmt, "thread per future"),
}
}
}
#[derive(Debug, Clone)]
pub struct Remote {
inner: Mode,
}

View File

@@ -17,6 +17,7 @@
//! Disk-backed `HashDB` implementation.
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use rlp::*;
use hashdb::*;
@@ -66,23 +67,28 @@ impl ArchiveDB {
impl HashDB for ArchiveDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter(self.column) {
let h = H256::from_slice(&*key);
ret.insert(h, 1);
}
let mut ret: HashMap<H256, i32> = self.backing.iter(self.column)
.map(|(key, _)| (H256::from_slice(&*key), 1))
.collect();
for (key, refs) in self.overlay.keys() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
match ret.entry(key) {
Entry::Occupied(mut entry) => {
*entry.get_mut() += refs;
},
Entry::Vacant(entry) => {
entry.insert(refs);
}
}
}
ret
}
fn get(&self, key: &H256) -> Option<DBValue> {
let k = self.overlay.raw(key);
if let Some((d, rc)) = k {
if rc > 0 { return Some(d); }
if let Some((d, rc)) = self.overlay.raw(key) {
if rc > 0 {
return Some(d);
}
}
self.payload(key)
}
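The rewritten keys() builds the backing map with collect() and then folds the overlay counts in through the Entry API, hashing each key once instead of twice (get, then insert). The whole match is equivalent to the shorter or_insert form; a sketch with the same semantics:

    for (key, refs) in self.overlay.keys() {
        // entry() locates the slot once; or_insert(0) covers the vacant case.
        *ret.entry(key).or_insert(0) += refs;
    }

The same pattern recurs in EarlyMergeDB, OverlayRecentDB and OverlayDB below.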

View File

@@ -18,6 +18,7 @@
use std::fmt;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use parking_lot::RwLock;
use heapsize::HeapSizeOf;
@@ -159,33 +160,38 @@ impl EarlyMergeDB {
fn insert_keys(inserts: &[(H256, DBValue)], backing: &KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction, trace: bool) {
for &(ref h, ref d) in inserts {
if let Some(c) = refs.get_mut(h) {
// already counting. increment.
c.queue_refs += 1;
if trace {
trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, c.queue_refs);
}
continue;
}
// this is the first entry for this node in the journal.
if backing.get(col, h).expect("Low-level database error. Some issue with your hard disk?").is_some() {
// already in the backing DB. start counting, and remember it was already in.
Self::set_already_in(batch, col, h);
refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: true});
if trace {
trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h);
}
continue;
}
// Gets removed when a key leaves the journal, so should never be set when we're placing a new key.
//Self::reset_already_in(&h);
assert!(!Self::is_already_in(backing, col, &h));
batch.put(col, h, d);
refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: false});
if trace {
trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h);
match refs.entry(*h) {
Entry::Occupied(mut entry) => {
let info = entry.get_mut();
// already counting. increment.
info.queue_refs += 1;
if trace {
trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, info.queue_refs);
}
},
Entry::Vacant(entry) => {
// this is the first entry for this node in the journal.
let in_archive = backing.get(col, h).expect("Low-level database error. Some issue with your hard disk?").is_some();
if in_archive {
// already in the backing DB. start counting, and remember it was already in.
Self::set_already_in(batch, col, h);
if trace {
trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h);
}
} else {
// Gets removed when a key leaves the journal, so should never be set when we're placing a new key.
//Self::reset_already_in(&h);
assert!(!Self::is_already_in(backing, col, h));
if trace {
trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h);
}
batch.put(col, h, d);
}
entry.insert(RefInfo {
queue_refs: 1,
in_archive: in_archive,
});
},
}
}
}
@@ -193,15 +199,20 @@ impl EarlyMergeDB {
fn replay_keys(inserts: &[H256], backing: &KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>) {
trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs);
for h in inserts {
if let Some(c) = refs.get_mut(h) {
match refs.entry(*h) {
// already counting. increment.
c.queue_refs += 1;
continue;
Entry::Occupied(mut entry) => {
entry.get_mut().queue_refs += 1;
},
// this is the first entry for this node in the journal.
// it is initialised to 1 if it was already in.
Entry::Vacant(entry) => {
entry.insert(RefInfo {
queue_refs: 1,
in_archive: Self::is_already_in(backing, col, h),
});
},
}
// this is the first entry for this node in the journal.
// it is initialised to 1 if it was already in.
refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: Self::is_already_in(backing, col, h)});
}
trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs);
}
@@ -213,50 +224,54 @@ impl EarlyMergeDB {
// (the latter option would then mean removing the RefInfo, since it would no longer be counted in the queue.)
// both are valid, but we switch between them depending on context.
// All inserts in queue (i.e. those which may yet be reverted) have an entry in refs.
for h in deletes.iter() {
let mut n: Option<RefInfo> = None;
if let Some(c) = refs.get_mut(h) {
if c.in_archive && from == RemoveFrom::Archive {
c.in_archive = false;
Self::reset_already_in(batch, col, h);
if trace {
trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Reducing to queue only and recording", h);
for h in deletes {
match refs.entry(*h) {
Entry::Occupied(mut entry) => {
if entry.get().in_archive && from == RemoveFrom::Archive {
entry.get_mut().in_archive = false;
Self::reset_already_in(batch, col, h);
if trace {
trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Reducing to queue only and recording", h);
}
continue;
}
continue;
} else if c.queue_refs > 1 {
c.queue_refs -= 1;
if trace {
trace!(target: "jdb.fine", " remove({}): In queue > 1 refs: Decrementing ref count to {}", h, c.queue_refs);
if entry.get().queue_refs > 1 {
entry.get_mut().queue_refs -= 1;
if trace {
trace!(target: "jdb.fine", " remove({}): In queue > 1 refs: Decrementing ref count to {}", h, entry.get().queue_refs);
}
continue;
}
continue;
} else {
n = Some(c.clone());
}
}
match n {
Some(RefInfo{queue_refs: 1, in_archive: true}) => {
refs.remove(h);
Self::reset_already_in(batch, col, h);
if trace {
trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Removing from queue and leaving in archive", h);
let queue_refs = entry.get().queue_refs;
let in_archive = entry.get().in_archive;
match (queue_refs, in_archive) {
(1, true) => {
entry.remove();
Self::reset_already_in(batch, col, h);
if trace {
trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Removing from queue and leaving in archive", h);
}
},
(1, false) => {
entry.remove();
batch.delete(col, h);
if trace {
trace!(target: "jdb.fine", " remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h);
}
},
_ => panic!("Invalid value in refs: {:?}", entry.get()),
}
}
Some(RefInfo{queue_refs: 1, in_archive: false}) => {
refs.remove(h);
batch.delete(col, h);
if trace {
trace!(target: "jdb.fine", " remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h);
}
}
None => {
},
Entry::Vacant(_entry) => {
// Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs.
//assert!(!Self::is_already_in(db, &h));
batch.delete(col, h);
if trace {
trace!(target: "jdb.fine", " remove({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h);
}
}
_ => panic!("Invalid value in refs: {:?}", n),
},
}
}
}
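Once the two early continue cases (still in the archive, or more than one queued ref) are handled, the occupied arm dispatches on the (queue_refs, in_archive) pair. The full state machine, condensed:

    // Occupied, (1, true):  drop the RefInfo, clear the already-in marker;
    //                        the value itself survives in the archive.
    // Occupied, (1, false): drop the RefInfo and delete the value from the DB.
    // Occupied, otherwise:  impossible by construction -> panic.
    // Vacant:               never journalled here, so it must be archived;
    //                        delete it straight from the DB.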
@@ -311,23 +326,28 @@ impl EarlyMergeDB {
impl HashDB for EarlyMergeDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter(self.column) {
let h = H256::from_slice(&*key);
ret.insert(h, 1);
}
let mut ret: HashMap<H256, i32> = self.backing.iter(self.column)
.map(|(key, _)| (H256::from_slice(&*key), 1))
.collect();
for (key, refs) in self.overlay.keys() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
match ret.entry(key) {
Entry::Occupied(mut entry) => {
*entry.get_mut() += refs;
},
Entry::Vacant(entry) => {
entry.insert(refs);
}
}
}
ret
}
fn get(&self, key: &H256) -> Option<DBValue> {
let k = self.overlay.raw(key);
if let Some((d, rc)) = k {
if rc > 0 { return Some(d) }
if let Some((d, rc)) = self.overlay.raw(key) {
if rc > 0 {
return Some(d)
}
}
self.payload(key)
}

View File

@@ -17,6 +17,7 @@
//! `JournalDB` over in-memory overlay
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use parking_lot::RwLock;
use heapsize::HeapSizeOf;
@@ -407,23 +408,28 @@ impl JournalDB for OverlayRecentDB {
impl HashDB for OverlayRecentDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter(self.column) {
let h = H256::from_slice(&*key);
ret.insert(h, 1);
}
let mut ret: HashMap<H256, i32> = self.backing.iter(self.column)
.map(|(key, _)| (H256::from_slice(&*key), 1))
.collect();
for (key, refs) in self.transaction_overlay.keys() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
match ret.entry(key) {
Entry::Occupied(mut entry) => {
*entry.get_mut() += refs;
},
Entry::Vacant(entry) => {
entry.insert(refs);
}
}
}
ret
}
fn get(&self, key: &H256) -> Option<DBValue> {
let k = self.transaction_overlay.raw(key);
if let Some((d, rc)) = k {
if rc > 0 { return Some(d) }
if let Some((d, rc)) = self.transaction_overlay.raw(key) {
if rc > 0 {
return Some(d)
}
}
let v = {
let journal_overlay = self.journal_overlay.read();

View File

@@ -198,7 +198,7 @@ impl JournalDB for RefCountedDB {
fn consolidate(&mut self, mut with: MemoryDB) {
for (key, (value, rc)) in with.drain() {
for _ in 0..rc {
self.emplace(key.clone(), value.clone());
self.emplace(key, value.clone());
}
for _ in rc..0 {

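Dropping the clone() on key is sound because H256 is a small fixed-size value that implements Copy — the same fact this commit exploits when replacing k.clone() with *k in MemoryDB below — so emplace receives a cheap bitwise copy either way.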
View File

@@ -16,14 +16,14 @@
//! Reference-counted memory-based `HashDB` implementation.
use hash::*;
use rlp::*;
use sha3::*;
use hashdb::*;
use heapsize::*;
use std::mem;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use heapsize::HeapSizeOf;
use hash::{H256FastMap, H256};
use rlp::NULL_RLP;
use sha3::*;
use hashdb::*;
/// Reference-counted memory-based `HashDB` implementation.
///
@@ -181,7 +181,13 @@ impl HashDB for MemoryDB {
}
fn keys(&self) -> HashMap<H256, i32> {
self.data.iter().filter_map(|(k, v)| if v.1 != 0 {Some((k.clone(), v.1))} else {None}).collect()
self.data.iter()
.filter_map(|(k, v)| if v.1 != 0 {
Some((*k, v.1))
} else {
None
})
.collect()
}
fn contains(&self, key: &H256) -> bool {
@@ -200,16 +206,17 @@ impl HashDB for MemoryDB {
return SHA3_NULL_RLP.clone();
}
let key = value.sha3();
if match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = DBValue::from_slice(value);
match self.data.entry(key) {
Entry::Occupied(mut entry) => {
let &mut (ref mut old_value, ref mut rc) = entry.get_mut();
if *rc >= -0x80000000i32 && *rc <= 0 {
*old_value = DBValue::from_slice(value);
}
*rc += 1;
false
},
Some(&mut (_, ref mut x)) => { *x += 1; false } ,
None => true,
}{ // ... None falls through into...
self.data.insert(key.clone(), (DBValue::from_slice(value), 1));
Entry::Vacant(entry) => {
entry.insert((DBValue::from_slice(value), 1));
},
}
key
}
@@ -219,17 +226,18 @@ impl HashDB for MemoryDB {
return;
}
match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = value;
match self.data.entry(key) {
Entry::Occupied(mut entry) => {
let &mut (ref mut old_value, ref mut rc) = entry.get_mut();
if *rc >= -0x80000000i32 && *rc <= 0 {
*old_value = value;
}
*rc += 1;
return;
},
Some(&mut (_, ref mut x)) => { *x += 1; return; } ,
None => {},
Entry::Vacant(entry) => {
entry.insert((value, 1));
},
}
// ... None falls through into...
self.data.insert(key, (value, 1));
}
fn remove(&mut self, key: &H256) {
@@ -237,11 +245,14 @@ impl HashDB for MemoryDB {
return;
}
if match self.data.get_mut(key) {
Some(&mut (_, ref mut x)) => { *x -= 1; false }
None => true
}{ // ... None falls through into...
self.data.insert(key.clone(), (DBValue::new(), -1));
match self.data.entry(*key) {
Entry::Occupied(mut entry) => {
let &mut (_, ref mut rc) = entry.get_mut();
*rc -= 1;
},
Entry::Vacant(entry) => {
entry.insert((DBValue::new(), -1));
},
}
}
}
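Throughout these rewrites the reference-counting contract is preserved: rc may go negative when a remove precedes its insert, a later insert or emplace only overwrites the stored value while rc is still non-positive, and lookups only yield data for rc > 0. A usage sketch of that contract, assuming the crate's MemoryDB::new() constructor and get semantics:

    let mut db = MemoryDB::new();
    let key = db.insert(b"dog");     // rc = 1
    db.remove(&key);                 // rc = 0: dead, but the value is retained
    assert!(db.get(&key).is_none());
    db.insert(b"dog");               // rc = 1 again: the entry is revived
    assert!(db.get(&key).is_some());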

View File

@@ -16,13 +16,14 @@
//! Disk-backed `HashDB` implementation.
use std::sync::Arc;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use error::*;
use hash::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
use std::sync::*;
use std::collections::HashMap;
use kvdb::{KeyValueDB, DBTransaction};
/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay.
@@ -125,19 +126,27 @@ impl OverlayDB {
impl HashDB for OverlayDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter(self.column) {
let h = H256::from_slice(&*key);
let r = self.payload(&h).unwrap().1;
ret.insert(h, r as i32);
}
let mut ret: HashMap<H256, i32> = self.backing.iter(self.column)
.map(|(key, _)| {
let h = H256::from_slice(&*key);
let r = self.payload(&h).unwrap().1;
(h, r as i32)
})
.collect();
for (key, refs) in self.overlay.keys() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
match ret.entry(key) {
Entry::Occupied(mut entry) => {
*entry.get_mut() += refs;
},
Entry::Vacant(entry) => {
entry.insert(refs);
}
}
}
ret
}
fn get(&self, key: &H256) -> Option<DBValue> {
// return ok if positive; if negative, check backing - might be enough references there to make
// it positive again.
@@ -165,6 +174,7 @@ impl HashDB for OverlayDB {
_ => None,
}
}
fn contains(&self, key: &H256) -> bool {
// return ok if positive; if negative, check backing - might be enough references there to make
// it positive again.
@@ -185,6 +195,7 @@ impl HashDB for OverlayDB {
}
}
}
fn insert(&mut self, value: &[u8]) -> H256 { self.overlay.insert(value) }
fn emplace(&mut self, key: H256, value: DBValue) { self.overlay.emplace(key, value); }
fn remove(&mut self, key: &H256) { self.overlay.remove(key); }

View File

@@ -217,59 +217,66 @@ impl<'a> TrieDBIterator<'a> {
Ok(r)
}
fn seek_descend<'key>(&mut self, node_data: DBValue, key: &NibbleSlice<'key>) -> super::Result<()> {
let node = Node::decoded(&node_data);
match node {
Node::Leaf(ref slice, _) => {
if slice == key {
self.trail.push(Crumb {
status: Status::At,
node: node.clone().into(),
});
} else {
self.trail.push(Crumb {
status: Status::Exiting,
node: node.clone().into(),
});
}
fn seek<'key>(&mut self, mut node_data: DBValue, mut key: NibbleSlice<'key>) -> super::Result<()> {
loop {
let (data, mid) = {
let node = Node::decoded(&node_data);
match node {
Node::Leaf(slice, _) => {
if slice == key {
self.trail.push(Crumb {
status: Status::At,
node: node.clone().into(),
});
} else {
self.trail.push(Crumb {
status: Status::Exiting,
node: node.clone().into(),
});
}
self.key_nibbles.extend(slice.iter());
Ok(())
},
Node::Extension(ref slice, ref item) => {
if key.starts_with(slice) {
self.trail.push(Crumb {
status: Status::At,
node: node.clone().into(),
});
self.key_nibbles.extend(slice.iter());
let data = self.db.get_raw_or_lookup(&*item)?;
self.seek_descend(data, &key.mid(slice.len()))
} else {
self.descend(&node_data)?;
Ok(())
self.key_nibbles.extend(slice.iter());
return Ok(())
},
Node::Extension(ref slice, ref item) => {
if key.starts_with(slice) {
self.trail.push(Crumb {
status: Status::At,
node: node.clone().into(),
});
self.key_nibbles.extend(slice.iter());
let data = self.db.get_raw_or_lookup(&*item)?;
(data, slice.len())
} else {
self.descend(&node_data)?;
return Ok(())
}
},
Node::Branch(ref nodes, _) => match key.is_empty() {
true => {
self.trail.push(Crumb {
status: Status::At,
node: node.clone().into(),
});
return Ok(())
},
false => {
let i = key.at(0);
self.trail.push(Crumb {
status: Status::AtChild(i as usize),
node: node.clone().into(),
});
self.key_nibbles.push(i);
let child = self.db.get_raw_or_lookup(&*nodes[i as usize])?;
(child, 1)
}
},
_ => return Ok(()),
}
},
Node::Branch(ref nodes, _) => match key.is_empty() {
true => {
self.trail.push(Crumb {
status: Status::At,
node: node.clone().into(),
});
Ok(())
},
false => {
let i = key.at(0);
self.trail.push(Crumb {
status: Status::AtChild(i as usize),
node: node.clone().into(),
});
self.key_nibbles.push(i);
let child = self.db.get_raw_or_lookup(&*nodes[i as usize])?;
self.seek_descend(child, &key.mid(1))
}
},
_ => Ok(())
};
node_data = data;
key = key.mid(mid);
}
}
@@ -314,7 +321,7 @@ impl<'a> TrieIterator for TrieDBIterator<'a> {
self.trail.clear();
self.key_nibbles.clear();
let root_rlp = self.db.root_data()?;
self.seek_descend(root_rlp, &NibbleSlice::new(key))
self.seek(root_rlp, NibbleSlice::new(key))
}
}

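This is the heart of the branch: the recursive seek_descend burned one stack frame per trie level, so a sufficiently deep (or malicious) trie could overflow the stack, while the new seek threads the same state through a loop. Each arm that used to recurse now evaluates to the pair (next node data, nibbles consumed), and the loop tail re-binds node_data and key. The shape of the rewrite in isolation, as a self-contained toy:

    // One descent step; None means the walk is finished.
    fn step(depth: usize) -> Option<usize> {
        if depth == 0 { None } else { Some(depth - 1) }
    }

    // Recursive form (the old shape): one stack frame per level.
    // fn walk(depth: usize) { if let Some(d) = step(depth) { walk(d) } }

    // Iterative form (the new shape): the recursive call becomes a re-binding.
    fn walk(mut depth: usize) {
        while let Some(d) = step(depth) {
            depth = d;
        }
    }

    fn main() {
        walk(10_000_000); // constant stack depth; the recursive form could overflow
    }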
View File

@@ -115,7 +115,7 @@ pub fn sec_trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let gen_input = input
// first put elements into btree to sort them and to remove duplicates
.into_iter()
.map(|(k, v)| (k.sha3().to_vec(), v))
.map(|(k, v)| (k.sha3(), v))
.collect::<BTreeMap<_, _>>()
// then move them to a vector
.into_iter()
@@ -155,8 +155,7 @@ fn hex_prefix_encode(nibbles: &[u8], leaf: bool) -> Vec<u8> {
let oddness_factor = inlen % 2;
// next even number divided by two
let reslen = (inlen + 2) >> 1;
let mut res = vec![];
res.reserve(reslen);
let mut res = Vec::with_capacity(reslen);
let first_byte = {
let mut bits = ((inlen as u8 & 1) + (2 * leaf as u8)) << 4;
@@ -180,11 +179,11 @@ fn hex_prefix_encode(nibbles: &[u8], leaf: bool) -> Vec<u8> {
/// Converts slice of bytes to nibbles.
fn as_nibbles(bytes: &[u8]) -> Vec<u8> {
let mut res = vec![];
res.reserve(bytes.len() * 2);
let mut res = Vec::with_capacity(bytes.len() * 2);
for i in 0..bytes.len() {
res.push(bytes[i] >> 4);
res.push((bytes[i] << 4) >> 4);
let byte = bytes[i];
res.push(byte >> 4);
res.push(byte & 0b1111);
}
res
}
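A worked example of the cleanup above: as_nibbles splits each byte into its high and low 4-bit halves, and byte & 0b1111 states that intent directly where (byte << 4) >> 4 obscured it (both mask off the high nibble of a u8). Hypothetically:

    assert_eq!(as_nibbles(&[0xAB, 0x01]), vec![0x0A, 0x0B, 0x00, 0x01]);
    // 0xAB >> 4 == 0x0A; 0xAB & 0b1111 == 0x0B (same as (0xAB << 4) >> 4)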