Merge branch 'master' into a5-parity-ethereum

This commit is contained in:
5chdn 2018-07-05 19:18:17 +02:00
commit 40b55f6ce5
No known key found for this signature in database
GPG Key ID: 1A40871B597F5F80
12 changed files with 162 additions and 85 deletions

19
Cargo.lock generated
View File

@ -1153,10 +1153,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "fs-swap"
version = "0.2.2"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -1566,7 +1568,7 @@ version = "0.1.0"
dependencies = [
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"fs-swap 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"fs-swap 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"interleaved-ordered 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"kvdb 0.1.0",
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1602,6 +1604,15 @@ name = "libc"
version = "0.2.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "libloading"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cc 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "libusb"
version = "0.3.0"
@ -2474,6 +2485,7 @@ dependencies = [
"ethcore-bytes 0.1.0",
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"hashdb 0.2.0",
"heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"keccak-hash 0.1.2",
"keccak-hasher 0.1.0",
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
@ -3973,7 +3985,7 @@ dependencies = [
"checksum fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33"
"checksum flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9fac2277e84e5e858483756647a9d0aa8d9a2b7cba517fd84325a0aaa69a0909"
"checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344"
"checksum fs-swap 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "31a94e9407e53addc49de767234a0b000978523c59117e5badb575ccbb8370f6"
"checksum fs-swap 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "67f816b2a5f8a6628764a4323d1a8d9ad5303266c4e4e4486ba680f477ba7e62"
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
"checksum futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "1a70b146671de62ec8c8ed572219ca5d594d9b06c0b364d5e67b722fc559b48c"
@ -4014,6 +4026,7 @@ dependencies = [
"checksum lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c8f31047daa365f19be14b47c29df4f7c3b581832407daabe6ae77397619237d"
"checksum lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a6f08839bc70ef4a3fe1d566d5350f519c5912ea86be0df1740a7d247c7fc0ef"
"checksum libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "1e5d97d6708edaa407429faa671b942dc0f2727222fb6b6539bf1db936e4b121"
"checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2"
"checksum libusb 0.3.0 (git+https://github.com/paritytech/libusb-rs)" = "<none>"
"checksum libusb-sys 0.2.4 (git+https://github.com/paritytech/libusb-sys)" = "<none>"
"checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939"

View File

@ -83,7 +83,7 @@ use ethcore::client::{
Client, ChainNotify, ChainRoute, ChainMessageType, ClientIoMessage, BlockId, CallContract
};
use ethcore::account_provider::AccountProvider;
use ethcore::miner::{self, Miner, MinerService};
use ethcore::miner::{self, Miner, MinerService, pool_client::NonceCache};
use ethcore::trace::{Tracer, VMTracer};
use rustc_hex::FromHex;
use ethkey::Password;
@ -96,6 +96,9 @@ use_contract!(private, "PrivateContract", "res/private.json");
/// Initialization vector length.
const INIT_VEC_LEN: usize = 16;
/// Size of nonce cache
const NONCE_CACHE_SIZE: usize = 128;
/// Configuration for private transaction provider
#[derive(Default, PartialEq, Debug, Clone)]
pub struct ProviderConfig {
@ -245,7 +248,7 @@ impl Provider where {
Ok(original_transaction)
}
fn pool_client<'a>(&'a self, nonce_cache: &'a RwLock<HashMap<Address, U256>>) -> miner::pool_client::PoolClient<'a, Client> {
fn pool_client<'a>(&'a self, nonce_cache: &'a NonceCache) -> miner::pool_client::PoolClient<'a, Client> {
let engine = self.client.engine();
let refuse_service_transactions = true;
miner::pool_client::PoolClient::new(
@ -264,7 +267,7 @@ impl Provider where {
/// can be replaced with a single `drain()` method instead.
/// Thanks to this we also don't really need to lock the entire verification for the time of execution.
fn process_queue(&self) -> Result<(), Error> {
let nonce_cache = Default::default();
let nonce_cache = NonceCache::new(NONCE_CACHE_SIZE);
let mut verification_queue = self.transactions_for_verification.lock();
let ready_transactions = verification_queue.ready_transactions(self.pool_client(&nonce_cache));
for transaction in ready_transactions {
@ -585,7 +588,7 @@ impl Importer for Arc<Provider> {
trace!("Validating transaction: {:?}", original_tx);
// Verify with the first account available
trace!("The following account will be used for verification: {:?}", validation_account);
let nonce_cache = Default::default();
let nonce_cache = NonceCache::new(NONCE_CACHE_SIZE);
self.transactions_for_verification.lock().add_transaction(
original_tx,
contract,

View File

@ -152,7 +152,7 @@ impl Default for ClientConfig {
}
#[cfg(test)]
mod test {
use super::{DatabaseCompactionProfile, Mode};
use super::{DatabaseCompactionProfile};
#[test]
fn test_default_compaction_profile() {

View File

@ -14,8 +14,9 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::cmp;
use std::time::{Instant, Duration};
use std::collections::{BTreeMap, BTreeSet, HashSet, HashMap};
use std::collections::{BTreeMap, BTreeSet, HashSet};
use std::sync::Arc;
use ansi_term::Colour;
@ -47,7 +48,7 @@ use client::BlockId;
use executive::contract_address;
use header::{Header, BlockNumber};
use miner;
use miner::pool_client::{PoolClient, CachedNonceClient};
use miner::pool_client::{PoolClient, CachedNonceClient, NonceCache};
use receipt::{Receipt, RichReceipt};
use spec::Spec;
use state::State;
@ -203,7 +204,7 @@ pub struct Miner {
params: RwLock<AuthoringParams>,
#[cfg(feature = "work-notify")]
listeners: RwLock<Vec<Box<NotifyWork>>>,
nonce_cache: RwLock<HashMap<Address, U256>>,
nonce_cache: NonceCache,
gas_pricer: Mutex<GasPricer>,
options: MinerOptions,
// TODO [ToDr] Arc is only required because of price updater
@ -230,6 +231,7 @@ impl Miner {
let limits = options.pool_limits.clone();
let verifier_options = options.pool_verification_options.clone();
let tx_queue_strategy = options.tx_queue_strategy;
let nonce_cache_size = cmp::max(4096, limits.max_count / 4);
Miner {
sealing: Mutex::new(SealingWork {
@ -244,7 +246,7 @@ impl Miner {
#[cfg(feature = "work-notify")]
listeners: RwLock::new(vec![]),
gas_pricer: Mutex::new(gas_pricer),
nonce_cache: RwLock::new(HashMap::with_capacity(1024)),
nonce_cache: NonceCache::new(nonce_cache_size),
options,
transaction_queue: Arc::new(TransactionQueue::new(limits, verifier_options, tx_queue_strategy)),
accounts,
@ -883,7 +885,7 @@ impl miner::MinerService for Miner {
let chain_info = chain.chain_info();
let from_queue = || self.transaction_queue.pending_hashes(
|sender| self.nonce_cache.read().get(sender).cloned(),
|sender| self.nonce_cache.get(sender),
);
let from_pending = || {
@ -1126,14 +1128,15 @@ impl miner::MinerService for Miner {
if has_new_best_block {
// Clear nonce cache
self.nonce_cache.write().clear();
self.nonce_cache.clear();
}
// First update gas limit in transaction queue and minimal gas price.
let gas_limit = *chain.best_block_header().gas_limit();
self.update_transaction_queue_limits(gas_limit);
// Then import all transactions...
// Then import all transactions from retracted blocks.
let client = self.pool_client(chain);
{
retracted
@ -1152,11 +1155,6 @@ impl miner::MinerService for Miner {
});
}
if has_new_best_block {
// ...and at the end remove the old ones
self.transaction_queue.cull(client);
}
if has_new_best_block || (imported.len() > 0 && self.options.reseal_on_uncle) {
// Reset `next_allowed_reseal` in case a block is imported.
// Even if min_period is high, we will always attempt to create
@ -1171,6 +1169,15 @@ impl miner::MinerService for Miner {
self.update_sealing(chain);
}
}
if has_new_best_block {
// Make sure to cull transactions after we update sealing.
// Not culling won't lead to old transactions being added to the block
// (thanks to Ready), but culling can take significant amount of time,
// so best to leave it after we create some work for miners to prevent increased
// uncle rate.
self.transaction_queue.cull(client);
}
}
fn pending_state(&self, latest_block_number: BlockNumber) -> Option<Self::State> {

View File

@ -36,10 +36,32 @@ use header::Header;
use miner;
use miner::service_transaction_checker::ServiceTransactionChecker;
type NoncesCache = RwLock<HashMap<Address, U256>>;
/// Cache for state nonces.
#[derive(Debug)]
pub struct NonceCache {
nonces: RwLock<HashMap<Address, U256>>,
limit: usize
}
const MAX_NONCE_CACHE_SIZE: usize = 4096;
const EXPECTED_NONCE_CACHE_SIZE: usize = 2048;
impl NonceCache {
/// Create new cache with a limit of `limit` entries.
pub fn new(limit: usize) -> Self {
NonceCache {
nonces: RwLock::new(HashMap::with_capacity(limit / 2)),
limit,
}
}
/// Retrieve a cached nonce for given sender.
pub fn get(&self, sender: &Address) -> Option<U256> {
self.nonces.read().get(sender).cloned()
}
/// Clear all entries from the cache.
pub fn clear(&self) {
self.nonces.write().clear();
}
}
/// Blockchain access for transaction pool.
pub struct PoolClient<'a, C: 'a> {
@ -70,7 +92,7 @@ C: BlockInfo + CallContract,
/// Creates new client given chain, nonce cache, accounts and service transaction verifier.
pub fn new(
chain: &'a C,
cache: &'a NoncesCache,
cache: &'a NonceCache,
engine: &'a EthEngine,
accounts: Option<&'a AccountProvider>,
refuse_service_transactions: bool,
@ -161,7 +183,7 @@ impl<'a, C: 'a> NonceClient for PoolClient<'a, C> where
pub(crate) struct CachedNonceClient<'a, C: 'a> {
client: &'a C,
cache: &'a NoncesCache,
cache: &'a NonceCache,
}
impl<'a, C: 'a> Clone for CachedNonceClient<'a, C> {
@ -176,13 +198,14 @@ impl<'a, C: 'a> Clone for CachedNonceClient<'a, C> {
impl<'a, C: 'a> fmt::Debug for CachedNonceClient<'a, C> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("CachedNonceClient")
.field("cache", &self.cache.read().len())
.field("cache", &self.cache.nonces.read().len())
.field("limit", &self.cache.limit)
.finish()
}
}
impl<'a, C: 'a> CachedNonceClient<'a, C> {
pub fn new(client: &'a C, cache: &'a NoncesCache) -> Self {
pub fn new(client: &'a C, cache: &'a NonceCache) -> Self {
CachedNonceClient {
client,
cache,
@ -194,27 +217,29 @@ impl<'a, C: 'a> NonceClient for CachedNonceClient<'a, C> where
C: Nonce + Sync,
{
fn account_nonce(&self, address: &Address) -> U256 {
if let Some(nonce) = self.cache.read().get(address) {
if let Some(nonce) = self.cache.nonces.read().get(address) {
return *nonce;
}
// We don't check again if cache has been populated.
// It's not THAT expensive to fetch the nonce from state.
let mut cache = self.cache.write();
let mut cache = self.cache.nonces.write();
let nonce = self.client.latest_nonce(address);
cache.insert(*address, nonce);
if cache.len() < MAX_NONCE_CACHE_SIZE {
if cache.len() < self.cache.limit {
return nonce
}
debug!(target: "txpool", "NonceCache: reached limit.");
trace_time!("nonce_cache:clear");
// Remove excessive amount of entries from the cache
while cache.len() > EXPECTED_NONCE_CACHE_SIZE {
// Just remove random entry
if let Some(key) = cache.keys().next().cloned() {
cache.remove(&key);
}
let to_remove: Vec<_> = cache.keys().take(self.cache.limit / 2).cloned().collect();
for x in to_remove {
cache.remove(&x);
}
nonce
}
}

View File

@ -384,7 +384,6 @@ impl NetworkProtocolHandler for SyncProtocolHandler {
}
fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
trace_time!("sync::read");
ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer, packet_id, data);
}

View File

@ -43,6 +43,14 @@ type Pool = txpool::Pool<pool::VerifiedTransaction, scoring::NonceAndGasPrice, L
/// since it only affects transaction Condition.
const TIMESTAMP_CACHE: u64 = 1000;
/// How many senders at once do we attempt to process while culling.
///
/// When running with huge transaction pools, culling can take significant amount of time.
/// To prevent holding `write()` lock on the pool for this long period, we split the work into
/// chunks and allow other threads to utilize the pool in the meantime.
/// This parameter controls how many (best) senders at once will be processed.
const CULL_SENDERS_CHUNK: usize = 1024;
/// Transaction queue status.
#[derive(Debug, Clone, PartialEq)]
pub struct Status {
@ -398,10 +406,11 @@ impl TransactionQueue {
}
/// Culls all stalled transactions from the pool.
pub fn cull<C: client::NonceClient>(
pub fn cull<C: client::NonceClient + Clone>(
&self,
client: C,
) {
trace_time!("pool::cull");
// We don't care about future transactions, so nonce_cap is not important.
let nonce_cap = None;
// We want to clear stale transactions from the queue as well.
@ -416,10 +425,19 @@ impl TransactionQueue {
current_id.checked_sub(gap)
};
let state_readiness = ready::State::new(client, stale_id, nonce_cap);
self.recently_rejected.clear();
let removed = self.pool.write().cull(None, state_readiness);
let mut removed = 0;
let senders: Vec<_> = {
let pool = self.pool.read();
let senders = pool.senders().cloned().collect();
senders
};
for chunk in senders.chunks(CULL_SENDERS_CHUNK) {
trace_time!("pool::cull::chunk");
let state_readiness = ready::State::new(client.clone(), stale_id, nonce_cap);
removed += self.pool.write().cull(Some(chunk), state_readiness);
}
debug!(target: "txqueue", "Removed {} stalled transactions. {}", removed, self.status());
}

View File

@ -414,6 +414,11 @@ impl<T, S, L> Pool<T, S, L> where
|| self.mem_usage >= self.options.max_mem_usage
}
/// Returns senders ordered by priority of their transactions.
pub fn senders(&self) -> impl Iterator<Item=&T::Sender> {
self.best_transactions.iter().map(|tx| tx.transaction.sender())
}
/// Returns an iterator of pending (ready) transactions.
pub fn pending<R: Ready<T>>(&self, ready: R) -> PendingIterator<T, R, S, L> {
PendingIterator {

View File

@ -7,10 +7,11 @@ license = "GPL-3.0"
[dependencies]
elastic-array = "0.10"
ethcore-bytes = { version = "0.1.0", path = "../bytes" }
hashdb = { version = "0.2", path = "../hashdb" }
heapsize = "0.4"
log = "0.3"
rand = "0.4"
hashdb = { version = "0.2", path = "../hashdb" }
ethcore-bytes = { version = "0.1.0", path = "../bytes" }
[dev-dependencies]
env_logger = "0.5"

View File

@ -18,6 +18,7 @@
extern crate elastic_array;
extern crate ethcore_bytes as bytes;
extern crate hashdb;
extern crate heapsize;
extern crate rand;
#[macro_use]
extern crate log;

View File

@ -31,6 +31,8 @@ use std::collections::{HashSet, VecDeque};
use std::marker::PhantomData;
use std::mem;
use std::ops::Index;
use heapsize::HeapSizeOf;
use std::{fmt::Debug, hash::Hash};
// For lookups into the Node storage buffer.
// This is deliberately non-copyable.
@ -39,20 +41,20 @@ struct StorageHandle(usize);
// Handles to nodes in the trie.
#[derive(Debug)]
enum NodeHandle<H: Hasher> {
enum NodeHandle<H> {
/// Loaded into memory.
InMemory(StorageHandle),
/// Either a hash or an inline node
Hash(H::Out),
Hash(H),
}
impl<H: Hasher> From<StorageHandle> for NodeHandle<H> {
impl<H> From<StorageHandle> for NodeHandle<H> {
fn from(handle: StorageHandle) -> Self {
NodeHandle::InMemory(handle)
}
}
fn empty_children<H: Hasher>() -> Box<[Option<NodeHandle<H>>; 16]> {
fn empty_children<H>() -> Box<[Option<NodeHandle<H>>; 16]> {
Box::new([
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
@ -61,7 +63,7 @@ fn empty_children<H: Hasher>() -> Box<[Option<NodeHandle<H>>; 16]> {
/// Node types in the Trie.
#[derive(Debug)]
enum Node<H: Hasher> {
enum Node<H> {
/// Empty node.
Empty,
/// A leaf node contains the end of a key and a value.
@ -77,22 +79,24 @@ enum Node<H: Hasher> {
Branch(Box<[Option<NodeHandle<H>>; 16]>, Option<DBValue>)
}
impl<H: Hasher> Node<H> {
impl<O> Node<O> where O: AsRef<[u8]> + AsMut<[u8]> + Default + HeapSizeOf + Debug + PartialEq + Eq + Hash + Send + Sync + Clone + Copy {
// load an inline node into memory or get the hash to do the lookup later.
fn inline_or_hash<C>(node: &[u8], db: &HashDB<H>, storage: &mut NodeStorage<H>) -> NodeHandle<H>
where C: NodeCodec<H>
fn inline_or_hash<C, H>(node: &[u8], db: &HashDB<H>, storage: &mut NodeStorage<H::Out>) -> NodeHandle<H::Out>
where C: NodeCodec<H>,
H: Hasher<Out = O>,
{
C::try_decode_hash(&node)
.map(NodeHandle::Hash)
.unwrap_or_else(|| {
let child = Node::from_encoded::<C>(node, db, storage);
let child = Node::from_encoded::<C, H>(node, db, storage);
NodeHandle::InMemory(storage.alloc(Stored::New(child)))
})
}
// decode a node from encoded bytes without getting its children.
fn from_encoded<C>(data: &[u8], db: &HashDB<H>, storage: &mut NodeStorage<H>) -> Self
where C: NodeCodec<H>
fn from_encoded<C, H>(data: &[u8], db: &HashDB<H>, storage: &mut NodeStorage<H::Out>) -> Self
where C: NodeCodec<H>,
H: Hasher<Out = O>,
{
match C::decode(data).expect("encoded bytes read from db; qed") {
EncodedNode::Empty => Node::Empty,
@ -100,13 +104,13 @@ impl<H: Hasher> Node<H> {
EncodedNode::Extension(key, cb) => {
Node::Extension(
key.encoded(false),
Self::inline_or_hash::<C>(cb, db, storage))
Self::inline_or_hash::<C, H>(cb, db, storage))
}
EncodedNode::Branch(ref encoded_children, val) => {
let mut child = |i:usize| {
let raw = encoded_children[i];
if !C::is_empty_node(raw) {
Some(Self::inline_or_hash::<C>(raw, db, storage))
Some(Self::inline_or_hash::<C, H>(raw, db, storage))
} else {
None
}
@ -125,10 +129,11 @@ impl<H: Hasher> Node<H> {
}
// TODO: parallelize
fn into_encoded<F, C>(self, mut child_cb: F) -> ElasticArray1024<u8>
fn into_encoded<F, C, H>(self, mut child_cb: F) -> ElasticArray1024<u8>
where
C: NodeCodec<H>,
F: FnMut(NodeHandle<H>) -> ChildReference<H::Out>
F: FnMut(NodeHandle<H::Out>) -> ChildReference<H::Out>,
H: Hasher<Out = O>,
{
match self {
Node::Empty => C::empty_node(),
@ -150,7 +155,7 @@ impl<H: Hasher> Node<H> {
}
// post-inspect action.
enum Action<H: Hasher> {
enum Action<H> {
// Replace a node with a new one.
Replace(Node<H>),
// Restore the original node. This trusts that the node is actually the original.
@ -160,14 +165,14 @@ enum Action<H: Hasher> {
}
// post-insert action. Same as action without delete
enum InsertAction<H: Hasher> {
enum InsertAction<H> {
// Replace a node with a new one.
Replace(Node<H>),
// Restore the original node.
Restore(Node<H>),
}
impl<H: Hasher> InsertAction<H> {
impl<H> InsertAction<H> {
fn into_action(self) -> Action<H> {
match self {
InsertAction::Replace(n) => Action::Replace(n),
@ -184,11 +189,11 @@ impl<H: Hasher> InsertAction<H> {
}
// What kind of node is stored here.
enum Stored<H: Hasher> {
enum Stored<H> {
// A new node.
New(Node<H>),
// A cached node, loaded from the DB.
Cached(Node<H>, H::Out),
Cached(Node<H>, H),
}
/// Used to build a collection of child nodes from a collection of `NodeHandle`s
@ -198,12 +203,12 @@ pub enum ChildReference<HO> { // `HO` is e.g. `H256`, i.e. the output of a `Hash
}
/// Compact and cache-friendly storage for Trie nodes.
struct NodeStorage<H: Hasher> {
struct NodeStorage<H> {
nodes: Vec<Stored<H>>,
free_indices: VecDeque<usize>,
}
impl<H: Hasher> NodeStorage<H> {
impl<H> NodeStorage<H> {
/// Create a new storage.
fn empty() -> Self {
NodeStorage {
@ -232,7 +237,7 @@ impl<H: Hasher> NodeStorage<H> {
}
}
impl<'a, H: Hasher> Index<&'a StorageHandle> for NodeStorage<H> {
impl<'a, H> Index<&'a StorageHandle> for NodeStorage<H> {
type Output = Node<H>;
fn index(&self, handle: &'a StorageHandle) -> &Node<H> {
@ -284,10 +289,10 @@ where
H: Hasher + 'a,
C: NodeCodec<H>
{
storage: NodeStorage<H>,
storage: NodeStorage<H::Out>,
db: &'a mut HashDB<H>,
root: &'a mut H::Out,
root_handle: NodeHandle<H>,
root_handle: NodeHandle<H::Out>,
death_row: HashSet<H::Out>,
/// The number of hash operations this trie has performed.
/// Note that none are performed until changes are committed.
@ -347,7 +352,7 @@ where
// cache a node by hash
fn cache(&mut self, hash: H::Out) -> Result<StorageHandle, H::Out, C::Error> {
let node_encoded = self.db.get(&hash).ok_or_else(|| Box::new(TrieError::IncompleteDatabase(hash)))?;
let node = Node::from_encoded::<C>(
let node = Node::from_encoded::<C, H>(
&node_encoded,
&*self.db,
&mut self.storage
@ -357,8 +362,8 @@ where
// inspect a node, choosing either to replace, restore, or delete it.
// if restored or replaced, returns the new node along with a flag of whether it was changed.
fn inspect<F>(&mut self, stored: Stored<H>, inspector: F) -> Result<Option<(Stored<H>, bool)>, H::Out, C::Error>
where F: FnOnce(&mut Self, Node<H>) -> Result<Action<H>, H::Out, C::Error> {
fn inspect<F>(&mut self, stored: Stored<H::Out>, inspector: F) -> Result<Option<(Stored<H::Out>, bool)>, H::Out, C::Error>
where F: FnOnce(&mut Self, Node<H::Out>) -> Result<Action<H::Out>, H::Out, C::Error> {
Ok(match stored {
Stored::New(node) => match inspector(self, node)? {
Action::Restore(node) => Some((Stored::New(node), false)),
@ -380,7 +385,7 @@ where
}
// walk the trie, attempting to find the key's node.
fn lookup<'x, 'key>(&'x self, mut partial: NibbleSlice<'key>, handle: &NodeHandle<H>) -> Result<Option<DBValue>, H::Out, C::Error>
fn lookup<'x, 'key>(&'x self, mut partial: NibbleSlice<'key>, handle: &NodeHandle<H::Out>) -> Result<Option<DBValue>, H::Out, C::Error>
where 'x: 'key
{
let mut handle = handle;
@ -429,7 +434,7 @@ where
}
/// insert a key-value pair into the trie, creating new nodes if necessary.
fn insert_at(&mut self, handle: NodeHandle<H>, partial: NibbleSlice, value: DBValue, old_val: &mut Option<DBValue>) -> Result<(StorageHandle, bool), H::Out, C::Error> {
fn insert_at(&mut self, handle: NodeHandle<H::Out>, partial: NibbleSlice, value: DBValue, old_val: &mut Option<DBValue>) -> Result<(StorageHandle, bool), H::Out, C::Error> {
let h = match handle {
NodeHandle::InMemory(h) => h,
NodeHandle::Hash(h) => self.cache(h)?,
@ -443,7 +448,7 @@ where
}
/// the insertion inspector.
fn insert_inspector(&mut self, node: Node<H>, partial: NibbleSlice, value: DBValue, old_val: &mut Option<DBValue>) -> Result<InsertAction<H>, H::Out, C::Error> {
fn insert_inspector(&mut self, node: Node<H::Out>, partial: NibbleSlice, value: DBValue, old_val: &mut Option<DBValue>) -> Result<InsertAction<H::Out>, H::Out, C::Error> {
trace!(target: "trie", "augmented (partial: {:?}, value: {:?})", partial, value.pretty());
Ok(match node {
@ -604,7 +609,7 @@ where
}
/// Remove a node from the trie based on key.
fn remove_at(&mut self, handle: NodeHandle<H>, partial: NibbleSlice, old_val: &mut Option<DBValue>) -> Result<Option<(StorageHandle, bool)>, H::Out, C::Error> {
fn remove_at(&mut self, handle: NodeHandle<H::Out>, partial: NibbleSlice, old_val: &mut Option<DBValue>) -> Result<Option<(StorageHandle, bool)>, H::Out, C::Error> {
let stored = match handle {
NodeHandle::InMemory(h) => self.storage.destroy(h),
NodeHandle::Hash(h) => {
@ -619,7 +624,7 @@ where
}
/// the removal inspector
fn remove_inspector(&mut self, node: Node<H>, partial: NibbleSlice, old_val: &mut Option<DBValue>) -> Result<Action<H>, H::Out, C::Error> {
fn remove_inspector(&mut self, node: Node<H::Out>, partial: NibbleSlice, old_val: &mut Option<DBValue>) -> Result<Action<H::Out>, H::Out, C::Error> {
Ok(match (node, partial.is_empty()) {
(Node::Empty, _) => Action::Delete,
(Node::Branch(c, None), true) => Action::Restore(Node::Branch(c, None)),
@ -705,7 +710,7 @@ where
/// _invalid state_ means:
/// - Branch node where there is only a single entry;
/// - Extension node followed by anything other than a Branch node.
fn fix(&mut self, node: Node<H>) -> Result<Node<H>, H::Out, C::Error> {
fn fix(&mut self, node: Node<H::Out>) -> Result<Node<H::Out>, H::Out, C::Error> {
match node {
Node::Branch(mut children, value) => {
// if only a single value, transmute to leaf/extension and feed through fixed.
@ -825,7 +830,7 @@ where
match self.storage.destroy(handle) {
Stored::New(node) => {
let encoded_root = node.into_encoded::<_, C>(|child| self.commit_child(child) );
let encoded_root = node.into_encoded::<_, C, H>(|child| self.commit_child(child) );
*self.root = self.db.insert(&encoded_root[..]);
self.hash_count += 1;
@ -845,14 +850,14 @@ where
/// case where we can fit the actual data in the `Hasher`s output type, we
/// store the data inline. This function is used as the callback to the
/// `into_encoded` method of `Node`.
fn commit_child(&mut self, handle: NodeHandle<H>) -> ChildReference<H::Out> {
fn commit_child(&mut self, handle: NodeHandle<H::Out>) -> ChildReference<H::Out> {
match handle {
NodeHandle::Hash(hash) => ChildReference::Hash(hash),
NodeHandle::InMemory(storage_handle) => {
match self.storage.destroy(storage_handle) {
Stored::Cached(_, hash) => ChildReference::Hash(hash),
Stored::New(node) => {
let encoded = node.into_encoded::<_, C>(|node_handle| self.commit_child(node_handle) );
let encoded = node.into_encoded::<_, C, H>(|node_handle| self.commit_child(node_handle) );
if encoded.len() >= H::LENGTH {
let hash = self.db.insert(&encoded[..]);
self.hash_count +=1;
@ -871,7 +876,7 @@ where
}
// a hack to get the root node's handle
fn root_handle(&self) -> NodeHandle<H> {
fn root_handle(&self) -> NodeHandle<H::Out> {
match self.root_handle {
NodeHandle::Hash(h) => NodeHandle::Hash(h),
NodeHandle::InMemory(StorageHandle(x)) => NodeHandle::InMemory(StorageHandle(x)),