Merge branch 'master' into client-api-refact2
commit 43027b36ea
@@ -33,7 +33,7 @@ env:
   global:
     # GH_TOKEN
     - secure: bumJASbZSU8bxJ0EyPUJmu16AiV9EXOpyOj86Jlq/Ty9CfwGqsSXt96uDyE+OUJf34RUFQMsw0nk37/zC4lcn6kqk2wpuH3N/o85Zo/cVZY/NusBWLQqtT5VbYWsV+u2Ua4Tmmsw8yVYQhYwU2ZOejNpflL+Cs9XGgORp1L+/gMRMC2y5Se6ZhwnKPQlRJ8LGsG1dzjQULxzADIt3/zuspNBS8a2urJwlHfGMkvHDoUWCviP/GXoSqw3TZR7FmKyxE19I8n9+iSvm9+oZZquvcgfUxMHn8Gq/b44UbPvjtFOg2yam4xdWXF/RyWCHdc/R9EHorSABeCbefIsm+zcUF3/YQxwpSxM4IZEeH2rTiC7dcrsKw3XsO16xFQz5YI5Bay+CT/wTdMmJd7DdYz7Dyf+pOvcM9WOf/zorxYWSBOMYy0uzbusU2iyIghQ82s7E/Ahg+WARtPgkuTLSB5aL1oCTBKHqQscMr7lo5Ti6RpWLxEdTQMBznc+bMr+6dEtkEcG9zqc6cE9XX+ox3wTU6+HVMfQ1ltCntJ4UKcw3A6INEbw9wgocQa812CIASQ2fE+SCAbz6JxBjIAlFUnD1lUB7S8PdMPwn9plfQgKQ2A5YZqg6FnBdf0rQXIJYxQWKHXj/rBHSUCT0tHACDlzTA+EwWggvkP5AGIxRxm8jhw=
-    - TARGETS="-p ethkey -p ethstore -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethjson -p ethcore-dapps -p ethcore-signer"
+    - TARGETS="-p ethkey -p ethstore -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethjson -p ethcore-dapps -p ethcore-signer -p bigint"
     - ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}"
     - KCOV_FEATURES=""
     - KCOV_CMD="./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests,ethstore/tests target/kcov"
@@ -214,8 +214,8 @@ impl Account {
   }

   /// Commit the `storage_overlay` to the backing DB and update `storage_root`.
-  pub fn commit_storage(&mut self, db: &mut AccountDBMut) {
-    let mut t = SecTrieDBMut::from_existing(db, &mut self.storage_root)
+  pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut AccountDBMut) {
+    let mut t = trie_factory.from_existing(db, &mut self.storage_root)
       .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \
       SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \
       using it will not fail.");
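A minimal sketch of how a caller adapts to the new `commit_storage` signature. The helper function and the `util::trie::TrieFactory` import path are assumptions for illustration; the `&Default::default()` pattern is taken from the account tests in the next hunks, and a client can instead pass a factory built from its configured trie spec (see the client changes further down).

  use util::trie::TrieFactory; // assumed import path for illustration

  // Hypothetical helper: commit an account's storage and code through a factory.
  fn commit_account(account: &mut Account, db: &mut AccountDBMut) {
    // Default::default() keeps the previous secure-trie behaviour.
    let trie_factory: TrieFactory = Default::default();
    account.commit_storage(&trie_factory, db);
    account.commit_code(db);
  }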
@@ -275,7 +275,7 @@ mod tests {
     let rlp = {
       let mut a = Account::new_contract(69.into(), 0.into());
       a.set_storage(H256::from(&U256::from(0x00u64)), H256::from(&U256::from(0x1234u64)));
-      a.commit_storage(&mut db);
+      a.commit_storage(&Default::default(), &mut db);
       a.init_code(vec![]);
       a.commit_code(&mut db);
       a.rlp()
@@ -313,7 +313,7 @@ mod tests {
     let mut db = AccountDBMut::new(&mut db, &Address::new());
     a.set_storage(0.into(), 0x1234.into());
     assert_eq!(a.storage_root(), None);
-    a.commit_storage(&mut db);
+    a.commit_storage(&Default::default(), &mut db);
     assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2");
   }

@@ -323,11 +323,11 @@ mod tests {
     let mut db = MemoryDB::new();
     let mut db = AccountDBMut::new(&mut db, &Address::new());
     a.set_storage(0.into(), 0x1234.into());
-    a.commit_storage(&mut db);
+    a.commit_storage(&Default::default(), &mut db);
     a.set_storage(1.into(), 0x1234.into());
-    a.commit_storage(&mut db);
+    a.commit_storage(&Default::default(), &mut db);
     a.set_storage(1.into(), 0.into());
-    a.commit_storage(&mut db);
+    a.commit_storage(&Default::default(), &mut db);
     assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2");
   }

@@ -254,7 +254,7 @@ mod tests {
   spec.ensure_db_good(db.as_hashdb_mut());
   let last_hashes = vec![genesis_header.hash()];
   let vm_factory = Default::default();
-  let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
+  let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
   let b = b.close_and_lock();
   let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
   assert!(b.try_seal(engine.deref(), seal).is_ok());
@@ -222,6 +222,7 @@ impl<'x> OpenBlock<'x> {
   pub fn new(
     engine: &'x Engine,
     vm_factory: &'x EvmFactory,
+    trie_factory: TrieFactory,
     tracing: bool,
     db: Box<JournalDB>,
     parent: &Header,
@@ -231,7 +232,7 @@ impl<'x> OpenBlock<'x> {
     gas_range_target: (U256, U256),
     extra_data: Bytes,
   ) -> Result<Self, Error> {
-    let state = try!(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce()));
+    let state = try!(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce(), trie_factory));
     let mut r = OpenBlock {
       block: ExecutedBlock::new(state, tracing),
       engine: engine,
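For reference, a sketch of the updated `OpenBlock::new` call shape, mirroring the test call sites later in this diff; `engine`, `vm_factory`, `db` and `genesis_header` are assumed to be set up as in those tests.

  let b = OpenBlock::new(
    engine.deref(),
    &vm_factory,
    Default::default(),          // trie_factory (new third argument)
    false,                       // tracing
    db,
    &genesis_header,
    vec![genesis_header.hash()], // last_hashes
    None,                        // dao_rescue_block_gas_limit
    Address::zero(),             // author
    (3141562.into(), 31415620.into()),
    vec![],                      // extra_data
  ).unwrap();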
@@ -481,16 +482,17 @@ pub fn enact(
   parent: &Header,
   last_hashes: LastHashes,
   dao_rescue_block_gas_limit: Option<U256>,
-  vm_factory: &EvmFactory
+  vm_factory: &EvmFactory,
+  trie_factory: TrieFactory,
 ) -> Result<LockedBlock, Error> {
   {
     if ::log::max_log_level() >= ::log::LogLevel::Trace {
-      let s = try!(State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce()));
+      let s = try!(State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce(), trie_factory.clone()));
       trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author()));
     }
   }

-  let mut b = try!(OpenBlock::new(engine, vm_factory, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, header.author().clone(), (3141562.into(), 31415620.into()), header.extra_data().clone()));
+  let mut b = try!(OpenBlock::new(engine, vm_factory, trie_factory, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, header.author().clone(), (3141562.into(), 31415620.into()), header.extra_data().clone()));
   b.set_difficulty(*header.difficulty());
   b.set_gas_limit(*header.gas_limit());
   b.set_timestamp(header.timestamp());
@@ -509,11 +511,12 @@ pub fn enact_bytes(
   parent: &Header,
   last_hashes: LastHashes,
   dao_rescue_block_gas_limit: Option<U256>,
-  vm_factory: &EvmFactory
+  vm_factory: &EvmFactory,
+  trie_factory: TrieFactory,
 ) -> Result<LockedBlock, Error> {
   let block = BlockView::new(block_bytes);
   let header = block.header();
-  enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory)
+  enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory, trie_factory)
 }

 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
@@ -526,10 +529,11 @@ pub fn enact_verified(
   parent: &Header,
   last_hashes: LastHashes,
   dao_rescue_block_gas_limit: Option<U256>,
-  vm_factory: &EvmFactory
+  vm_factory: &EvmFactory,
+  trie_factory: TrieFactory,
 ) -> Result<LockedBlock, Error> {
   let view = BlockView::new(&block.bytes);
-  enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory)
+  enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory, trie_factory)
 }

 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards
@@ -542,10 +546,11 @@ pub fn enact_and_seal(
   parent: &Header,
   last_hashes: LastHashes,
   dao_rescue_block_gas_limit: Option<U256>,
-  vm_factory: &EvmFactory
+  vm_factory: &EvmFactory,
+  trie_factory: TrieFactory,
 ) -> Result<SealedBlock, Error> {
   let header = BlockView::new(block_bytes).header_view();
-  Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory)).seal(engine, header.seal())))
+  Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory, trie_factory)).seal(engine, header.seal())))
 }

 #[cfg(test)]
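The same `TrieFactory` value is threaded from `enact_and_seal` through `enact_bytes` and `enact` into `OpenBlock::new` and `State::from_existing`. A sketch of the new outermost call, mirroring the block tests below (their `db`, `engine` and `genesis_header` setup is assumed):

  let sealed = enact_and_seal(
    &orig_bytes,
    engine.deref(),
    false,                       // tracing
    db,
    &genesis_header,
    vec![genesis_header.hash()], // last_hashes
    None,                        // dao_rescue_block_gas_limit
    &Default::default(),         // vm_factory
    Default::default(),          // trie_factory
  ).unwrap();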
@@ -565,7 +570,7 @@ mod tests {
     spec.ensure_db_good(db.as_hashdb_mut());
     let last_hashes = vec![genesis_header.hash()];
     let vm_factory = Default::default();
-    let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+    let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
     let b = b.close_and_lock();
     let _ = b.seal(engine.deref(), vec![]);
   }
@@ -581,7 +586,7 @@ mod tests {
     let mut db = db_result.take();
     spec.ensure_db_good(db.as_hashdb_mut());
     let vm_factory = Default::default();
-    let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap()
+    let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap()
       .close_and_lock().seal(engine.deref(), vec![]).unwrap();
     let orig_bytes = b.rlp_bytes();
     let orig_db = b.drain();
@@ -589,7 +594,7 @@ mod tests {
     let mut db_result = get_temp_journal_db();
     let mut db = db_result.take();
     spec.ensure_db_good(db.as_hashdb_mut());
-    let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default()).unwrap();
+    let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default(), Default::default()).unwrap();

     assert_eq!(e.rlp_bytes(), orig_bytes);

@@ -609,7 +614,7 @@ mod tests {
     let mut db = db_result.take();
     spec.ensure_db_good(db.as_hashdb_mut());
     let vm_factory = Default::default();
-    let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+    let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
     let mut uncle1_header = Header::new();
     uncle1_header.extra_data = b"uncle1".to_vec();
     let mut uncle2_header = Header::new();
@@ -624,7 +629,7 @@ mod tests {
     let mut db_result = get_temp_journal_db();
     let mut db = db_result.take();
     spec.ensure_db_good(db.as_hashdb_mut());
-    let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default()).unwrap();
+    let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default(), Default::default()).unwrap();

     let bytes = e.rlp_bytes();
     assert_eq!(bytes, orig_bytes);
@@ -96,6 +96,7 @@ pub struct Client {
   panic_handler: Arc<PanicHandler>,
   verifier: Box<Verifier>,
   vm_factory: Arc<EvmFactory>,
+  trie_factory: TrieFactory,
   miner: Arc<Miner>,
   io_channel: IoChannel<NetSyncMessage>,
   queue_transactions: AtomicUsize,
@@ -177,6 +178,7 @@ impl Client {
   panic_handler: panic_handler,
   verifier: verification::new(config.verifier_type),
   vm_factory: Arc::new(EvmFactory::new(config.vm_type)),
+  trie_factory: TrieFactory::new(config.trie_spec),
   miner: miner,
   io_channel: message_channel,
   queue_transactions: AtomicUsize::new(0),
@@ -235,7 +237,7 @@ impl Client {
   let last_hashes = self.build_last_hashes(header.parent_hash.clone());
   let db = self.state_db.lock().unwrap().boxed_clone();

-  let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.dao_rescue_block_gas_limit(header.parent_hash.clone()), &self.vm_factory);
+  let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.dao_rescue_block_gas_limit(header.parent_hash.clone()), &self.vm_factory, self.trie_factory.clone());
   if let Err(e) = enact_result {
     warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
     return Err(());
@@ -350,7 +352,7 @@ impl Client {
     imported
   }

-  fn commit_block<B>(&self, block: B, hash: &H256, block_data: &Bytes) -> ImportRoute where B: IsBlock + Drain {
+  fn commit_block<B>(&self, block: B, hash: &H256, block_data: &[u8]) -> ImportRoute where B: IsBlock + Drain {
     let number = block.header().number();
     // Are we committing an era?
     let ancient = if number >= HISTORY {
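An aside on the `&Bytes` to `&[u8]` change: `Bytes` is an alias for `Vec<u8>` in ethcore-util, so taking a slice lets callers hand over either an owned vector (via deref coercion) or any borrowed byte slice. A small illustrative snippet (the function name is hypothetical):

  fn commit_block_data(block_data: &[u8]) -> usize {
    block_data.len()
  }

  fn demo() {
    let owned: Vec<u8> = vec![0xde, 0xad, 0xbe, 0xef];
    commit_block_data(&owned);      // &Vec<u8> coerces to &[u8]
    commit_block_data(&owned[1..]); // sub-slices work too
  }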
@@ -420,13 +422,17 @@ impl Client {

       let root = HeaderView::new(&header).state_root();

-      State::from_existing(db, root, self.engine.account_start_nonce()).ok()
+      State::from_existing(db, root, self.engine.account_start_nonce(), self.trie_factory.clone()).ok()
     })
   }

   /// Get a copy of the best block's state.
   pub fn state(&self) -> State {
-    State::from_existing(self.state_db.lock().unwrap().boxed_clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
+    State::from_existing(
+      self.state_db.lock().unwrap().boxed_clone(),
+      HeaderView::new(&self.best_block_header()).state_root(),
+      self.engine.account_start_nonce(),
+      self.trie_factory.clone())
     .expect("State root of best block header always valid.")
   }

@@ -815,6 +821,7 @@ impl MiningBlockChainClient for Client {
   let mut open_block = OpenBlock::new(
     engine,
     &self.vm_factory,
+    self.trie_factory.clone(),
     false, // TODO: this will need to be parameterised once we want to do immediate mining insertion.
     self.state_db.lock().unwrap().boxed_clone(),
     &self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"),
@@ -20,6 +20,7 @@ pub use trace::{Config as TraceConfig, Switch};
 pub use evm::VMType;
 pub use verification::VerifierType;
 use util::journaldb;
+use util::trie::TrieSpec;

 /// Client state db compaction profile
 #[derive(Debug, PartialEq)]
@@ -45,6 +46,8 @@ pub struct ClientConfig {
   pub tracing: TraceConfig,
   /// VM type.
   pub vm_type: VMType,
+  /// Trie type.
+  pub trie_spec: TrieSpec,
   /// The JournalDB ("pruning") algorithm to use.
   pub pruning: journaldb::Algorithm,
   /// The name of the client instance.
@@ -325,7 +325,7 @@ mod tests {
   spec.ensure_db_good(db.as_hashdb_mut());
   let last_hashes = vec![genesis_header.hash()];
   let vm_factory = Default::default();
-  let b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+  let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
   let b = b.close();
   assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap());
 }
@@ -340,7 +340,7 @@ mod tests {
   spec.ensure_db_good(db.as_hashdb_mut());
   let last_hashes = vec![genesis_header.hash()];
   let vm_factory = Default::default();
-  let mut b = OpenBlock::new(engine.deref(), &vm_factory, false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+  let mut b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
   let mut uncle = Header::new();
   let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106");
   uncle.author = uncle_author.clone();
@@ -67,7 +67,7 @@ mod tests {
   let mut db_result = get_temp_journal_db();
   let mut db = db_result.take();
   spec.ensure_db_good(db.as_hashdb_mut());
-  let s = State::from_existing(db, genesis_header.state_root.clone(), engine.account_start_nonce()).unwrap();
+  let s = State::from_existing(db, genesis_header.state_root.clone(), engine.account_start_nonce(), Default::default()).unwrap();
   assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64));
   assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000002")), U256::from(1u64));
   assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000003")), U256::from(1u64));
@@ -267,6 +267,10 @@ impl Miner {
       let difficulty = *block.block().fields().header.difficulty();
       let is_new = original_work_hash.map_or(true, |h| block.block().fields().header.hash() != h);
       sealing_work.push(block);
+      // If push notifications are enabled we assume all work items are used.
+      if self.work_poster.is_some() && is_new {
+        sealing_work.use_last_ref();
+      }
       (Some((pow_hash, difficulty, number)), is_new)
     } else {
       (None, false)
@@ -52,10 +52,10 @@ impl WorkPoster {
   }

   fn create_client() -> Client<PostHandler> {
-    let client = Client::<PostHandler>::configure()
+    Client::<PostHandler>::configure()
       .keep_alive(true)
-      .build().expect("Error creating HTTP client") as Client<PostHandler>;
-    client
+      .build()
+      .expect("Error creating HTTP client")
   }

   pub fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) {
@@ -63,8 +63,10 @@ impl WorkPoster {
     let target = Ethash::difficulty_to_boundary(&difficulty);
     let seed_hash = &self.seed_compute.lock().unwrap().get_seedhash(number);
     let seed_hash = H256::from_slice(&seed_hash[..]);
-    let body = format!(r#"{{ "result": ["0x{}","0x{}","0x{}","0x{:x}"] }}"#,
-      pow_hash.hex(), seed_hash.hex(), target.hex(), number);
+    let body = format!(
+      r#"{{ "result": ["0x{}","0x{}","0x{}","0x{:x}"] }}"#,
+      pow_hash.hex(), seed_hash.hex(), target.hex(), number
+    );
     let mut client = self.client.lock().unwrap();
     for u in &self.urls {
       if let Err(e) = client.request(u.clone(), PostHandler { body: body.clone() }) {
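The doubled braces in the rewritten `format!` call are literal braces, so the posted body is a small JSON document carrying the PoW hash, seed hash, target and block number. A self-contained sketch with placeholder values (the real call uses `pow_hash.hex()` and friends):

  fn main() {
    let body = format!(
      r#"{{ "result": ["0x{}","0x{}","0x{}","0x{:x}"] }}"#,
      "aa", "bb", "cc", 42u64
    );
    assert_eq!(body, r#"{ "result": ["0xaa","0xbb","0xcc","0x2a"] }"#);
  }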
@@ -42,6 +42,7 @@ pub struct State {
   cache: RefCell<HashMap<Address, Option<Account>>>,
   snapshots: RefCell<Vec<HashMap<Address, Option<Option<Account>>>>>,
   account_start_nonce: U256,
+  trie_factory: TrieFactory,
 }

 const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with valid root. Creating a SecTrieDB with a valid root will not fail. \
@@ -50,11 +51,11 @@ const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with v
 impl State {
   /// Creates new state with empty state root
   #[cfg(test)]
-  pub fn new(mut db: Box<JournalDB>, account_start_nonce: U256) -> State {
+  pub fn new(mut db: Box<JournalDB>, account_start_nonce: U256, trie_factory: TrieFactory) -> State {
     let mut root = H256::new();
     {
       // init trie and reset root too null
-      let _ = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root);
+      let _ = trie_factory.create(db.as_hashdb_mut(), &mut root);
     }

     State {
@@ -63,22 +64,26 @@ impl State {
       cache: RefCell::new(HashMap::new()),
       snapshots: RefCell::new(Vec::new()),
       account_start_nonce: account_start_nonce,
+      trie_factory: trie_factory,
     }
   }

   /// Creates new state with existing state root
-  pub fn from_existing(db: Box<JournalDB>, root: H256, account_start_nonce: U256) -> Result<State, TrieError> {
+  pub fn from_existing(db: Box<JournalDB>, root: H256, account_start_nonce: U256, trie_factory: TrieFactory) -> Result<State, TrieError> {
     if !db.as_hashdb().contains(&root) {
-      Err(TrieError::InvalidStateRoot)
-    } else {
-      Ok(State {
-        db: db,
-        root: root,
-        cache: RefCell::new(HashMap::new()),
-        snapshots: RefCell::new(Vec::new()),
-        account_start_nonce: account_start_nonce,
-      })
+      return Err(TrieError::InvalidStateRoot);
     }
+
+    let state = State {
+      db: db,
+      root: root,
+      cache: RefCell::new(HashMap::new()),
+      snapshots: RefCell::new(Vec::new()),
+      account_start_nonce: account_start_nonce,
+      trie_factory: trie_factory,
+    };
+
+    Ok(state)
   }

   /// Create a recoverable snaphot of this state
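A sketch of constructing a `State` under the new signatures, mirroring the test helpers and state tests later in this diff; `journal_db`, `db` and `root` are assumed to come from the surrounding test setup, and `Default::default()` supplies the factory for the previous secure-trie behaviour.

  // Fresh state with an empty root (test-only constructor).
  let mut state = State::new(journal_db, U256::from(0), Default::default());

  // Re-open a state at an existing root; the factory is now the fourth argument.
  let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();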
@@ -156,7 +161,7 @@ impl State {

   /// Determine whether an account exists.
   pub fn exists(&self, a: &Address) -> bool {
-    let db = SecTrieDB::new(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
+    let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
     self.cache.borrow().get(&a).unwrap_or(&None).is_some() || db.contains(&a)
   }

@@ -242,7 +247,7 @@ impl State {
   for a in &addresses {
     if self.code(a).map_or(false, |c| c.sha3() == broken_dao) {
       // Figure out if the balance has been reduced.
-      let maybe_original = SecTrieDB::new(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR).get(&a).map(Account::from_rlp);
+      let maybe_original = self.trie_factory
+        .readonly(self.db.as_hashdb(), &self.root)
+        .expect(SEC_TRIE_DB_UNWRAP_STR)
+        .get(&a).map(Account::from_rlp);
       if maybe_original.map_or(false, |original| *original.balance() > self.balance(a)) {
         return Err(Error::Transaction(TransactionError::DAORescue));
       }
@@ -262,14 +270,14 @@ impl State {
   /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
   /// `accounts` is mutable because we may need to commit the code or storage and record that.
   #[cfg_attr(feature="dev", allow(match_ref_pats))]
-  pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap<Address, Option<Account>>) {
+  pub fn commit_into(trie_factory: &TrieFactory, db: &mut HashDB, root: &mut H256, accounts: &mut HashMap<Address, Option<Account>>) {
     // first, commit the sub trees.
     // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`?
     for (address, ref mut a) in accounts.iter_mut() {
       match a {
         &mut&mut Some(ref mut account) => {
           let mut account_db = AccountDBMut::new(db, address);
-          account.commit_storage(&mut account_db);
+          account.commit_storage(trie_factory, &mut account_db);
           account.commit_code(&mut account_db);
         }
         &mut&mut None => {}
@@ -277,7 +285,7 @@ impl State {
     }

     {
-      let mut trie = SecTrieDBMut::from_existing(db, root).unwrap();
+      let mut trie = trie_factory.from_existing(db, root).unwrap();
       for (address, ref a) in accounts.iter() {
         match **a {
           Some(ref account) => trie.insert(address, &account.rlp()),
@@ -290,7 +298,7 @@ impl State {
   /// Commits our cached account changes into the trie.
   pub fn commit(&mut self) {
     assert!(self.snapshots.borrow().is_empty());
-    Self::commit_into(self.db.as_hashdb_mut(), &mut self.root, self.cache.borrow_mut().deref_mut());
+    Self::commit_into(&self.trie_factory, self.db.as_hashdb_mut(), &mut self.root, self.cache.borrow_mut().deref_mut());
   }

   #[cfg(test)]
@@ -340,7 +348,7 @@ impl State {
   fn get<'a>(&'a self, a: &Address, require_code: bool) -> &'a Option<Account> {
     let have_key = self.cache.borrow().contains_key(a);
     if !have_key {
-      let db = SecTrieDB::new(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
+      let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
       self.insert_cache(a, db.get(&a).map(Account::from_rlp))
     }
     if require_code {
@@ -361,7 +369,7 @@ impl State {
   fn require_or_from<'a, F: FnOnce() -> Account, G: FnOnce(&mut Account)>(&self, a: &Address, require_code: bool, default: F, not_default: G) -> &'a mut Account {
     let have_key = self.cache.borrow().contains_key(a);
     if !have_key {
-      let db = SecTrieDB::new(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
+      let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
       self.insert_cache(a, db.get(&a).map(Account::from_rlp))
     } else {
       self.note_cache(a);
@@ -396,6 +404,7 @@ impl Clone for State {
       cache: RefCell::new(self.cache.borrow().clone()),
       snapshots: RefCell::new(self.snapshots.borrow().clone()),
       account_start_nonce: self.account_start_nonce.clone(),
+      trie_factory: self.trie_factory.clone(),
     }
   }
 }
@@ -1179,7 +1188,7 @@ fn code_from_database() {
     state.drop()
   };

-  let state = State::from_existing(db, root, U256::from(0u8)).unwrap();
+  let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
   assert_eq!(state.code(&a), Some([1u8, 2, 3].to_vec()));
 }

@@ -1194,7 +1203,7 @@ fn storage_at_from_database() {
     state.drop()
   };

-  let s = State::from_existing(db, root, U256::from(0u8)).unwrap();
+  let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
   assert_eq!(s.storage_at(&a, &H256::from(&U256::from(01u64))), H256::from(&U256::from(69u64)));
 }

@@ -1211,7 +1220,7 @@ fn get_from_database() {
     state.drop()
   };

-  let state = State::from_existing(db, root, U256::from(0u8)).unwrap();
+  let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
   assert_eq!(state.balance(&a), U256::from(69u64));
   assert_eq!(state.nonce(&a), U256::from(1u64));
 }
@@ -1244,7 +1253,7 @@ fn remove_from_database() {
   };

   let (root, db) = {
-    let mut state = State::from_existing(db, root, U256::from(0u8)).unwrap();
+    let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
     assert_eq!(state.exists(&a), true);
     assert_eq!(state.nonce(&a), U256::from(1u64));
     state.kill_account(&a);
@@ -1254,7 +1263,7 @@ fn remove_from_database() {
     state.drop()
   };

-  let state = State::from_existing(db, root, U256::from(0u8)).unwrap();
+  let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
   assert_eq!(state.exists(&a), false);
   assert_eq!(state.nonce(&a), U256::from(0u64));
 }
@@ -175,6 +175,7 @@ pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_numbe
   let mut b = OpenBlock::new(
     test_engine.deref(),
     &vm_factory,
+    Default::default(),
     false,
     db,
     &last_header,
@@ -315,7 +316,7 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
   let journal_db = get_temp_journal_db_in(temp.as_path());
   GuardedTempResult {
     _temp: temp,
-    result: Some(State::new(journal_db, U256::from(0u8)))
+    result: Some(State::new(journal_db, U256::from(0), Default::default())),
   }
 }

@@ -325,7 +326,7 @@ pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> {

 pub fn get_temp_state_in(path: &Path) -> State {
   let journal_db = get_temp_journal_db_in(path);
-  State::new(journal_db, U256::from(0u8))
+  State::new(journal_db, U256::from(0), Default::default())
 }

 pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> {
@@ -22,5 +22,5 @@ aster = { version = "0.17", default-features = false }
 clippy = { version = "^0.*", optional = true }
 quasi = { version = "0.11", default-features = false }
 quasi_macros = { version = "0.11", optional = true }
-syntex = { version = "*", optional = true }
-syntex_syntax = { version = "*", optional = true }
+syntex = { version = "0.33", optional = true }
+syntex_syntax = { version = "0.33", optional = true }
@@ -497,9 +497,9 @@ fn client_generics(builder: &aster::AstBuilder, interface_map: &InterfaceMap) ->
     .build()
 }

-fn client_qualified_ident(builder: &aster::AstBuilder, interface_map: &InterfaceMap) -> P<Ty> {
+fn client_qualified_ident(cx: &ExtCtxt, builder: &aster::AstBuilder, interface_map: &InterfaceMap) -> P<Ty> {
   let generics = client_generics(builder, interface_map);
-  aster::ty::TyBuilder::new().path().segment(interface_map.ident_map.client_ident(builder))
+  aster::ty::TyBuilder::new().path().segment(interface_map.ident_map.client_ident(cx, builder, &interface_map.original_item))
     .with_generics(generics).build()
     .build()
 }
@@ -515,7 +515,7 @@ fn client_phantom_ident(builder: &aster::AstBuilder, interface_map: &InterfaceMa
 /// for say `Service` it generates `ServiceClient`
 fn push_client_struct(cx: &ExtCtxt, builder: &aster::AstBuilder, interface_map: &InterfaceMap, push: &mut FnMut(Annotatable)) {
   let generics = client_generics(builder, interface_map);
-  let client_short_ident = interface_map.ident_map.client_ident(builder);
+  let client_short_ident = interface_map.ident_map.client_ident(cx, builder, &interface_map.original_item);
   let phantom = client_phantom_ident(builder, interface_map);

   let client_struct_item = quote_item!(cx,
@@ -547,9 +547,9 @@ fn push_with_socket_client_implementation(
   push: &mut FnMut(Annotatable))
 {
   let generics = client_generics(builder, interface_map);
-  let client_ident = client_qualified_ident(builder, interface_map);
+  let client_ident = client_qualified_ident(cx, builder, interface_map);
   let where_clause = &generics.where_clause;
-  let client_short_ident = interface_map.ident_map.client_ident(builder);
+  let client_short_ident = interface_map.ident_map.client_ident(cx, builder, &interface_map.original_item);

   let implement = quote_item!(cx,
     impl $generics ::ipc::WithSocket<S> for $client_ident $where_clause {
@@ -578,7 +578,7 @@ fn push_client_implementation(
   .collect::<Vec<P<ast::ImplItem>>>();

   let generics = client_generics(builder, interface_map);
-  let client_ident = client_qualified_ident(builder, interface_map);
+  let client_ident = client_qualified_ident(cx, builder, interface_map);
   let where_clause = &generics.where_clause;

   let handshake_item = quote_impl_item!(cx,
@@ -682,6 +682,52 @@ fn implement_handshake_arm(
 }


+fn get_str_from_lit(cx: &ExtCtxt, name: &str, lit: &ast::Lit) -> Result<String, ()> {
+  match lit.node {
+    ast::LitKind::Str(ref s, _) => Ok(format!("{}", s)),
+    _ => {
+      cx.span_err(
+        lit.span,
+        &format!("ipc client_ident annotation `{}` must be a string, not `{}`",
+          name,
+          ::syntax::print::pprust::lit_to_string(lit)));
+
+      return Err(());
+    }
+  }
+}
+
+pub fn get_ipc_meta_items(attr: &ast::Attribute) -> Option<&[P<ast::MetaItem>]> {
+  match attr.node.value.node {
+    ast::MetaItemKind::List(ref name, ref items) if name == &"ipc" => {
+      Some(items)
+    }
+    _ => None
+  }
+}
+
+fn client_ident_renamed(cx: &ExtCtxt, item: &ast::Item) -> Option<String> {
+  for meta_items in item.attrs().iter().filter_map(get_ipc_meta_items) {
+    for meta_item in meta_items {
+      let span = meta_item.span;
+      match meta_item.node {
+        ast::MetaItemKind::NameValue(ref name, ref lit) if name == &"client_ident" => {
+          if let Ok(s) = get_str_from_lit(cx, name, lit) {
+            return Some(s);
+          }
+        }
+        _ => {
+          cx.span_err(
+            meta_item.span,
+            &format!("unknown client_ident container attribute `{}`",
+              ::syntax::print::pprust::meta_item_to_string(meta_item)));
+        }
+      }
+    }
+  }
+  None
+}
+
 struct InterfaceMap {
   pub original_item: Item,
   pub item: P<ast::Item>,
@@ -700,9 +746,14 @@ impl IdentMap {
     builder.id(format!("{}", ::syntax::print::pprust::path_to_string(&self.original_path)))
   }

-  fn client_ident(&self, builder: &aster::AstBuilder) -> Ident {
+  fn client_ident(&self, cx: &ExtCtxt, builder: &aster::AstBuilder, item: &ast::Item) -> Ident {
+    if let Some(new_name) = client_ident_renamed(cx, item) {
+      builder.id(new_name)
+    }
+    else {
     builder.id(format!("{}Client", self.original_path.segments[0].identifier))
   }
+  }

   fn qualified_ident(&self, builder: &aster::AstBuilder) -> Ident {
     builder.id(format!("{}", ::syntax::print::pprust::path_to_string(&self.original_path).replace("<", "::<")))
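The effect of `client_ident_renamed` on naming, modelled as plain Rust rather than the actual syntax-extension plumbing: without an `ipc` attribute, the generated client for a trait `Service` is `ServiceClient`; with a `client_ident` value (as the new `with_attrs` test uses for `PrettyNamedClient`) that name is used instead. The function below is purely illustrative.

  fn client_type_name(trait_name: &str, renamed: Option<&str>) -> String {
    match renamed {
      Some(name) => name.to_owned(),           // e.g. #[ipc(client_ident = "PrettyNamedClient")]
      None => format!("{}Client", trait_name), // default: "ServiceClient"
    }
  }

  fn main() {
    assert_eq!(client_type_name("Service", None), "ServiceClient");
    assert_eq!(client_type_name("Service", Some("PrettyNamedClient")), "PrettyNamedClient");
  }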
@@ -50,11 +50,36 @@ include!("lib.rs.in");

 #[cfg(feature = "with-syntex")]
 pub fn register(reg: &mut syntex::Registry) {
+  use syntax::{ast, fold};
+
+  #[cfg(feature = "with-syntex")]
+  fn strip_attributes(krate: ast::Crate) -> ast::Crate {
+    struct StripAttributeFolder;
+    impl fold::Folder for StripAttributeFolder {
+      fn fold_attribute(&mut self, attr: ast::Attribute) -> Option<ast::Attribute> {
+        match attr.node.value.node {
+          ast::MetaItemKind::List(ref n, _) if n == &"ipc" => { return None; }
+          _ => {}
+        }
+
+        Some(attr)
+      }
+
+      fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
+        fold::noop_fold_mac(mac, self)
+      }
+    }
+
+    fold::Folder::fold_crate(&mut StripAttributeFolder, krate)
+  }
+
   reg.add_attr("feature(custom_derive)");
   reg.add_attr("feature(custom_attribute)");

   reg.add_decorator("derive_Ipc", codegen::expand_ipc_implementation);
   reg.add_decorator("derive_Binary", serialization::expand_serialization_implementation);
+
+  reg.add_post_expansion_pass(strip_attributes);
 }

 #[cfg(not(feature = "with-syntex"))]
@@ -67,4 +92,6 @@ pub fn register(reg: &mut rustc_plugin::Registry) {
     syntax::parse::token::intern("derive_Binary"),
     syntax::ext::base::MultiDecorator(
       Box::new(serialization::expand_serialization_implementation)));
+
+  reg.register_attribute("ipc".to_owned(), AttributeType::Normal);
 }
@@ -19,7 +19,7 @@
 use util::bytes::Populatable;
 use util::numbers::{U256, U512, H256, H2048, Address};
 use std::mem;
-use std::collections::VecDeque;
+use std::collections::{VecDeque, BTreeMap};
 use std::ops::Range;

 #[derive(Debug)]
@@ -139,6 +139,92 @@ impl<R: BinaryConvertable, E: BinaryConvertable> BinaryConvertable for Result<R,
   }
 }

+impl<K, V> BinaryConvertable for BTreeMap<K, V> where K : BinaryConvertable + Ord, V: BinaryConvertable {
+  fn size(&self) -> usize {
+    0usize + match K::len_params() {
+      0 => mem::size_of::<K>() * self.len(),
+      _ => self.iter().fold(0usize, |acc, (k, _)| acc + k.size())
+    } + match V::len_params() {
+      0 => mem::size_of::<V>() * self.len(),
+      _ => self.iter().fold(0usize, |acc, (_, v)| acc + v.size())
+    }
+  }
+
+  fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque<usize>) -> Result<(), BinaryConvertError> {
+    let mut offset = 0usize;
+    for (key, val) in self.iter() {
+      let key_size = match K::len_params() {
+        0 => mem::size_of::<K>(),
+        _ => { let size = key.size(); length_stack.push_back(size); size }
+      };
+      let val_size = match K::len_params() {
+        0 => mem::size_of::<V>(),
+        _ => { let size = val.size(); length_stack.push_back(size); size }
+      };
+
+      if key_size > 0 {
+        let item_end = offset + key_size;
+        try!(key.to_bytes(&mut buffer[offset..item_end], length_stack));
+        offset = item_end;
+      }
+
+      if val_size > 0 {
+        let item_end = offset + key_size;
+        try!(val.to_bytes(&mut buffer[offset..item_end], length_stack));
+        offset = item_end;
+      }
+    }
+    Ok(())
+  }
+
+  fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque<usize>) -> Result<Self, BinaryConvertError> {
+    let mut index = 0;
+    let mut result = Self::new();
+
+    if buffer.len() == 0 { return Ok(result); }
+
+    loop {
+      let key_size = match K::len_params() {
+        0 => mem::size_of::<K>(),
+        _ => try!(length_stack.pop_front().ok_or(BinaryConvertError)),
+      };
+      let key = if key_size == 0 {
+        try!(K::from_empty_bytes())
+      } else {
+        try!(K::from_bytes(&buffer[index..index+key_size], length_stack))
+      };
+      index = index + key_size;
+
+      let val_size = match V::len_params() {
+        0 => mem::size_of::<V>(),
+        _ => try!(length_stack.pop_front().ok_or(BinaryConvertError)),
+      };
+      let val = if val_size == 0 {
+        try!(V::from_empty_bytes())
+      } else {
+        try!(V::from_bytes(&buffer[index..index+val_size], length_stack))
+      };
+      result.insert(key, val);
+      index = index + val_size;
+
+      if index == buffer.len() { break; }
+      if index > buffer.len() {
+        return Err(BinaryConvertError)
+      }
+    }
+
+    Ok(result)
+  }
+
+  fn from_empty_bytes() -> Result<Self, BinaryConvertError> {
+    Ok(Self::new())
+  }
+
+  fn len_params() -> usize {
+    1
+  }
+}
+
 impl<T> BinaryConvertable for Vec<T> where T: BinaryConvertable {
   fn size(&self) -> usize {
     match T::len_params() {
@@ -652,3 +738,18 @@ fn serialize_err_opt_vec_in_out() {

   assert!(vec.is_ok());
 }
+
+#[test]
+fn serialize_btree() {
+  use std::io::{Cursor, SeekFrom, Seek};
+
+  let mut buff = Cursor::new(Vec::new());
+  let mut btree = BTreeMap::new();
+  btree.insert(1u64, 5u64);
+  serialize_into(&btree, &mut buff).unwrap();
+
+  buff.seek(SeekFrom::Start(0)).unwrap();
+  let res = deserialize_from::<BTreeMap<u64, u64>, _>(&mut buff).unwrap();
+
+  assert_eq!(res[&1u64], 5u64);
+}
@@ -16,5 +16,5 @@ ethcore-ipc-nano = { path = "../nano" }
 ethcore-util = { path = "../../util" }

 [build-dependencies]
-syntex = "*"
+syntex = "0.33"
 ethcore-ipc-codegen = { path = "../codegen" }
@@ -58,6 +58,23 @@ pub fn main() {
     registry.expand("", &src, &dst).unwrap();
   }

+  // rpc pass
+  if {
+    let src = Path::new("with_attrs.rs.in");
+    let dst = Path::new(&out_dir).join("with_attrs_ipc.rs");
+    let mut registry = syntex::Registry::new();
+    codegen::register(&mut registry);
+    registry.expand("", &src, &dst).is_ok()
+  }
+  // serialization pass
+  {
+    let src = Path::new(&out_dir).join("with_attrs_ipc.rs");
+    let dst = Path::new(&out_dir).join("with_attrs_cg.rs");
+    let mut registry = syntex::Registry::new();
+    codegen::register(&mut registry);
+    registry.expand("", &src, &dst).unwrap();
+  }
+
   // rpc pass
   {
     let src = Path::new("binary.rs.in");
@ -86,7 +86,7 @@ mod tests {
			0, 0, 0, 0, 0, 0, 0, 0,
			4, 0, 0, 0, 0, 0, 0, 0,
			5, 0, 0, 0],
			service_client.socket().borrow().write_buffer.clone());
			service_client.socket().write().unwrap().write_buffer.clone());
		assert_eq!(10, result);
	}

@ -103,7 +103,7 @@ mod tests {
			1, 0, 0, 0, 0, 0, 0, 0,
			4, 0, 0, 0, 0, 0, 0, 0,
			8, 0, 0, 0, 0, 0, 0, 0,
			5, 0, 0, 0, 10, 0, 0, 0], service_client.socket().borrow().write_buffer.clone());
			5, 0, 0, 0, 10, 0, 0, 0], service_client.socket().write().unwrap().write_buffer.clone());
		assert_eq!(10, result);
	}

@ -145,7 +145,7 @@ mod tests {
			// items
			3, 0, 0, 0, 0, 0, 0, 0,
			11, 0, 0, 0, 0, 0, 0, 0],
			service_client.socket().borrow().write_buffer.clone());
			service_client.socket().write().unwrap().write_buffer.clone());
		assert_eq!(true, result);
	}

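These test updates track an apparent change of the generated client's socket wrapper from `RefCell` to `RwLock` (note the `use std::sync::RwLock` in the new test fixture further below): access now goes through `write().unwrap()` instead of `borrow()`. A minimal sketch of the difference in the two access patterns, using only std types (not the generated client code):

use std::cell::RefCell;
use std::sync::RwLock;

fn main() {
	// RefCell: single-threaded interior mutability, checked at runtime.
	let cell = RefCell::new(vec![1u8]);
	cell.borrow_mut().push(2);
	assert_eq!(cell.borrow().len(), 2);

	// RwLock: the thread-safe equivalent; acquiring the guard returns a Result
	// (lock poisoning), hence the unwrap() in the updated assertions.
	let lock = RwLock::new(vec![1u8]);
	lock.write().unwrap().push(2);
	assert_eq!(lock.read().unwrap().len(), 2);
}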
@ -18,6 +18,7 @@
mod tests {

	use super::super::service::*;
	use super::super::with_attrs::PrettyNamedClient;
	use nanoipc;
	use std::sync::Arc;
	use std::io::Write;

@ -43,6 +44,12 @@ mod tests {
		assert!(client.is_ok());
	}

	#[test]
	fn can_create_renamed_client() {
		let client = nanoipc::init_duplex_client::<PrettyNamedClient<_>>("ipc:///tmp/parity-nano-test10.ipc");
		assert!(client.is_ok());
	}

	#[test]
	fn can_call_handshake() {
		let url = "ipc:///tmp/parity-test-nano-20.ipc";

@ -28,3 +28,4 @@ mod examples;
mod over_nano;
mod nested;
mod binary;
mod with_attrs;

18 ipc/tests/with_attrs.rs Normal file
@ -0,0 +1,18 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/with_attrs_cg.rs"));
34 ipc/tests/with_attrs.rs.in Normal file
@ -0,0 +1,34 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::RwLock;
use std::ops::*;
use ipc::IpcConfig;
use std::mem;
use ipc::binary::BinaryConvertError;
use std::collections::VecDeque;

pub struct BadlyNamedService;

#[derive(Ipc)]
#[ipc(client_ident="PrettyNamedClient")]
impl BadlyNamedService {
	fn is_zero(&self, x: u64) -> bool {
		x == 0
	}
}

impl ::ipc::IpcConfig for BadlyNamedService {}
@ -203,6 +203,7 @@ Database Options:
  --db-compaction TYPE     Database compaction type. TYPE may be one of:
                           ssd - suitable for SSDs and fast HDDs;
                           hdd - suitable for slow HDDs [default: ssd].
  --fat-db                 Fat database.

Import/Export Options:
  --from BLOCK             Export from block BLOCK, which may be an index or

@ -362,6 +363,7 @@ pub struct Args {
	pub flag_ipcapi: Option<String>,
	pub flag_db_cache_size: Option<usize>,
	pub flag_db_compaction: String,
	pub flag_fat_db: bool,
}

pub fn print_version() {
@ -333,6 +333,14 @@ impl Configuration {
			_ => { die!("Invalid pruning method given."); }
		};

		if self.args.flag_fat_db {
			if let journaldb::Algorithm::Archive = client_config.pruning {
				client_config.trie_spec = TrieSpec::Fat;
			} else {
				die!("Fatdb is not supported. Please rerun with --pruning=archive")
			}
		}

		// forced state db cache size if provided
		client_config.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 4));

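With the flag wired through `Args` and `Configuration`, enabling the fat database from the command line would look roughly like the invocation below (a sketch based only on the flag names and the error message shown in this diff):

parity --pruning=archive --fat-db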
@ -43,7 +43,7 @@ use std::cmp;

use std::str::{FromStr};
use std::convert::From;
use std::hash::{Hash, Hasher};
use std::hash::Hash;
use std::ops::*;
use std::cmp::*;

@ -1031,7 +1031,7 @@ macro_rules! construct_uint {

			// shift
			for i in word_shift..$n_words {
				ret[i] += original[i - word_shift] << bit_shift;
				ret[i] = original[i - word_shift] << bit_shift;
			}
			// carry
			if bit_shift > 0 {

@ -1052,14 +1052,18 @@ macro_rules! construct_uint {
			let word_shift = shift / 64;
			let bit_shift = shift % 64;

			// shift
			for i in word_shift..$n_words {
				// Shift
				ret[i - word_shift] = original[i] >> bit_shift;
				ret[i - word_shift] += original[i] >> bit_shift;
			}

			// Carry
			if bit_shift > 0 && i < $n_words - 1 {
			if bit_shift > 0 {
				ret[i - word_shift] += original[i + 1] << (64 - bit_shift);
				for i in word_shift+1..$n_words {
					ret[i - word_shift - 1] += original[i] << (64 - bit_shift);
				}
			}
			}

			$name(ret)
		}
	}
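The corrected right-shift now does the word-level move in one pass and the bit-level carry in a separate pass over the higher words, instead of reaching for `original[i + 1]` from inside the first loop. A small standalone check of the same logic on a fixed two-word value (illustrative only, not the macro itself):

// Right shift for a little-endian 2-word (128-bit) value, shift-then-carry.
fn shr_words(original: [u64; 2], shift: usize) -> [u64; 2] {
	let n_words = 2;
	let word_shift = shift / 64;
	let bit_shift = shift % 64;
	let mut ret = [0u64; 2];

	// shift: move whole words down, dropping the low bits of each word
	for i in word_shift..n_words {
		ret[i - word_shift] = original[i] >> bit_shift;
	}
	// carry: the low bits of the next-higher word become the high bits here
	if bit_shift > 0 {
		for i in word_shift + 1..n_words {
			ret[i - word_shift - 1] += original[i] << (64 - bit_shift);
		}
	}
	ret
}

fn main() {
	// 2^64 >> 1 == 2^63: the carry from the high word must land in the low word.
	assert_eq!(shr_words([0, 1], 1), [1u64 << 63, 0]);
}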
@ -104,6 +104,21 @@ pub trait HashDB: AsHashDB {
	/// }
	/// ```
	fn remove(&mut self, key: &H256);

	/// Insert auxiliary data into hashdb.
	fn insert_aux(&mut self, _hash: Vec<u8>, _value: Vec<u8>) {
		unimplemented!();
	}

	/// Get auxiliary data from hashdb.
	fn get_aux(&self, _hash: &[u8]) -> Option<Vec<u8>> {
		unimplemented!();
	}

	/// Removes auxiliary data from hashdb.
	fn remove_aux(&mut self, _hash: &[u8]) {
		unimplemented!();
	}
}

/// Upcast trait.
@ -26,6 +26,13 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
#[cfg(test)]
use std::env;

/// Suffix appended to auxiliary keys to distinguish them from normal keys.
/// Would be nice to use rocksdb columns for this eventually.
const AUX_FLAG: u8 = 255;

/// Database version.
const DB_VERSION : u32 = 0x103;

/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
@ -39,8 +46,6 @@ pub struct ArchiveDB {
	latest_era: Option<u64>,
}

const DB_VERSION : u32 = 0x103;

impl ArchiveDB {
	/// Create a new instance from file
	pub fn new(path: &str, config: DatabaseConfig) -> ArchiveDB {

@ -115,12 +120,35 @@ impl HashDB for ArchiveDB {
	fn insert(&mut self, value: &[u8]) -> H256 {
		self.overlay.insert(value)
	}

	fn emplace(&mut self, key: H256, value: Bytes) {
		self.overlay.emplace(key, value);
	}

	fn remove(&mut self, key: &H256) {
		self.overlay.remove(key);
	}

	fn insert_aux(&mut self, hash: Vec<u8>, value: Vec<u8>) {
		self.overlay.insert_aux(hash, value);
	}

	fn get_aux(&self, hash: &[u8]) -> Option<Vec<u8>> {
		if let Some(res) = self.overlay.get_aux(hash) {
			return Some(res)
		}

		let mut db_hash = hash.to_vec();
		db_hash.push(AUX_FLAG);

		self.backing.get(&db_hash)
			.expect("Low-level database error. Some issue with your hard disk?")
			.map(|v| v.to_vec())
	}

	fn remove_aux(&mut self, hash: &[u8]) {
		self.overlay.remove_aux(hash);
	}
}

impl JournalDB for ArchiveDB {

@ -144,6 +172,7 @@ impl JournalDB for ArchiveDB {
		let batch = DBTransaction::new();
		let mut inserts = 0usize;
		let mut deletes = 0usize;

		for i in self.overlay.drain().into_iter() {
			let (key, (value, rc)) = i;
			if rc > 0 {

@ -156,6 +185,12 @@ impl JournalDB for ArchiveDB {
				deletes += 1;
			}
		}

		for (mut key, value) in self.overlay.drain_aux().into_iter() {
			key.push(AUX_FLAG);
			batch.put(&key, &value).expect("Low-level database error. Some issue with your hard disk?");
		}

		if self.latest_era.map_or(true, |e| now > e) {
			try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
			self.latest_era = Some(now);
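On disk the auxiliary entries share the key space with regular trie nodes, so `get_aux` and `commit` distinguish them by appending the `AUX_FLAG` byte to the lookup key. A tiny sketch of that keying scheme (plain types, not the ArchiveDB code itself):

const AUX_FLAG: u8 = 255;

// Build the backing-store key for an auxiliary entry: hash bytes + flag suffix.
fn aux_key(hash: &[u8]) -> Vec<u8> {
	let mut key = hash.to_vec();
	key.push(AUX_FLAG);
	key
}

fn main() {
	let hash = [0xaau8, 0xbb, 0xcc];
	assert_eq!(aux_key(&hash), vec![0xaa, 0xbb, 0xcc, 0xff]);
}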
@ -74,6 +74,7 @@ use std::default::Default;
pub struct MemoryDB {
	data: HashMap<H256, (Bytes, i32)>,
	static_null_rlp: (Bytes, i32),
	aux: HashMap<Bytes, Bytes>,
}

impl Default for MemoryDB {

@ -88,6 +89,7 @@ impl MemoryDB {
		MemoryDB {
			data: HashMap::new(),
			static_null_rlp: (vec![0x80u8; 1], 1),
			aux: HashMap::new(),
		}
	}

@ -134,9 +136,12 @@ impl MemoryDB {

	/// Return the internal map of hashes to data, clearing the current state.
	pub fn drain(&mut self) -> HashMap<H256, (Bytes, i32)> {
		let mut data = HashMap::new();
		mem::replace(&mut self.data, HashMap::new())
		mem::swap(&mut self.data, &mut data);
	}
		data

	/// Return the internal map of auxiliary data, clearing the current state.
	pub fn drain_aux(&mut self) -> HashMap<Bytes, Bytes> {
		mem::replace(&mut self.aux, HashMap::new())
	}

	/// Denote that an existing value has the given key. Used when a key gets removed without
@ -233,6 +238,18 @@ impl HashDB for MemoryDB {
			self.data.insert(key.clone(), (Bytes::new(), -1));
		}
	}

	fn insert_aux(&mut self, hash: Vec<u8>, value: Vec<u8>) {
		self.aux.insert(hash, value);
	}

	fn get_aux(&self, hash: &[u8]) -> Option<Vec<u8>> {
		self.aux.get(hash).cloned()
	}

	fn remove_aux(&mut self, hash: &[u8]) {
		self.aux.remove(hash);
	}
}

#[test]
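`drain` and the new `drain_aux` now rely on the `mem::replace` idiom: swap an empty map in and get the old contents back as the return value, with no named temporary and no explicit `mem::swap`. A standalone illustration of the idiom (std types only):

use std::collections::HashMap;
use std::mem;

// Hand back the current contents and leave an empty map behind, in one move.
fn drain_map(map: &mut HashMap<u32, String>) -> HashMap<u32, String> {
	mem::replace(map, HashMap::new())
}

fn main() {
	let mut m = HashMap::new();
	m.insert(1, "one".to_string());
	let drained = drain_map(&mut m);
	assert!(m.is_empty());
	assert_eq!(drained.len(), 1);
}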
@ -539,7 +539,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
		self.info.write().unwrap().public_endpoint = Some(public_endpoint.clone());

		if self.first_time.load(AtomicOrdering::Relaxed) {
			info!("Public node URL: {}", paint(White.bold(), format!("{}", self.external_url().unwrap())));
			info!("Public node URL: {}", paint(White.bold(), self.external_url().unwrap()));
			self.first_time.store(false, AtomicOrdering::Relaxed);
		}

112 util/src/trie/fatdb.rs Normal file
@ -0,0 +1,112 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use hash::H256;
use sha3::Hashable;
use hashdb::HashDB;
use super::{TrieDB, Trie, TrieDBIterator, TrieError};

/// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
/// Additionally it stores inserted hash-key mappings for later retrieval.
///
/// Use it as a `Trie` or `TrieMut` trait object.
pub struct FatDB<'db> {
	raw: TrieDB<'db>,
}

impl<'db> FatDB<'db> {
	/// Create a new trie with the backing database `db` and empty `root`
	/// Initialise to the state entailed by the genesis block.
	/// This guarantees the trie is built correctly.
	pub fn new(db: &'db HashDB, root: &'db H256) -> Result<Self, TrieError> {
		let fatdb = FatDB {
			raw: try!(TrieDB::new(db, root))
		};

		Ok(fatdb)
	}

	/// Get the backing database.
	pub fn db(&self) -> &HashDB {
		self.raw.db()
	}

	/// Iterator over all key / values in the trie.
	pub fn iter(&self) -> FatDBIterator {
		FatDBIterator::new(&self.raw)
	}
}

impl<'db> Trie for FatDB<'db> {
	fn iter<'a>(&'a self) -> Box<Iterator<Item = (Vec<u8>, &[u8])> + 'a> {
		Box::new(FatDB::iter(self))
	}

	fn root(&self) -> &H256 {
		self.raw.root()
	}

	fn contains(&self, key: &[u8]) -> bool {
		self.raw.contains(&key.sha3())
	}

	fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key {
		self.raw.get(&key.sha3())
	}
}

/// Iterator over inserted pairs of key values.
pub struct FatDBIterator<'db> {
	trie_iterator: TrieDBIterator<'db>,
	trie: &'db TrieDB<'db>,
}

impl<'db> FatDBIterator<'db> {
	/// Creates new iterator.
	pub fn new(trie: &'db TrieDB) -> Self {
		FatDBIterator {
			trie_iterator: TrieDBIterator::new(trie),
			trie: trie,
		}
	}
}

impl<'db> Iterator for FatDBIterator<'db> {
	type Item = (Vec<u8>, &'db [u8]);

	fn next(&mut self) -> Option<Self::Item> {
		self.trie_iterator.next()
			.map(|(hash, value)| {
				(self.trie.db().get_aux(&hash).expect("Missing fatdb hash"), value)
			})
	}
}

#[test]
fn fatdb_to_trie() {
	use memorydb::MemoryDB;
	use trie::{FatDBMut, TrieMut};

	let mut memdb = MemoryDB::new();
	let mut root = H256::default();
	{
		let mut t = FatDBMut::new(&mut memdb, &mut root);
		t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]);
	}
	let t = FatDB::new(&memdb, &root).unwrap();
	assert_eq!(t.get(&[0x01u8, 0x23]).unwrap(), &[0x01u8, 0x23]);
	assert_eq!(t.iter().collect::<Vec<_>>(), vec![(vec![0x01u8, 0x23], &[0x01u8, 0x23] as &[u8])]);
}
94 util/src/trie/fatdbmut.rs Normal file
@ -0,0 +1,94 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use hash::H256;
use sha3::Hashable;
use hashdb::HashDB;
use super::{TrieDBMut, Trie, TrieMut, TrieError};

/// A mutable `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
/// Additionally it stores inserted hash-key mappings for later retrieval.
///
/// Use it as a `Trie` or `TrieMut` trait object.
pub struct FatDBMut<'db> {
	raw: TrieDBMut<'db>,
}

impl<'db> FatDBMut<'db> {
	/// Create a new trie with the backing database `db` and empty `root`
	/// Initialise to the state entailed by the genesis block.
	/// This guarantees the trie is built correctly.
	pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self {
		FatDBMut { raw: TrieDBMut::new(db, root) }
	}

	/// Create a new trie with the backing database `db` and `root`.
	///
	/// Returns an error if root does not exist.
	pub fn from_existing(db: &'db mut HashDB, root: &'db mut H256) -> Result<Self, TrieError> {
		Ok(FatDBMut { raw: try!(TrieDBMut::from_existing(db, root)) })
	}

	/// Get the backing database.
	pub fn db(&self) -> &HashDB {
		self.raw.db()
	}

	/// Get the backing database.
	pub fn db_mut(&mut self) -> &mut HashDB {
		self.raw.db_mut()
	}
}

impl<'db> TrieMut for FatDBMut<'db> {
	fn root(&self) -> &H256 {
		self.raw.root()
	}

	fn contains(&self, key: &[u8]) -> bool {
		self.raw.contains(&key.sha3())
	}

	fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key {
		self.raw.get(&key.sha3())
	}

	fn insert(&mut self, key: &[u8], value: &[u8]) {
		let hash = key.sha3();
		self.raw.insert(&hash, value);
		let db = self.raw.db_mut();
		db.insert_aux(hash.to_vec(), key.to_vec());
	}

	fn remove(&mut self, key: &[u8]) {
		self.raw.remove(&key.sha3());
	}
}

#[test]
fn fatdb_to_trie() {
	use memorydb::MemoryDB;
	use super::TrieDB;

	let mut memdb = MemoryDB::new();
	let mut root = H256::default();
	{
		let mut t = FatDBMut::new(&mut memdb, &mut root);
		t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]);
	}
	let t = TrieDB::new(&memdb, &root).unwrap();
	assert_eq!(t.get(&(&[0x01u8, 0x23]).sha3()).unwrap(), &[0x01u8, 0x23]);
}
@ -17,6 +17,8 @@
//! Trie interface and implementation.

use std::fmt;
use hash::H256;
use hashdb::HashDB;

/// Export the trietraits module.
pub mod trietraits;

@ -35,12 +37,18 @@ pub mod sectriedb;
/// Export the sectriedbmut module.
pub mod sectriedbmut;

mod fatdb;

mod fatdbmut;

pub use self::trietraits::{Trie, TrieMut};
pub use self::standardmap::{Alphabet, StandardMap, ValueMode};
pub use self::triedbmut::TrieDBMut;
pub use self::triedb::TrieDB;
pub use self::triedb::{TrieDB, TrieDBIterator};
pub use self::sectriedbmut::SecTrieDBMut;
pub use self::sectriedb::SecTrieDB;
pub use self::fatdb::{FatDB, FatDBIterator};
pub use self::fatdbmut::FatDBMut;

/// Trie Errors
#[derive(Debug)]

@ -54,3 +62,62 @@ impl fmt::Display for TrieError {
		write!(f, "Trie Error: Invalid state root.")
	}
}

/// Trie types
#[derive(Debug, Clone)]
pub enum TrieSpec {
	/// Generic trie.
	Generic,
	/// Secure trie.
	Secure,
	/// Secure trie with fat database.
	Fat,
}

impl Default for TrieSpec {
	fn default() -> TrieSpec {
		TrieSpec::Secure
	}
}

/// Trie factory.
#[derive(Default, Clone)]
pub struct TrieFactory {
	spec: TrieSpec,
}

impl TrieFactory {
	/// Creates new factory.
	pub fn new(spec: TrieSpec) -> Self {
		TrieFactory {
			spec: spec,
		}
	}

	/// Create new immutable instance of Trie.
	pub fn readonly<'db>(&self, db: &'db HashDB, root: &'db H256) -> Result<Box<Trie + 'db>, TrieError> {
		match self.spec {
			TrieSpec::Generic => Ok(Box::new(try!(TrieDB::new(db, root)))),
			TrieSpec::Secure => Ok(Box::new(try!(SecTrieDB::new(db, root)))),
			TrieSpec::Fat => Ok(Box::new(try!(FatDB::new(db, root)))),
		}
	}

	/// Create new mutable instance of Trie.
	pub fn create<'db>(&self, db: &'db mut HashDB, root: &'db mut H256) -> Box<TrieMut + 'db> {
		match self.spec {
			TrieSpec::Generic => Box::new(TrieDBMut::new(db, root)),
			TrieSpec::Secure => Box::new(SecTrieDBMut::new(db, root)),
			TrieSpec::Fat => Box::new(FatDBMut::new(db, root)),
		}
	}

	/// Create new mutable instance of trie and check for errors.
	pub fn from_existing<'db>(&self, db: &'db mut HashDB, root: &'db mut H256) -> Result<Box<TrieMut + 'db>, TrieError> {
		match self.spec {
			TrieSpec::Generic => Ok(Box::new(try!(TrieDBMut::from_existing(db, root)))),
			TrieSpec::Secure => Ok(Box::new(try!(SecTrieDBMut::from_existing(db, root)))),
			TrieSpec::Fat => Ok(Box::new(try!(FatDBMut::from_existing(db, root)))),
		}
	}
}

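A rough usage sketch of the new factory (not part of this commit; the `ethcore_util` crate name and module paths are assumptions): pick a `TrieSpec` once and let the factory hand back boxed `Trie`/`TrieMut` objects over any `HashDB`.

extern crate ethcore_util as util;

use util::trie::{TrieFactory, TrieSpec};
use util::memorydb::MemoryDB;
use util::hash::H256;

fn main() {
	let factory = TrieFactory::new(TrieSpec::Fat);
	let mut memdb = MemoryDB::new();
	let mut root = H256::default();
	{
		// Boxed TrieMut; with TrieSpec::Fat this is a FatDBMut underneath.
		let mut t = factory.create(&mut memdb, &mut root);
		t.insert(b"key", b"value");
	}
	// Boxed read-only Trie over the same backing store and root.
	let t = factory.readonly(&memdb, &root).unwrap();
	assert_eq!(t.get(b"key"), Some(b"value" as &[u8]));
}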
@ -14,8 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use hash::*;
use hash::H256;
use sha3::*;
use sha3::Hashable;
use hashdb::HashDB;
use super::triedb::TrieDB;
use super::trietraits::Trie;

@ -50,6 +50,10 @@ impl<'db> SecTrieDB<'db> {
}

impl<'db> Trie for SecTrieDB<'db> {
	fn iter<'a>(&'a self) -> Box<Iterator<Item = (Vec<u8>, &[u8])> + 'a> {
		Box::new(TrieDB::iter(&self.raw))
	}

	fn root(&self) -> &H256 { self.raw.root() }

	fn contains(&self, key: &[u8]) -> bool {

@ -68,7 +72,7 @@ fn trie_to_sectrie() {
	use super::trietraits::TrieMut;

	let mut memdb = MemoryDB::new();
	let mut root = H256::new();
	let mut root = H256::default();
	{
		let mut t = TrieDBMut::new(&mut memdb, &mut root);
		t.insert(&(&[0x01u8, 0x23]).sha3(), &[0x01u8, 0x23]);
@ -14,8 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use hash::*;
use hash::H256;
use sha3::*;
use sha3::Hashable;
use hashdb::HashDB;
use super::triedbmut::TrieDBMut;
use super::trietraits::{Trie, TrieMut};

@ -44,13 +44,13 @@ impl<'db> SecTrieDBMut<'db> {
	}

	/// Get the backing database.
	pub fn db(&'db self) -> &'db HashDB { self.raw.db() }
	pub fn db(&self) -> &HashDB { self.raw.db() }

	/// Get the backing database.
	pub fn db_mut(&'db mut self) -> &'db mut HashDB { self.raw.db_mut() }
	pub fn db_mut(&mut self) -> &mut HashDB { self.raw.db_mut() }
}

impl<'db> Trie for SecTrieDBMut<'db> {
impl<'db> TrieMut for SecTrieDBMut<'db> {
	fn root(&self) -> &H256 { self.raw.root() }

	fn contains(&self, key: &[u8]) -> bool {

@ -60,9 +60,7 @@ impl<'db> Trie for SecTrieDBMut<'db> {
	fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key {
		self.raw.get(&key.sha3())
	}
}

impl<'db> TrieMut for SecTrieDBMut<'db> {
	fn insert(&mut self, key: &[u8], value: &[u8]) {
		self.raw.insert(&key.sha3(), value);
	}

@ -78,7 +76,7 @@ fn sectrie_to_trie() {
	use super::triedb::*;

	let mut memdb = MemoryDB::new();
	let mut root = H256::new();
	let mut root = H256::default();
	{
		let mut t = SecTrieDBMut::new(&mut memdb, &mut root);
		t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]);
@ -18,7 +18,7 @@ use common::*;
use hashdb::*;
use nibbleslice::*;
use rlp::*;
use super::trietraits::Trie;
use super::trietraits::{Trie};
use super::node::Node;
use super::TrieError;

@ -257,7 +257,7 @@ pub struct TrieDBIterator<'a> {

impl<'a> TrieDBIterator<'a> {
	/// Create a new iterator.
	fn new(db: &'a TrieDB) -> TrieDBIterator<'a> {
	pub fn new(db: &'a TrieDB) -> TrieDBIterator<'a> {
		let mut r = TrieDBIterator {
			db: db,
			trail: vec![],

@ -331,10 +331,16 @@ impl<'a> Iterator for TrieDBIterator<'a> {

impl<'db> TrieDB<'db> {
	/// Get all keys/values stored in the trie.
	pub fn iter(&self) -> TrieDBIterator { TrieDBIterator::new(self) }
	pub fn iter(&self) -> TrieDBIterator {
		TrieDBIterator::new(self)
	}
}

impl<'db> Trie for TrieDB<'db> {
	fn iter<'a>(&'a self) -> Box<Iterator<Item = (Vec<u8>, &[u8])> + 'a> {
		Box::new(TrieDB::iter(self))
	}

	fn root(&self) -> &H256 { &self.root }

	fn contains(&self, key: &[u8]) -> bool {
@ -99,12 +99,12 @@ impl<'db> TrieDBMut<'db> {
	}

	/// Get the backing database.
	pub fn db(&'db self) -> &'db HashDB {
	pub fn db(&self) -> &HashDB {
		self.db
	}

	/// Get the backing database.
	pub fn db_mut(&'db mut self) -> &'db mut HashDB {
	pub fn db_mut(&mut self) -> &mut HashDB {
		self.db
	}

@ -642,7 +642,7 @@ impl<'db> TrieDBMut<'db> {
	}
}

impl<'db> Trie for TrieDBMut<'db> {
impl<'db> TrieMut for TrieDBMut<'db> {
	fn root(&self) -> &H256 { &self.root }

	fn contains(&self, key: &[u8]) -> bool {

@ -652,9 +652,7 @@ impl<'db> Trie for TrieDBMut<'db> {
	fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key {
		self.do_lookup(&NibbleSlice::new(key))
	}
}

impl<'db> TrieMut for TrieDBMut<'db> {
	fn insert(&mut self, key: &[u8], value: &[u8]) {
		match value.is_empty() {
			false => self.insert_ns(&NibbleSlice::new(key), value),
@ -30,10 +30,25 @@ pub trait Trie {

	/// What is the value of the given key in this trie?
	fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key;

	/// Returns an iterator over elements of trie.
	fn iter<'a>(&'a self) -> Box<Iterator<Item = (Vec<u8>, &[u8])> + 'a>;
}

/// A key-value datastore implemented as a database-backed modified Merkle tree.
pub trait TrieMut: Trie {
pub trait TrieMut {
	/// Return the root of the trie.
	fn root(&self) -> &H256;

	/// Is the trie empty?
	fn is_empty(&self) -> bool { *self.root() == SHA3_NULL_RLP }

	/// Does the trie contain a given key?
	fn contains(&self, key: &[u8]) -> bool;

	/// What is the value of the given key in this trie?
	fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key;

	/// Insert a `key`/`value` pair into the trie. An `empty` value is equivalent to removing
	/// `key` from the trie.
	fn insert(&mut self, key: &[u8], value: &[u8]);

@ -42,4 +57,3 @@ pub trait TrieMut: Trie {
	/// value.
	fn remove(&mut self, key: &[u8]);
}
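Since `Trie` now exposes `iter()` and `TrieMut` no longer requires `Trie`, read-only helpers can be written against a bare `&Trie` trait object. A minimal sketch (the helper name is made up; it assumes the `Trie` trait above is in scope):

// Dump every key/value pair of any read-only trie via the new iter() method.
fn dump_trie(t: &Trie) {
	for (key, value) in t.iter() {
		println!("{:?} => {:?}", key, value);
	}
}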