Trace comment on new block inclusion (#100)

Author: rakita (2020-12-02 11:31:11 +01:00, committed by GitHub)
Parent: 51d824fbdc
Commit: 56131b6d92
26 changed files with 146 additions and 45 deletions
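
For orientation, the t_nb markers that this commit adds trace the new-block inclusion flow end to end across the files below. The outline that follows is a sketch assembled from the comments in the diff itself; the wrapper function name is hypothetical and the per-step grouping is approximate, not code from the commit.

// A no-op Rust sketch that only records the ordering of the trace points added in this commit.
fn new_block_inclusion_trace_outline() {
    // t_nb 1.x   SyncHandler: decode the NewBlock RLP, update peer difficulty, drop blocks older than 20 blocks
    // t_nb 2.x   Client::import_block: reject already-known blocks and unknown parents, then queue the block
    // t_nb 3.x   VerificationQueue::import: skip blocks that are already processing or marked bad
    // t_nb 4.x   verify_block_basic: cheap header, uncle and transaction checks
    // t_nb 5.x   verify_block_unordered: seal checks and signature recovery, run in the VerificationQueue thread pool
    // t_nb 6.x   Importer::import_verified_blocks: drain the queue when blocks are ready for insertion
    // t_nb 7.x   check_and_lock_block: pruning/parent checks, family and external verification
    // t_nb 8.x   enact_verified / OpenBlock: execute transactions, close_and_lock the block
    // t_nb 9.x   commit_block: journal state, insert block and traces, chain.commit(), prune ancient states
    // t_nb 10.x  Miner::chain_new_blocks: refresh the transaction queue, cull, update sealing
    // t_nb 11.x  ChainNotify::new_blocks: snapshot watcher, informant, RPC subscribers, sync propagation, SecretStore listeners
}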

View File

@@ -1476,7 +1476,7 @@ impl BlockChain {
        }
    }

-    /// Apply pending insertion updates
+    // t_nb 9.12 commit changes to become current greatest by applying pending insertion updates
    pub fn commit(&self) {
        let mut pending_best_ancient_block = self.pending_best_ancient_block.write();
        let mut pending_best_block = self.pending_best_block.write();

View File

@@ -161,7 +161,7 @@ pub trait Drain {
}

impl<'x> OpenBlock<'x> {
-    /// Create a new `OpenBlock` ready for transaction pushing.
+    // t_nb 8.1 Create a new `OpenBlock` ready for transaction pushing.
    pub fn new<'a, I: IntoIterator<Item = ExtendedHeader>>(
        engine: &'x dyn EthEngine,
        factories: Factories,
@@ -176,6 +176,8 @@ impl<'x> OpenBlock<'x> {
        ancestry: I,
    ) -> Result<Self, Error> {
        let number = parent.number() + 1;
+        // t_nb 8.1.1 get parent StateDB.
        let state = State::from_existing(
            db,
            parent.state_root().clone(),
@@ -198,14 +200,17 @@ impl<'x> OpenBlock<'x> {
        let gas_floor_target = cmp::max(gas_range_target.0, engine.params().min_gas_limit);
        let gas_ceil_target = cmp::max(gas_range_target.1, gas_floor_target);
+        // t_nb 8.1.2 It calculates what the child gas limits should be.
        engine.machine().populate_from_parent(
            &mut r.block.header,
            parent,
            gas_floor_target,
            gas_ceil_target,
        );
+        // t_nb 8.1.3 this adds engine-specific things
        engine.populate_from_parent(&mut r.block.header, parent);
+        // t_nb 8.1.3 updating last hashes and the DAO fork, for ethash
        engine.machine().on_new_block(&mut r.block)?;
        engine.on_new_block(&mut r.block, is_epoch_begin, &mut ancestry.into_iter())?;
@@ -222,7 +227,7 @@ impl<'x> OpenBlock<'x> {
        self.block.header.set_gas_limit(U256::max_value());
    }

-    /// Add an uncle to the block, if possible.
+    // t_nb 8.4 Add an uncle to the block, if possible.
    ///
    /// NOTE Will check chain constraints and the uncle number but will NOT check
    /// that the header itself is actually valid.
@@ -343,13 +348,17 @@ impl<'x> OpenBlock<'x> {
        })
    }

-    /// Turn this into a `LockedBlock`.
+    // t_nb 8.5 Turn this into a `LockedBlock`.
    pub fn close_and_lock(self) -> Result<LockedBlock, Error> {
        let mut s = self;
+        // t_nb 8.5.1 engine applies block rewards (Ethash and AuRa do; Clique is empty)
        s.engine.on_close_block(&mut s.block)?;
+        // t_nb 8.5.2 commit account changes from cache to trie
        s.block.state.commit()?;
+        // t_nb 8.5.3 fill open block header with all other fields
        s.block.header.set_transactions_root(ordered_trie_root(
            s.block.transactions.iter().map(|e| e.rlp_bytes()),
        ));
@@ -506,7 +515,7 @@ impl Drain for SealedBlock {
    }
}

-/// Enact the block given by block header, transactions and uncles
+// t_nb 8.0 Enact the block given by block header, transactions and uncles
pub(crate) fn enact(
    header: Header,
    transactions: Vec<SignedTransaction>,
@@ -532,6 +541,7 @@ pub(crate) fn enact(
        None
    };

+    // t_nb 8.1 Create a new OpenBlock
    let mut b = OpenBlock::new(
        engine,
        factories,
@@ -556,17 +566,22 @@ pub(crate) fn enact(
            b.block.header.number(), root, env.author, author_balance);
    }

+    // t_nb 8.2 transfer all fields from the current header to the OpenBlock header that we created
    b.populate_from(&header);
+    // t_nb 8.3 execute transactions one by one
    b.push_transactions(transactions)?;
+    // t_nb 8.4 push uncles to OpenBlock and check if we have more than the max uncles
    for u in uncles {
        b.push_uncle(u)?;
    }
+    // t_nb 8.5 close block
    b.close_and_lock()
}

-/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
+// t_nb 8.0 Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_verified(
    block: PreverifiedBlock,
    engine: &dyn EthEngine,

View File

@@ -275,7 +275,7 @@ impl Importer {
        })
    }

-    /// This is triggered by a message coming from a block queue when the block is ready for insertion
+    // t_nb 6.0 This is triggered by a message coming from a block queue when the block is ready for insertion
    pub fn import_verified_blocks(&self, client: &Client) -> usize {
        // Shortcut out if we know we're incapable of syncing the chain.
        if !client.enabled.load(AtomicOrdering::Relaxed) {
@@ -315,11 +315,13 @@ impl Importer {
                invalid_blocks.insert(hash);
                continue;
            }

+            // t_nb 7.0 check and lock block
            match self.check_and_lock_block(&bytes, block, client) {
                Ok((closed_block, pending)) => {
                    imported_blocks.push(hash);
                    let transactions_len = closed_block.transactions.len();
+                    // t_nb 8.0 commit block to db
                    let route = self.commit_block(
                        closed_block,
                        &header,
@@ -362,6 +364,7 @@ impl Importer {
        if !imported_blocks.is_empty() {
            let route = ChainRoute::from(import_results.as_ref());

+            // t_nb 10 Notify miner about the newly included block.
            if !has_more_blocks_to_import {
                self.miner.chain_new_blocks(
                    client,
@@ -373,6 +376,7 @@ impl Importer {
                );
            }

+            // t_nb 11 notify rest of system about new block inclusion
            client.notify(|notify| {
                notify.new_blocks(NewBlocks::new(
                    imported_blocks.clone(),
@@ -394,6 +398,7 @@ impl Importer {
        imported
    }

+    // t_nb 6.0.1 check and lock block
    fn check_and_lock_block(
        &self,
        bytes: &[u8],
@@ -404,13 +409,14 @@ impl Importer {
        let header = block.header.clone();

        // Check the block isn't so old we won't be able to enact it.
+        // t_nb 7.1 check if block is older than the last pruned block
        let best_block_number = client.chain.read().best_block_number();
        if client.pruning_info().earliest_state > header.number() {
            warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
            bail!("Block is ancient");
        }

-        // Check if parent is in chain
+        // t_nb 7.2 Check if parent is in chain
        let parent = match client.block_header_decoded(BlockId::Hash(*header.parent_hash())) {
            Some(h) => h,
            None => {
@@ -420,7 +426,7 @@ impl Importer {
        };

        let chain = client.chain.read();
-        // Verify Block Family
+        // t_nb 7.3 verify block family
        let verify_family_result = self.verifier.verify_block_family(
            &header,
            &parent,
@@ -437,6 +443,7 @@ impl Importer {
            bail!(e);
        };

+        // t_nb 7.4 verify block external
        let verify_external_result = self.verifier.verify_block_external(&header, engine);
        if let Err(e) = verify_external_result {
            warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
@@ -444,7 +451,9 @@ impl Importer {
        };

        // Enact Verified Block
+        // t_nb 7.5 Build last hashes. Get parent state db. Get epoch_transition.
        let last_hashes = client.build_last_hashes(header.parent_hash());
        let db = client
            .state_db
            .read()
@@ -454,6 +463,7 @@ impl Importer {
            .epoch_transition(parent.number(), *header.parent_hash())
            .is_some();

+        // t_nb 8.0 Block enacting. Execution of transactions.
        let enact_result = enact_verified(
            block,
            engine,
@@ -474,7 +484,7 @@ impl Importer {
            }
        };

-        // Strip receipts for blocks before validate_receipts_transition,
+        // t_nb 7.6 Strip receipts for blocks before validate_receipts_transition,
        // if the expected receipts root header does not match.
        // (i.e. allow inconsistency in receipts outcome before the transition block)
        if header.number() < engine.params().validate_receipts_transition
@@ -483,7 +493,7 @@ impl Importer {
            locked_block.strip_receipts_outcomes();
        }

-        // Final Verification
+        // t_nb 7.7 Final Verification. See if the block that we created (executed) exactly matches the block that we received.
        if let Err(e) = self
            .verifier
            .verify_block_final(&header, &locked_block.header)
@@ -570,6 +580,7 @@ impl Importer {
        let mut batch = DBTransaction::new();

+        // t_nb 9.1 Gather all ancestry actions. (Used only by AuRa)
        let ancestry_actions = self
            .engine
            .ancestry_actions(&header, &mut chain.ancestry_with_metadata_iter(*parent));
@@ -605,24 +616,28 @@ impl Importer {
            }
        };

+        // t_nb 9.2 calculate route between current and latest block.
        let route = chain.tree_route(best_hash, *parent).expect("forks are only kept when it has common ancestors; tree route from best to prospective's parent always exists; qed");

+        // t_nb 9.3 Check block total difficulty
        let fork_choice = if route.is_from_route_finalized {
            ForkChoice::Old
        } else {
            self.engine.fork_choice(&new, &best)
        };

-        // CHECK! I *think* this is fine, even if the state_root is equal to another
+        // t_nb 9.4 CHECK! I *think* this is fine, even if the state_root is equal to another
        // already-imported block of the same number.
        // TODO: Prove it with a test.
        let mut state = block.state.drop().1;

-        // check epoch end signal, potentially generating a proof on the current
-        // state.
+        // t_nb 9.5 check epoch end signal, potentially generating a proof on the current
+        // state. Write transition into db.
        if let Some(pending) = pending {
            chain.insert_pending_transition(&mut batch, header.hash(), pending);
        }

+        // t_nb 9.6 push state to the database transaction. (It calls journal_under from JournalDB)
        state
            .journal_under(&mut batch, number, hash)
            .expect("DB commit failed");
@@ -633,6 +648,7 @@ impl Importer {
                let AncestryAction::MarkFinalized(a) = ancestry_action;

                if a != header.hash() {
+                    // t_nb 9.7 if there is a finalized ancestor, mark that change in the db. (Used by AuRa)
                    chain
                        .mark_finalized(&mut batch, a)
                        .expect("Engine's ancestry action must be known blocks; qed");
@@ -645,6 +661,7 @@ impl Importer {
            })
            .collect();

+        // t_nb 9.8 insert block
        let route = chain.insert_block(
            &mut batch,
            block_data,
@@ -655,6 +672,7 @@ impl Importer {
            },
        );

+        // t_nb 9.9 insert traces (if they are enabled)
        client.tracedb.read().import(
            &mut batch,
            TraceImportRequest {
@@ -667,15 +685,22 @@ impl Importer {
        );

        let is_canon = route.enacted.last().map_or(false, |h| h == hash);
+        // t_nb 9.10 sync cache
        state.sync_cache(&route.enacted, &route.retracted, is_canon);

        // Final commit to the DB
+        // t_nb 9.11 Write transaction to database (cached)
        client.db.read().key_value().write_buffered(batch);

+        // t_nb 9.12 commit changes to become current greatest by applying pending insertion updates (Sync point)
        chain.commit();
+        // t_nb 9.13 check epoch end. Related only to AuRa and, it seems, the light engine
        self.check_epoch_end(&header, &finalized, &chain, client);
+        // t_nb 9.14 update last hashes. They are built in step 7.5
        client.update_last_hashes(&parent, hash);

+        // t_nb 9.15 prune ancient states
        if let Err(e) = client.prune_ancient(state, &chain) {
            warn!("Failed to prune ancient state data: {}", e);
        }
@@ -1098,7 +1123,7 @@ impl Client {
        with_call(&call)
    }

-    // prune ancient states until below the memory limit or only the minimum amount remain.
+    // t_nb 9.15 prune ancient states until below the memory limit or only the minimum amount remain.
    fn prune_ancient(
        &self,
        mut state_db: StateDB,
@@ -1138,6 +1163,7 @@ impl Client {
        Ok(())
    }

+    // t_nb 9.14 update last hashes. They are built in step 7.5
    fn update_last_hashes(&self, parent: &H256, hash: &H256) {
        let mut hashes = self.last_hashes.write();
        if hashes.front().map_or(false, |h| h == parent) {
@@ -1721,11 +1747,14 @@ impl CallContract for Client {
}

impl ImportBlock for Client {
+    // t_nb 2.0 import block to client
    fn import_block(&self, unverified: Unverified) -> EthcoreResult<H256> {
+        // t_nb 2.1 check if header hash is known to us.
        if self.chain.read().is_known(&unverified.hash()) {
            bail!(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain));
        }

+        // t_nb 2.2 check if parent is known
        let status = self.block_status(BlockId::Hash(unverified.parent_hash()));
        if status == BlockStatus::Unknown {
            bail!(EthcoreErrorKind::Block(BlockError::UnknownParent(
@@ -1743,14 +1772,16 @@ impl ImportBlock for Client {
            None
        };

+        // t_nb 2.3
        match self.importer.block_queue.import(unverified) {
            Ok(hash) => {
+                // t_nb 2.4 If the block is okay and the queue is empty, we propagate the block in a `PriorityTask` to be rebroadcast
                if let Some((raw, hash, difficulty)) = raw {
                    self.notify(move |n| n.block_pre_import(&raw, &hash, &difficulty));
                }
                Ok(hash)
            }
-            // we only care about block errors (not import errors)
+            // t_nb 2.5 if the block is not okay, print the error; we only care about block errors (not import errors)
            Err((Some(block), EthcoreError(EthcoreErrorKind::Block(err), _))) => {
                self.importer
                    .bad_blocks

View File

@@ -1307,6 +1307,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
        Ok(())
    }

+    // t_nb 8.1.5
    fn on_new_block(
        &self,
        block: &mut ExecutedBlock,
@@ -1531,7 +1532,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
        Ok(())
    }

-    // Check the validators.
+    // t_nb 6.4 Check the validators.
    fn verify_block_external(&self, header: &Header) -> Result<(), Error> {
        let (validators, set_number) = self.epoch_set(header)?;

View File

@@ -488,7 +488,7 @@ pub trait Engine<M: Machine>: Sync + Send {
        header_timestamp > parent_timestamp
    }

-    /// Gather all ancestry actions. Called at the last stage when a block is committed. The Engine must guarantee that
+    // t_nb 9.1 Gather all ancestry actions. Called at the last stage when a block is committed. The Engine must guarantee that
    /// the ancestry exists.
    fn ancestry_actions(
        &self,
@@ -507,7 +507,7 @@ pub trait Engine<M: Machine>: Sync + Send {
    }
}

-/// Check whether a given block is the best block based on the default total difficulty rule.
+// t_nb 9.3 Check whether a given block is the best block based on the default total difficulty rule.
pub fn total_difficulty_fork_choice(new: &ExtendedHeader, best: &ExtendedHeader) -> ForkChoice {
    if new.total_score() > best.total_score() {
        ForkChoice::New
@@ -562,7 +562,7 @@ pub trait EthEngine: Engine<::machine::EthereumMachine> {
        self.machine().create_address_scheme(number)
    }

-    /// Verify a particular transaction is valid.
+    // t_nb 5.3.1 Verify a particular transaction is valid.
    ///
    /// Unordered verification doesn't rely on the transaction execution order,
    /// i.e. it should only verify stuff that doesn't assume any previous transactions

View File

@@ -238,7 +238,7 @@ impl EthereumMachine {
        Ok(())
    }

-    /// Logic to perform on a new block: updating last hashes and the DAO
+    // t_nb 8.1.3 Logic to perform on a new block: updating last hashes and the DAO
    /// fork, for ethash.
    pub fn on_new_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> {
        self.push_last_hash(block)?;

View File

@@ -1230,7 +1230,7 @@ impl miner::MinerService for Miner {
        )
    }

-    /// Update sealing if required.
+    // t_nb 10.4 Update sealing if required.
    /// Prepare the block and work if the Engine does not seal internally.
    fn update_sealing<C>(&self, chain: &C, force: ForceUpdateSealing)
    where
@@ -1339,6 +1339,7 @@ impl miner::MinerService for Miner {
        })
    }

+    // t_nb 10 notify miner about new included blocks
    fn chain_new_blocks<C>(
        &self,
        chain: &C,
@@ -1363,11 +1364,11 @@ impl miner::MinerService for Miner {
            self.nonce_cache.clear();
        }

-        // First update gas limit in transaction queue and minimal gas price.
+        // t_nb 10.1 First update gas limit in transaction queue and minimal gas price.
        let gas_limit = *chain.best_block_header().gas_limit();
        self.update_transaction_queue_limits(gas_limit);

-        // Then import all transactions from retracted blocks.
+        // t_nb 10.2 Then import all transactions from retracted blocks (retracted means from side chain).
        let client = self.pool_client(chain);
        {
            retracted
@@ -1379,6 +1380,7 @@ impl miner::MinerService for Miner {
                    .into_iter()
                    .map(pool::verifier::Transaction::Retracted)
                    .collect();
+                // t_nb 10.2
                let _ = self.transaction_queue.import(
                    client.clone(),
                    txs,
@@ -1387,12 +1389,13 @@ impl miner::MinerService for Miner {
        }

        if has_new_best_block || (imported.len() > 0 && self.options.reseal_on_uncle) {
-            // Reset `next_allowed_reseal` in case a block is imported.
+            // t_nb 10.3 Reset `next_allowed_reseal` in case a block is imported.
            // Even if min_period is high, we will always attempt to create
            // new pending block.
            self.sealing.lock().next_allowed_reseal = Instant::now();

            if !is_internal_import {
+                // t_nb 10.4 if it is not an internal import, update sealing
                // --------------------------------------------------------------------------
                // | NOTE Code below requires sealing locks.                                 |
                // | Make sure to release the locks before calling that method.              |
@@ -1402,7 +1405,7 @@ impl miner::MinerService for Miner {
        }

        if has_new_best_block {
-            // Make sure to cull transactions after we update sealing.
+            // t_nb 10.5 Make sure to cull transactions after we update sealing.
            // Not culling won't lead to old transactions being added to the block
            // (thanks to Ready), but culling can take significant amount of time,
            // so best to leave it after we create some work for miners to prevent increased
@@ -1424,7 +1427,9 @@ impl miner::MinerService for Miner {
                        &*accounts,
                        service_transaction_checker.as_ref(),
                    );
+                    // t_nb 10.5 do culling
                    queue.cull(client);
+                    // reseal is only used by the InstantSeal engine
                    if engine.should_reseal_on_update() {
                        // force update_sealing here to skip `reseal_required` checks
                        chain.update_sealing(ForceUpdateSealing::Yes);
@@ -1435,13 +1440,16 @@ impl miner::MinerService for Miner {
                    warn!(target: "miner", "Error queueing cull: {:?}", e);
                }
            } else {
+                // t_nb 10.5 do culling
                self.transaction_queue.cull(client);
+                // reseal is only used by the InstantSeal engine
                if self.engine.should_reseal_on_update() {
                    // force update_sealing here to skip `reseal_required` checks
                    self.update_sealing(chain, ForceUpdateSealing::Yes);
                }
            }
        }
+        // t_nb 10.6 For the service transaction checker, update addresses to the latest block
        if let Some(ref service_transaction_checker) = self.service_transaction_checker {
            match service_transaction_checker.refresh_cache(chain) {
                Ok(true) => {

View File

@@ -111,6 +111,7 @@ impl Watcher {
}

impl ChainNotify for Watcher {
+    // t_nb 11.1 check block number and trigger snapshot creation if needed.
    fn new_blocks(&self, new_blocks: NewBlocks) {
        if self.oracle.is_major_importing() || new_blocks.has_more_blocks_to_import {
            return;

View File

@@ -43,7 +43,7 @@ pub trait Backend: Send {
    /// Treat the backend as a writeable hashdb.
    fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue>;

-    /// Add an account entry to the cache.
+    // t_nb 9.4 Add an account entry to the cache.
    fn add_to_account_cache(&mut self, addr: Address, data: Option<Account>, modified: bool);

    /// Add a global code cache entry. This doesn't need to worry about canonicality because

View File

@@ -996,7 +996,7 @@ impl<B: Backend> State<B> {
        Ok(())
    }

-    /// Commits our cached account changes into the trie.
+    // t_nb 8.5.2 Commits our cached account changes into the trie.
    pub fn commit(&mut self) -> Result<(), Error> {
        assert!(self.checkpoints.borrow().is_empty());
        // first, commit the sub trees.
@@ -1036,7 +1036,7 @@ impl<B: Backend> State<B> {
        Ok(())
    }

-    /// Propagate local cache into shared canonical state cache.
+    // t_nb 9.4 Propagate local cache into shared canonical state cache.
    fn propagate_to_global_cache(&mut self) {
        let mut addresses = self.cache.borrow_mut();
        trace!("Committing cache {:?} entries", addresses.len());

View File

@@ -147,6 +147,7 @@ impl StateDB {
        Ok(records)
    }

+    // t_nb 9.15
    /// Mark a given candidate from an ancient era as canonical, enacting its removals from the
    /// backing database and reverting any non-canonical historical commit's insertions.
    pub fn mark_canonical(
@@ -158,7 +159,7 @@ impl StateDB {
        self.db.mark_canonical(batch, end_era, canon_id)
    }

-    /// Propagate local cache into the global cache and synchonize
+    // t_nb 9.10 Propagate local cache into the global cache and synchronize
    /// the global cache with the best block state.
    /// This function updates the global cache by removing entries
    /// that are invalidated by chain reorganization. `sync_cache`

View File

@@ -95,6 +95,7 @@ pub mod blocks {
        type Unverified = Unverified;
        type Verified = PreverifiedBlock;

+        // t_nb 4.0 verify_block_basic
        fn create(
            input: Self::Input,
            engine: &dyn EthEngine,
@@ -113,6 +114,7 @@
            }
        }

+        // t_nb 5.0 verify standalone block
        fn verify(
            un: Self::Unverified,
            engine: &dyn EthEngine,

View File

@@ -364,7 +364,7 @@ impl<K: Kind> VerificationQueue<K> {
            }
        }

-        // do work.
+        // do work on this item.
        let item = {
            // acquire these locks before getting the item to verify.
            let mut unverified = verification.unverified.lock();
@@ -387,10 +387,12 @@
        };

        let hash = item.hash();
+        // t_nb 5.0 verify standalone block (this verification is done in VerificationQueue thread pool)
        let is_ready = match K::verify(item, &*engine, verification.check_seal) {
            Ok(verified) => {
                let mut verifying = verification.verifying.lock();
                let mut idx = None;
+                // find item again and remove it from verified queue
                for (i, e) in verifying.iter_mut().enumerate() {
                    if e.hash == hash {
                        idx = Some(i);
@@ -515,17 +517,20 @@ impl<K: Kind> VerificationQueue<K> {
    }

    /// Add a block to the queue.
+    // t_nb 3.0 import block to verification queue
    pub fn import(&self, input: K::Input) -> Result<H256, (Option<K::Input>, Error)> {
        let hash = input.hash();
        let raw_hash = input.raw_hash();

+        // t_nb 3.1 check if block is currently processing or marked as bad.
        {
+            // t_nb 3.1.0 is currently processing
            if self.processing.read().contains_key(&hash) {
                bail!((
                    Some(input),
                    ErrorKind::Import(ImportErrorKind::AlreadyQueued).into()
                ));
            }

+            // t_nb 3.1.1 is marked as bad
            let mut bad = self.verification.bad.lock();
            if bad.contains(&hash) || bad.contains(&raw_hash) {
                bail!((
@@ -533,7 +538,7 @@ impl<K: Kind> VerificationQueue<K> {
                    ErrorKind::Import(ImportErrorKind::KnownBad).into()
                ));
            }

+            // t_nb 3.1.2 its parent is marked as bad
            if bad.contains(&input.parent_hash()) {
                bad.insert(hash);
                bail!((

View File

@@ -63,26 +63,36 @@ impl HeapSizeOf for PreverifiedBlock {
    }
}

-/// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block
+// t_nb 4.0 Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block
pub fn verify_block_basic(
    block: &Unverified,
    engine: &dyn EthEngine,
    check_seal: bool,
) -> Result<(), Error> {
+    // t_nb 4.1 verify header params
    verify_header_params(&block.header, engine, true, check_seal)?;
+    // t_nb 4.2 verify header time (added in new OE version)
+    // t_nb 4.3 verify block integrity
    verify_block_integrity(block)?;

    if check_seal {
+        // t_nb 4.4 Check block seal. It calls engine to verify block basic
        engine.verify_block_basic(&block.header)?;
    }

+    // t_nb 4.5 for all uncles verify header and call engine to verify block basic
    for uncle in &block.uncles {
+        // t_nb 4.5.1
        verify_header_params(uncle, engine, false, check_seal)?;
        if check_seal {
+            // t_nb 4.5.2
            engine.verify_block_basic(uncle)?;
        }
    }

+    // t_nb 4.6 call engine.gas_limit_override (used only by AuRa) TODO added in new version
+    // t_nb 4.7 for every transaction call engine.verify_transaction_basic
    for t in &block.transactions {
        engine.verify_transaction_basic(t, &block.header)?;
    }
@@ -90,7 +100,7 @@ pub fn verify_block_basic(
    Ok(())
}

-/// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
+// t_nb 5.0 Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
/// Still operates on a individual block
/// Returns a `PreverifiedBlock` structure populated with transactions
pub fn verify_block_unordered(
@@ -100,8 +110,10 @@ pub fn verify_block_unordered(
) -> Result<PreverifiedBlock, Error> {
    let header = block.header;
    if check_seal {
+        // t_nb 5.1
        engine.verify_block_unordered(&header)?;
        for uncle in &block.uncles {
+            // t_nb 5.2
            engine.verify_block_unordered(uncle)?;
        }
    }
@@ -112,11 +124,14 @@ pub fn verify_block_unordered(
        None
    };

+    // t_nb 5.3 iterate over all transactions
    let transactions = block
        .transactions
        .into_iter()
        .map(|t| {
+            // t_nb 5.3.1 call verify_unordered. Check signatures and calculate address
            let t = engine.verify_transaction_unordered(t, &header)?;
+            // t_nb 5.3.2 check if nonce is more than max nonce (EIP-168 and EIP-169)
            if let Some(max_nonce) = nonce_cap {
                if t.nonce >= max_nonce {
                    return Err(BlockError::TooManyTransactions(t.sender()).into());
@@ -146,7 +161,7 @@ pub struct FullFamilyParams<'a, C: BlockInfo + CallContract + 'a> {
    pub client: &'a C,
}

-/// Phase 3 verification. Check block information against parent and uncles.
+// t_nb 6.3 Phase 3 verification. Check block information against parent and uncles.
pub fn verify_block_family<C: BlockInfo + CallContract>(
    header: &Header,
    parent: &Header,
@@ -154,6 +169,7 @@ pub fn verify_block_family<C: BlockInfo + CallContract>(
    do_full: Option<FullFamilyParams<C>>,
) -> Result<(), Error> {
    // TODO: verify timestamp
+    // t_nb 6.3.1 verify parent
    verify_parent(&header, &parent, engine)?;
    engine.verify_block_family(&header, &parent)?;
@@ -162,8 +178,10 @@
        None => return Ok(()),
    };

+    // t_nb 6.3.2 verify uncles
    verify_uncles(params.block, params.block_provider, engine)?;

+    // t_nb 6.3.3 verify all transactions
    for tx in &params.block.transactions {
        // transactions are verified against the parent header since the current
        // state wasn't available when the tx was created

View File

@@ -529,6 +529,7 @@ impl ChainNotify for EthSync {
        }
    }

+    // t_nb 11.4
    fn new_blocks(&self, new_blocks: NewBlocks) {
        if new_blocks.has_more_blocks_to_import {
            return;

View File

@@ -155,6 +155,7 @@ impl SyncHandler {
            trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id);
            return Ok(());
        }
+        // t_nb 1.0 decode RLP
        let block = Unverified::from_rlp(r.at(0)?.as_raw().to_vec())?;
        let hash = block.header.hash();
        let number = block.header.number();
@@ -166,7 +167,9 @@ impl SyncHandler {
        let difficulty: U256 = r.val_at(1)?;
        // Most probably the sent block is being imported by peer right now
        // Use td and hash, that peer must have for now
+        // t_nb 1.1 check new block difficulty (it can be found as the second item in the RLP) and update peer difficulty
        let parent_td = difficulty.checked_sub(*block.header.difficulty());
        if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) {
            if peer
                .difficulty
@@ -181,6 +184,7 @@ impl SyncHandler {
            peer.latest_hash = *parent_hash;
        }

+        // t_nb 1.2 if the block is older than 20 blocks, don't process it
        let last_imported_number = sync.new_blocks.last_imported_block_number();
        if last_imported_number > number && last_imported_number - number > MAX_NEW_BLOCK_AGE {
            trace!(target: "sync", "Ignored ancient new block {:?}", hash);

View File

@@ -1421,7 +1421,7 @@ impl ChainSync {
        self.check_resume(io);
    }

-    /// called when block is imported to chain - propagates the blocks and updates transactions sent to peers
+    // t_nb 11.4 called when block is imported to chain - propagates the blocks and updates transactions sent to peers
    pub fn chain_new_blocks(
        &mut self,
        io: &mut dyn SyncIo,
@@ -1437,7 +1437,9 @@
        if !is_syncing || !sealed.is_empty() || !proposed.is_empty() {
            trace!(target: "sync", "Propagating blocks, state={:?}", self.state);
+            // t_nb 11.4.1 propagate latest blocks
            SyncPropagator::propagate_latest_blocks(self, io, sealed);
+            // t_nb 11.4.4 propagate proposed blocks
            SyncPropagator::propagate_proposed_blocks(self, io, proposed);
        }
        if !invalid.is_empty() {
@@ -1446,7 +1448,7 @@
        }

        if !is_syncing && !enacted.is_empty() && !self.peers.is_empty() {
-            // Select random peer to re-broadcast transactions to.
+            // t_nb 11.4.5 Select random peer to re-broadcast transactions to.
            let peer = random::new().gen_range(0, self.peers.len());
            trace!(target: "sync", "Re-broadcasting transactions to a random peer.");
            self.peers.values_mut().nth(peer).map(|peer_info| {

View File

@@ -39,7 +39,7 @@ use super::{
pub struct SyncPropagator;

impl SyncPropagator {
-    /// propagates latest block to a set of peers
+    // t_nb 11.4.3 propagates latest block to a set of peers
    pub fn propagate_blocks(
        sync: &mut ChainSync,
        chain_info: &BlockChainInfo,
@@ -72,7 +72,7 @@ impl SyncPropagator {
        sent
    }

-    /// propagates new known hashes to all peers
+    // t_nb 11.4.2 propagates new known hashes to all peers
    pub fn propagate_new_hashes(
        sync: &mut ChainSync,
        chain_info: &BlockChainInfo,
@@ -279,6 +279,7 @@ impl SyncPropagator {
        sent_to_peers
    }

+    // t_nb 11.4.1 propagate latest blocks to peers
    pub fn propagate_latest_blocks(sync: &mut ChainSync, io: &mut dyn SyncIo, sealed: &[H256]) {
        let chain_info = io.chain().chain_info();
        if (((chain_info.best_block_number as i64) - (sync.last_sent_block_number as i64)).abs()
@@ -287,15 +288,19 @@
        {
            let peers = sync.get_lagging_peers(&chain_info);
            if sealed.is_empty() {
+                // t_nb 11.4.2
                let hashes = SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers);
                let peers = ChainSync::select_random_peers(&peers);
+                // t_nb 11.4.3
                let blocks =
                    SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers);
                if blocks != 0 || hashes != 0 {
                    trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
                }
            } else {
+                // t_nb 11.4.3
                SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers);
+                // t_nb 11.4.2
                SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers);
                trace!(target: "sync", "Sent sealed block to all peers");
            };
@@ -303,7 +308,7 @@ impl SyncPropagator {
        sync.last_sent_block_number = chain_info.best_block_number;
    }

-    /// Distribute valid proposed blocks to subset of current peers.
+    // t_nb 11.4.4 Distribute valid proposed blocks to a subset of current peers (if there are any proposed).
    pub fn propagate_proposed_blocks(
        sync: &mut ChainSync,
        io: &mut dyn SyncIo,

View File

@@ -468,7 +468,7 @@ impl From<SignedTransaction> for UnverifiedTransaction {
}

impl SignedTransaction {
-    /// Try to verify transaction and recover sender.
+    // t_nb 5.3.1 Try to verify transaction and recover sender.
    pub fn new(transaction: UnverifiedTransaction) -> Result<Self, ethkey::Error> {
        if transaction.is_unsigned() {
            return Err(ethkey::Error::InvalidSignature);

View File

@@ -251,6 +251,7 @@ impl TransactionQueue {
        self.pool.write().listener_mut().0.set_in_chain_checker(f)
    }

+    // t_nb 10.2
    /// Import a set of transactions to the pool.
    ///
    /// Given blockchain and state access (Client)
@@ -471,7 +472,7 @@ impl TransactionQueue {
        (pending_readiness, state_readiness)
    }

-    /// Culls all stalled transactions from the pool.
+    // t_nb 10.5.1 Culls all stalled transactions from the pool.
    pub fn cull<C: client::NonceClient + Clone>(&self, client: C) {
        trace_time!("pool::cull");
        // We don't care about future transactions, so nonce_cap is not important.

View File

@@ -336,6 +336,7 @@ impl<T: InformantData> Informant<T> {
}

impl ChainNotify for Informant<FullNodeInformantData> {
+    // t_nb 11.2 Informant. Prints new block inclusion to console/log.
    fn new_blocks(&self, new_blocks: NewBlocks) {
        if new_blocks.has_more_blocks_to_import {
            return;

View File

@@ -175,6 +175,7 @@ impl<C> ChainNotificationHandler<C> {
}

impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> {
+    // t_nb 11.3 RPC. Notify header/logs subscribers about the new block
    fn new_blocks(&self, new_blocks: NewBlocks) {
        if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() {
            return;

View File

@@ -83,6 +83,7 @@ impl AclStorage for OnChainAclStorage {
}

impl ChainNotify for OnChainAclStorage {
+    // t_nb 11.5 SecretStore OnChainAclStorage.
    fn new_blocks(&self, new_blocks: NewBlocks) {
        if new_blocks.has_more_blocks_to_import {
            return;

View File

@@ -168,6 +168,7 @@ impl KeyServerSet for OnChainKeyServerSet {
}

impl ChainNotify for OnChainKeyServerSet {
+    // t_nb 11.6 SecretStore OnChainKeyServerSet.
    fn new_blocks(&self, new_blocks: NewBlocks) {
        if new_blocks.has_more_blocks_to_import {
            return;

View File

@@ -624,6 +624,7 @@ impl Drop for ServiceContractListener {
}

impl ChainNotify for ServiceContractListener {
+    // t_nb 11.7 SecretStore ServiceContractListener
    fn new_blocks(&self, new_blocks: NewBlocks) {
        if new_blocks.has_more_blocks_to_import {
            return;

View File

@@ -332,6 +332,7 @@ impl JournalDB for OverlayRecentDB {
        self.journal_overlay.read().earliest_era
    }

+    // t_nb 9.6
    fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
        trace!(target: "journaldb", "entry: #{} ({})", now, id);