Trace comment on new block inclusion (#100)
parent bfb65140d2
commit f9f536cd08
@@ -1476,7 +1476,7 @@ impl BlockChain {
}
}

/// Apply pending insertion updates
// t_nb 9.12 commit changes to become the current best by applying pending insertion updates
pub fn commit(&self) {
let mut pending_best_ancient_block = self.pending_best_ancient_block.write();
let mut pending_best_block = self.pending_best_block.write();
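A minimal sketch of the commit pattern above (t_nb 9.12): pending values staged under their own locks are promoted to the current best. The field types and names here are illustrative stand-ins, not the real BlockChain internals.

use std::sync::RwLock;

struct Chain {
    // stand-ins for the real pending/best block bookkeeping
    pending_best_block: RwLock<Option<u64>>,
    best_block: RwLock<Option<u64>>,
}

impl Chain {
    fn commit(&self) {
        // take whatever the pending insertion staged...
        let pending = self.pending_best_block.write().take();
        // ...and promote it to the current best, if anything was staged
        if let Some(best) = pending {
            *self.best_block.write() = Some(best);
        }
    }
}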
@@ -161,7 +161,7 @@ pub trait Drain {
}

impl<'x> OpenBlock<'x> {
/// Create a new `OpenBlock` ready for transaction pushing.
// t_nb 8.1 Create a new `OpenBlock` ready for transaction pushing.
pub fn new<'a, I: IntoIterator<Item = ExtendedHeader>>(
engine: &'x dyn EthEngine,
factories: Factories,
@@ -176,6 +176,8 @@ impl<'x> OpenBlock<'x> {
ancestry: I,
) -> Result<Self, Error> {
let number = parent.number() + 1;

// t_nb 8.1.1 get parent StateDB.
let state = State::from_existing(
db,
parent.state_root().clone(),
@@ -198,14 +200,17 @@ impl<'x> OpenBlock<'x> {
let gas_floor_target = cmp::max(gas_range_target.0, engine.params().min_gas_limit);
let gas_ceil_target = cmp::max(gas_range_target.1, gas_floor_target);

// t_nb 8.1.2 calculate what the child gas limits should be.
engine.machine().populate_from_parent(
&mut r.block.header,
parent,
gas_floor_target,
gas_ceil_target,
);
// t_nb 8.1.3 this adds engine-specific things
engine.populate_from_parent(&mut r.block.header, parent);

// t_nb 8.1.3 updating last hashes and the DAO fork, for ethash.
engine.machine().on_new_block(&mut r.block)?;
engine.on_new_block(&mut r.block, is_epoch_begin, &mut ancestry.into_iter())?;

@@ -222,7 +227,7 @@ impl<'x> OpenBlock<'x> {
self.block.header.set_gas_limit(U256::max_value());
}

/// Add an uncle to the block, if possible.
// t_nb 8.4 Add an uncle to the block, if possible.
///
/// NOTE Will check chain constraints and the uncle number but will NOT check
/// that the header itself is actually valid.
@@ -343,13 +348,17 @@ impl<'x> OpenBlock<'x> {
})
}

/// Turn this into a `LockedBlock`.
// t_nb 8.5 Turn this into a `LockedBlock`.
pub fn close_and_lock(self) -> Result<LockedBlock, Error> {
let mut s = self;

// t_nb 8.5.1 engine applies block rewards (Ethash and AuRa do; Clique is empty)
s.engine.on_close_block(&mut s.block)?;

// t_nb 8.5.2 commit account changes from cache to tree
s.block.state.commit()?;

// t_nb 8.5.3 fill open block header with all other fields
s.block.header.set_transactions_root(ordered_trie_root(
s.block.transactions.iter().map(|e| e.rlp_bytes()),
));
@@ -506,7 +515,7 @@ impl Drain for SealedBlock {
}
}

/// Enact the block given by block header, transactions and uncles
// t_nb 8.0 Enact the block given by block header, transactions and uncles
pub(crate) fn enact(
header: Header,
transactions: Vec<SignedTransaction>,
@@ -532,6 +541,7 @@ pub(crate) fn enact(
None
};

// t_nb 8.1 Create new OpenBlock
let mut b = OpenBlock::new(
engine,
factories,
@@ -556,17 +566,22 @@ pub(crate) fn enact(
b.block.header.number(), root, env.author, author_balance);
}

// t_nb 8.2 transfer all fields from the current header to the OpenBlock header that we created
b.populate_from(&header);

// t_nb 8.3 execute transactions one by one
b.push_transactions(transactions)?;

// t_nb 8.4 Push uncles to OpenBlock and check if we have more than the max uncles
for u in uncles {
b.push_uncle(u)?;
}

// t_nb 8.5 close block
b.close_and_lock()
}

/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
// t_nb 8.0 Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_verified(
block: PreverifiedBlock,
engine: &dyn EthEngine,
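The t_nb 8.1.2 step above derives the child block's gas-limit targets. A minimal sketch of that clamping, using plain u64 instead of U256 and illustrative parameter names:

use std::cmp;

fn gas_targets(gas_range_target: (u64, u64), min_gas_limit: u64) -> (u64, u64) {
    // the floor may never fall below the chain's minimum gas limit
    let gas_floor_target = cmp::max(gas_range_target.0, min_gas_limit);
    // the ceiling may never fall below the floor
    let gas_ceil_target = cmp::max(gas_range_target.1, gas_floor_target);
    (gas_floor_target, gas_ceil_target)
}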
@@ -275,7 +275,7 @@ impl Importer {
})
}

/// This is triggered by a message coming from a block queue when the block is ready for insertion
// t_nb 6.0 This is triggered by a message coming from a block queue when the block is ready for insertion
pub fn import_verified_blocks(&self, client: &Client) -> usize {
// Shortcut out if we know we're incapable of syncing the chain.
if !client.enabled.load(AtomicOrdering::Relaxed) {
@@ -315,11 +315,13 @@ impl Importer {
invalid_blocks.insert(hash);
continue;
}

// t_nb 7.0 check and lock block
match self.check_and_lock_block(&bytes, block, client) {
Ok((closed_block, pending)) => {
imported_blocks.push(hash);
let transactions_len = closed_block.transactions.len();

// t_nb 8.0 commit block to db
let route = self.commit_block(
closed_block,
&header,
@@ -362,6 +364,7 @@ impl Importer {
if !imported_blocks.is_empty() {
let route = ChainRoute::from(import_results.as_ref());

// t_nb 10 Notify miner about new included block.
if !has_more_blocks_to_import {
self.miner.chain_new_blocks(
client,
@@ -373,6 +376,7 @@ impl Importer {
);
}

// t_nb 11 notify rest of system about new block inclusion
client.notify(|notify| {
notify.new_blocks(NewBlocks::new(
imported_blocks.clone(),
@@ -394,6 +398,7 @@ impl Importer {
imported
}

// t_nb 6.0.1 check and lock block,
fn check_and_lock_block(
&self,
bytes: &[u8],
@@ -404,13 +409,14 @@ impl Importer {
let header = block.header.clone();

// Check the block isn't so old we won't be able to enact it.
// t_nb 7.1 check if block is older than the last pruned block
let best_block_number = client.chain.read().best_block_number();
if client.pruning_info().earliest_state > header.number() {
warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
bail!("Block is ancient");
}

// Check if parent is in chain
// t_nb 7.2 Check if parent is in chain
let parent = match client.block_header_decoded(BlockId::Hash(*header.parent_hash())) {
Some(h) => h,
None => {
@@ -420,7 +426,7 @@ impl Importer {
};

let chain = client.chain.read();
// Verify Block Family
// t_nb 7.3 verify block family
let verify_family_result = self.verifier.verify_block_family(
&header,
&parent,
@@ -437,6 +443,7 @@ impl Importer {
bail!(e);
};

// t_nb 7.4 verify block external
let verify_external_result = self.verifier.verify_block_external(&header, engine);
if let Err(e) = verify_external_result {
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
@@ -444,7 +451,9 @@ impl Importer {
};

// Enact Verified Block
// t_nb 7.5 Build last hashes. Get parent state db. Get epoch_transition
let last_hashes = client.build_last_hashes(header.parent_hash());

let db = client
.state_db
.read()
@@ -454,6 +463,7 @@ impl Importer {
.epoch_transition(parent.number(), *header.parent_hash())
.is_some();

// t_nb 8.0 Block enacting. Execution of transactions.
let enact_result = enact_verified(
block,
engine,
@@ -474,7 +484,7 @@ impl Importer {
}
};

// Strip receipts for blocks before validate_receipts_transition,
// t_nb 7.6 Strip receipts for blocks before validate_receipts_transition,
// if the expected receipts root header does not match.
// (i.e. allow inconsistency in receipts outcome before the transition block)
if header.number() < engine.params().validate_receipts_transition
@@ -483,7 +493,7 @@ impl Importer {
locked_block.strip_receipts_outcomes();
}

// Final Verification
// t_nb 7.7 Final Verification. See if the block that we created (executed) matches exactly the block that we received.
if let Err(e) = self
.verifier
.verify_block_final(&header, &locked_block.header)
@@ -570,6 +580,7 @@ impl Importer {

let mut batch = DBTransaction::new();

// t_nb 9.1 Gather all ancestry actions. (Used only by AuRa)
let ancestry_actions = self
.engine
.ancestry_actions(&header, &mut chain.ancestry_with_metadata_iter(*parent));
@@ -605,24 +616,28 @@ impl Importer {
}
};

// t_nb 9.2 calculate route between the current and latest block.
let route = chain.tree_route(best_hash, *parent).expect("forks are only kept when it has common ancestors; tree route from best to prospective's parent always exists; qed");

// t_nb 9.3 Check block total difficulty
let fork_choice = if route.is_from_route_finalized {
ForkChoice::Old
} else {
self.engine.fork_choice(&new, &best)
};

// CHECK! I *think* this is fine, even if the state_root is equal to another
// t_nb 9.4 CHECK! I *think* this is fine, even if the state_root is equal to another
// already-imported block of the same number.
// TODO: Prove it with a test.
let mut state = block.state.drop().1;

// check epoch end signal, potentially generating a proof on the current
// state.
// t_nb 9.5 check epoch end signal, potentially generating a proof on the current
// state. Write transition into db.
if let Some(pending) = pending {
chain.insert_pending_transition(&mut batch, header.hash(), pending);
}

// t_nb 9.6 push state to database Transaction. (It calls journal_under from JournalDB)
state
.journal_under(&mut batch, number, hash)
.expect("DB commit failed");
@@ -633,6 +648,7 @@ impl Importer {
let AncestryAction::MarkFinalized(a) = ancestry_action;

if a != header.hash() {
// t_nb 9.7 if there is a finalized ancestor, mark that change in the db. (Used by AuRa)
chain
.mark_finalized(&mut batch, a)
.expect("Engine's ancestry action must be known blocks; qed");
@@ -645,6 +661,7 @@ impl Importer {
})
.collect();

// t_nb 9.8 insert block
let route = chain.insert_block(
&mut batch,
block_data,
@@ -655,6 +672,7 @@ impl Importer {
},
);

// t_nb 9.9 insert traces (if they are enabled)
client.tracedb.read().import(
&mut batch,
TraceImportRequest {
@@ -667,15 +685,22 @@ impl Importer {
);

let is_canon = route.enacted.last().map_or(false, |h| h == hash);

// t_nb 9.10 sync cache
state.sync_cache(&route.enacted, &route.retracted, is_canon);
// Final commit to the DB
// t_nb 9.11 Write Transaction to database (cached)
client.db.read().key_value().write_buffered(batch);
// t_nb 9.12 commit changes to become the current best by applying pending insertion updates (Sync point)
chain.commit();

// t_nb 9.13 check epoch end. Relevant only to AuRa and, it seems, the light engine
self.check_epoch_end(&header, &finalized, &chain, client);

// t_nb 9.14 update last hashes. They are built in step 7.5
client.update_last_hashes(&parent, hash);

// t_nb 9.15 prune ancient states
if let Err(e) = client.prune_ancient(state, &chain) {
warn!("Failed to prune ancient state data: {}", e);
}
@@ -1098,7 +1123,7 @@ impl Client {
with_call(&call)
}

// prune ancient states until below the memory limit or only the minimum amount remain.
// t_nb 9.15 prune ancient states until below the memory limit or only the minimum amount remain.
fn prune_ancient(
&self,
mut state_db: StateDB,
@@ -1138,6 +1163,7 @@ impl Client {
Ok(())
}

// t_nb 9.14 update last hashes. They are built in step 7.5
fn update_last_hashes(&self, parent: &H256, hash: &H256) {
let mut hashes = self.last_hashes.write();
if hashes.front().map_or(false, |h| h == parent) {
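A minimal sketch of the last-hashes update (t_nb 9.14) shown truncated above: the cache is only extended when the new block's parent is already at the front, so it stays a contiguous window of the canonical chain. The hash type and the 256-entry cap are illustrative assumptions.

use std::collections::VecDeque;

fn update_last_hashes(hashes: &mut VecDeque<String>, parent: &str, hash: &str) {
    if hashes.front().map_or(false, |h| h == parent) {
        // keep the window bounded (cap size assumed here)
        if hashes.len() >= 256 {
            hashes.pop_back();
        }
        hashes.push_front(hash.to_string());
    }
}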
@@ -1721,11 +1747,14 @@ impl CallContract for Client {
}

impl ImportBlock for Client {
// t_nb 2.0 import block to client
fn import_block(&self, unverified: Unverified) -> EthcoreResult<H256> {
// t_nb 2.1 check if header hash is known to us.
if self.chain.read().is_known(&unverified.hash()) {
bail!(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain));
}

// t_nb 2.2 check if parent is known
let status = self.block_status(BlockId::Hash(unverified.parent_hash()));
if status == BlockStatus::Unknown {
bail!(EthcoreErrorKind::Block(BlockError::UnknownParent(
@@ -1743,14 +1772,16 @@ impl ImportBlock for Client {
None
};

// t_nb 2.3
match self.importer.block_queue.import(unverified) {
Ok(hash) => {
// t_nb 2.4 If block is okay and the queue is empty we propagate the block in a `PriorityTask` to be rebroadcast
if let Some((raw, hash, difficulty)) = raw {
self.notify(move |n| n.block_pre_import(&raw, &hash, &difficulty));
}
Ok(hash)
}
// we only care about block errors (not import errors)
// t_nb 2.5 if the block is not okay, print the error; we only care about block errors (not import errors)
Err((Some(block), EthcoreError(EthcoreErrorKind::Block(err), _))) => {
self.importer
.bad_blocks
@@ -1307,6 +1307,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
Ok(())
}

// t_nb 8.1.5
fn on_new_block(
&self,
block: &mut ExecutedBlock,
@@ -1531,7 +1532,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
Ok(())
}

// Check the validators.
// t_nb 6.4 Check the validators.
fn verify_block_external(&self, header: &Header) -> Result<(), Error> {
let (validators, set_number) = self.epoch_set(header)?;
@@ -488,7 +488,7 @@ pub trait Engine<M: Machine>: Sync + Send {
header_timestamp > parent_timestamp
}

/// Gather all ancestry actions. Called at the last stage when a block is committed. The Engine must guarantee that
// t_nb 9.1 Gather all ancestry actions. Called at the last stage when a block is committed. The Engine must guarantee that
/// the ancestry exists.
fn ancestry_actions(
&self,
@@ -507,7 +507,7 @@ pub trait Engine<M: Machine>: Sync + Send {
}
}

/// Check whether a given block is the best block based on the default total difficulty rule.
// t_nb 9.3 Check whether a given block is the best block based on the default total difficulty rule.
pub fn total_difficulty_fork_choice(new: &ExtendedHeader, best: &ExtendedHeader) -> ForkChoice {
if new.total_score() > best.total_score() {
ForkChoice::New
@@ -562,7 +562,7 @@ pub trait EthEngine: Engine<::machine::EthereumMachine> {
self.machine().create_address_scheme(number)
}

/// Verify a particular transaction is valid.
// t_nb 5.3.1 Verify a particular transaction is valid.
///
/// Unordered verification doesn't rely on the transaction execution order,
/// i.e. it should only verify stuff that doesn't assume any previous transactions
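A minimal sketch of the default total-difficulty rule referenced by t_nb 9.3: the new block wins only if its total score strictly exceeds the current best. Total scores are modelled as plain integers here rather than ExtendedHeader values.

#[derive(Debug, PartialEq)]
enum ForkChoice { New, Old }

fn total_difficulty_fork_choice(new_total_score: u128, best_total_score: u128) -> ForkChoice {
    if new_total_score > best_total_score {
        ForkChoice::New
    } else {
        ForkChoice::Old
    }
}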
@@ -238,7 +238,7 @@ impl EthereumMachine {
Ok(())
}

/// Logic to perform on a new block: updating last hashes and the DAO
// t_nb 8.1.3 Logic to perform on a new block: updating last hashes and the DAO
/// fork, for ethash.
pub fn on_new_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> {
self.push_last_hash(block)?;
@@ -1230,7 +1230,7 @@ impl miner::MinerService for Miner {
)
}

/// Update sealing if required.
// t_nb 10.4 Update sealing if required.
/// Prepare the block and work if the Engine does not seal internally.
fn update_sealing<C>(&self, chain: &C, force: ForceUpdateSealing)
where
@@ -1339,6 +1339,7 @@ impl miner::MinerService for Miner {
})
}

// t_nb 10 notify miner about newly included blocks
fn chain_new_blocks<C>(
&self,
chain: &C,
@@ -1363,11 +1364,11 @@ impl miner::MinerService for Miner {
self.nonce_cache.clear();
}

// First update gas limit in transaction queue and minimal gas price.
// t_nb 10.1 First update gas limit in transaction queue and minimal gas price.
let gas_limit = *chain.best_block_header().gas_limit();
self.update_transaction_queue_limits(gas_limit);

// Then import all transactions from retracted blocks.
// t_nb 10.2 Then import all transactions from retracted blocks (retracted means from side chain).
let client = self.pool_client(chain);
{
retracted
@@ -1378,7 +1379,8 @@ impl miner::MinerService for Miner {
let txs = block.transactions()
.into_iter()
.map(pool::verifier::Transaction::Retracted)
.collect();
.collect();
// t_nb 10.2
let _ = self.transaction_queue.import(
client.clone(),
txs,
@@ -1387,12 +1389,13 @@ impl miner::MinerService for Miner {
}

if has_new_best_block || (imported.len() > 0 && self.options.reseal_on_uncle) {
// Reset `next_allowed_reseal` in case a block is imported.
// t_nb 10.3 Reset `next_allowed_reseal` in case a block is imported.
// Even if min_period is high, we will always attempt to create
// new pending block.
self.sealing.lock().next_allowed_reseal = Instant::now();

if !is_internal_import {
// t_nb 10.4 if it is not an internal import, update sealing
// --------------------------------------------------------------------------
// | NOTE Code below requires sealing locks. |
// | Make sure to release the locks before calling that method. |
@@ -1402,7 +1405,7 @@ impl miner::MinerService for Miner {
}

if has_new_best_block {
// Make sure to cull transactions after we update sealing.
// t_nb 10.5 Make sure to cull transactions after we update sealing.
// Not culling won't lead to old transactions being added to the block
// (thanks to Ready), but culling can take significant amount of time,
// so best to leave it after we create some work for miners to prevent increased
@@ -1424,7 +1427,9 @@ impl miner::MinerService for Miner {
&*accounts,
service_transaction_checker.as_ref(),
);
// t_nb 10.5 do culling
queue.cull(client);
// reseal is only used by InstaSeal engine
if engine.should_reseal_on_update() {
// force update_sealing here to skip `reseal_required` checks
chain.update_sealing(ForceUpdateSealing::Yes);
@@ -1435,13 +1440,16 @@ impl miner::MinerService for Miner {
warn!(target: "miner", "Error queueing cull: {:?}", e);
}
} else {
// t_nb 10.5 do culling
self.transaction_queue.cull(client);
// reseal is only used by InstaSeal engine
if self.engine.should_reseal_on_update() {
// force update_sealing here to skip `reseal_required` checks
self.update_sealing(chain, ForceUpdateSealing::Yes);
}
}
}
// t_nb 10.6 For the service transaction checker, update addresses to the latest block
if let Some(ref service_transaction_checker) = self.service_transaction_checker {
match service_transaction_checker.refresh_cache(chain) {
Ok(true) => {
@@ -111,6 +111,7 @@ impl Watcher {
}

impl ChainNotify for Watcher {
// t_nb 11.1 check block number and trigger snapshot creation if needed.
fn new_blocks(&self, new_blocks: NewBlocks) {
if self.oracle.is_major_importing() || new_blocks.has_more_blocks_to_import {
return;
@@ -43,7 +43,7 @@ pub trait Backend: Send {
/// Treat the backend as a writeable hashdb.
fn as_hash_db_mut(&mut self) -> &mut dyn HashDB<KeccakHasher, DBValue>;

/// Add an account entry to the cache.
// t_nb 9.4 Add an account entry to the cache.
fn add_to_account_cache(&mut self, addr: Address, data: Option<Account>, modified: bool);

/// Add a global code cache entry. This doesn't need to worry about canonicality because
@@ -996,7 +996,7 @@ impl<B: Backend> State<B> {
Ok(())
}

/// Commits our cached account changes into the trie.
// t_nb 8.5.2 Commits our cached account changes into the trie.
pub fn commit(&mut self) -> Result<(), Error> {
assert!(self.checkpoints.borrow().is_empty());
// first, commit the sub trees.
@@ -1036,7 +1036,7 @@ impl<B: Backend> State<B> {
Ok(())
}

/// Propagate local cache into shared canonical state cache.
// t_nb 9.4 Propagate local cache into shared canonical state cache.
fn propagate_to_global_cache(&mut self) {
let mut addresses = self.cache.borrow_mut();
trace!("Committing cache {:?} entries", addresses.len());
@@ -147,6 +147,7 @@ impl StateDB {
Ok(records)
}

// t_nb 9.15
/// Mark a given candidate from an ancient era as canonical, enacting its removals from the
/// backing database and reverting any non-canonical historical commit's insertions.
pub fn mark_canonical(
@@ -158,7 +159,7 @@ impl StateDB {
self.db.mark_canonical(batch, end_era, canon_id)
}

/// Propagate local cache into the global cache and synchronize
// t_nb 9.10 Propagate local cache into the global cache and synchronize
/// the global cache with the best block state.
/// This function updates the global cache by removing entries
/// that are invalidated by chain reorganization. `sync_cache`
@@ -95,6 +95,7 @@ pub mod blocks {
type Unverified = Unverified;
type Verified = PreverifiedBlock;

// t_nb 4.0 verify_block_basic
fn create(
input: Self::Input,
engine: &dyn EthEngine,
@@ -113,6 +114,7 @@ pub mod blocks {
}
}

// t_nb 5.0 verify standalone block
fn verify(
un: Self::Unverified,
engine: &dyn EthEngine,
@ -364,7 +364,7 @@ impl<K: Kind> VerificationQueue<K> {
|
||||
}
|
||||
}
|
||||
|
||||
// do work.
|
||||
// do work on this item.
|
||||
let item = {
|
||||
// acquire these locks before getting the item to verify.
|
||||
let mut unverified = verification.unverified.lock();
|
||||
@ -387,10 +387,12 @@ impl<K: Kind> VerificationQueue<K> {
|
||||
};
|
||||
|
||||
let hash = item.hash();
|
||||
// t_nb 5.0 verify standalone block (this verification is done in VerificationQueue thread pool)
|
||||
let is_ready = match K::verify(item, &*engine, verification.check_seal) {
|
||||
Ok(verified) => {
|
||||
let mut verifying = verification.verifying.lock();
|
||||
let mut idx = None;
|
||||
// find item again and remove it from verified queue
|
||||
for (i, e) in verifying.iter_mut().enumerate() {
|
||||
if e.hash == hash {
|
||||
idx = Some(i);
|
||||
@ -515,17 +517,20 @@ impl<K: Kind> VerificationQueue<K> {
|
||||
}
|
||||
|
||||
/// Add a block to the queue.
|
||||
// t_nb 3.0 import block to verification queue
|
||||
pub fn import(&self, input: K::Input) -> Result<H256, (Option<K::Input>, Error)> {
|
||||
let hash = input.hash();
|
||||
let raw_hash = input.raw_hash();
|
||||
// t_nb 3.1 check if block is currently processing or marked as bad.
|
||||
{
|
||||
// t_nb 3.1.0 is currently processing
|
||||
if self.processing.read().contains_key(&hash) {
|
||||
bail!((
|
||||
Some(input),
|
||||
ErrorKind::Import(ImportErrorKind::AlreadyQueued).into()
|
||||
));
|
||||
}
|
||||
|
||||
// t_nb 3.1.1 is marked as bad
|
||||
let mut bad = self.verification.bad.lock();
|
||||
if bad.contains(&hash) || bad.contains(&raw_hash) {
|
||||
bail!((
|
||||
@ -533,7 +538,7 @@ impl<K: Kind> VerificationQueue<K> {
|
||||
ErrorKind::Import(ImportErrorKind::KnownBad).into()
|
||||
));
|
||||
}
|
||||
|
||||
// t_nb 3.1.2 its parent is marked as bad
|
||||
if bad.contains(&input.parent_hash()) {
|
||||
bad.insert(hash);
|
||||
bail!((
|
||||
|
@ -63,26 +63,36 @@ impl HeapSizeOf for PreverifiedBlock {
|
||||
}
|
||||
}
|
||||
|
||||
/// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block
|
||||
// t_nb 4.0 Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block
|
||||
pub fn verify_block_basic(
|
||||
block: &Unverified,
|
||||
engine: &dyn EthEngine,
|
||||
check_seal: bool,
|
||||
) -> Result<(), Error> {
|
||||
// t_nb 4.1 verify header params
|
||||
verify_header_params(&block.header, engine, true, check_seal)?;
|
||||
// t_nb 4.2 verify header time (addded in new OE version)
|
||||
// t_nb 4.3 verify block integrity
|
||||
verify_block_integrity(block)?;
|
||||
|
||||
if check_seal {
|
||||
// t_nb 4.4 Check block seal. It calls engine to verify block basic
|
||||
engine.verify_block_basic(&block.header)?;
|
||||
}
|
||||
|
||||
// t_nb 4.5 for all uncled verify header and call engine to verify block basic
|
||||
for uncle in &block.uncles {
|
||||
// t_nb 4.5.1
|
||||
verify_header_params(uncle, engine, false, check_seal)?;
|
||||
if check_seal {
|
||||
// t_nb 4.5.2
|
||||
engine.verify_block_basic(uncle)?;
|
||||
}
|
||||
}
|
||||
|
||||
// t_nb 4.6 call engine.gas_limit_override (Used only by Aura) TODO added in new version
|
||||
|
||||
// t_nb 4.7 for every transaction call engine.verify_transaction_basic
|
||||
for t in &block.transactions {
|
||||
engine.verify_transaction_basic(t, &block.header)?;
|
||||
}
|
||||
@ -90,7 +100,7 @@ pub fn verify_block_basic(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
|
||||
// t_nb 5.0 Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
|
||||
/// Still operates on a individual block
|
||||
/// Returns a `PreverifiedBlock` structure populated with transactions
|
||||
pub fn verify_block_unordered(
|
||||
@ -100,8 +110,10 @@ pub fn verify_block_unordered(
|
||||
) -> Result<PreverifiedBlock, Error> {
|
||||
let header = block.header;
|
||||
if check_seal {
|
||||
// t_nb 5.1
|
||||
engine.verify_block_unordered(&header)?;
|
||||
for uncle in &block.uncles {
|
||||
// t_nb 5.2
|
||||
engine.verify_block_unordered(uncle)?;
|
||||
}
|
||||
}
|
||||
@ -112,11 +124,14 @@ pub fn verify_block_unordered(
|
||||
None
|
||||
};
|
||||
|
||||
// t_nb 5.3 iterate over all transactions
|
||||
let transactions = block
|
||||
.transactions
|
||||
.into_iter()
|
||||
.map(|t| {
|
||||
// t_nb 5.3.1 call verify_unordered. Check signatures and calculate address
|
||||
let t = engine.verify_transaction_unordered(t, &header)?;
|
||||
// t_nb 5.3.2 check if nonce is more then max nonce (EIP-168 and EIP169)
|
||||
if let Some(max_nonce) = nonce_cap {
|
||||
if t.nonce >= max_nonce {
|
||||
return Err(BlockError::TooManyTransactions(t.sender()).into());
|
||||
@ -146,7 +161,7 @@ pub struct FullFamilyParams<'a, C: BlockInfo + CallContract + 'a> {
|
||||
pub client: &'a C,
|
||||
}
|
||||
|
||||
/// Phase 3 verification. Check block information against parent and uncles.
|
||||
// t_nb 6.3 Phase 3 verification. Check block information against parent and uncles.
|
||||
pub fn verify_block_family<C: BlockInfo + CallContract>(
|
||||
header: &Header,
|
||||
parent: &Header,
|
||||
@ -154,6 +169,7 @@ pub fn verify_block_family<C: BlockInfo + CallContract>(
|
||||
do_full: Option<FullFamilyParams<C>>,
|
||||
) -> Result<(), Error> {
|
||||
// TODO: verify timestamp
|
||||
// t_nb 6.3.1 verify parent
|
||||
verify_parent(&header, &parent, engine)?;
|
||||
engine.verify_block_family(&header, &parent)?;
|
||||
|
||||
@ -162,8 +178,10 @@ pub fn verify_block_family<C: BlockInfo + CallContract>(
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// t_nb 6.3.2 verify uncles
|
||||
verify_uncles(params.block, params.block_provider, engine)?;
|
||||
|
||||
// t_nb 6.3.3 verify all transactions
|
||||
for tx in ¶ms.block.transactions {
|
||||
// transactions are verified against the parent header since the current
|
||||
// state wasn't available when the tx was created
|
||||
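A minimal sketch of the per-transaction nonce-cap check from t_nb 5.3.2 above, with a plain u64 nonce and a string error in place of BlockError::TooManyTransactions:

fn check_nonce_cap(nonce: u64, nonce_cap: Option<u64>) -> Result<(), &'static str> {
    if let Some(max_nonce) = nonce_cap {
        if nonce >= max_nonce {
            // the real code reports the offending sender's address
            return Err("too many transactions from this sender");
        }
    }
    Ok(())
}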
@@ -529,6 +529,7 @@ impl ChainNotify for EthSync {
}
}

// t_nb 11.4
fn new_blocks(&self, new_blocks: NewBlocks) {
if new_blocks.has_more_blocks_to_import {
return;
@@ -155,6 +155,7 @@ impl SyncHandler {
trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id);
return Ok(());
}
// t_nb 1.0 decode RLP
let block = Unverified::from_rlp(r.at(0)?.as_raw().to_vec())?;
let hash = block.header.hash();
let number = block.header.number();
@@ -166,7 +167,9 @@ impl SyncHandler {
let difficulty: U256 = r.val_at(1)?;
// Most probably the sent block is being imported by peer right now
// Use td and hash, that peer must have for now
// t_nb 1.1 check the new block difficulty (it can be found as the second item in the RLP) and update peer difficulty
let parent_td = difficulty.checked_sub(*block.header.difficulty());

if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) {
if peer
.difficulty
@@ -181,6 +184,7 @@ impl SyncHandler {
peer.latest_hash = *parent_hash;
}

// t_nb 1.2 if the block is more than 20 blocks older than the last imported block, don't process it
let last_imported_number = sync.new_blocks.last_imported_block_number();
if last_imported_number > number && last_imported_number - number > MAX_NEW_BLOCK_AGE {
trace!(target: "sync", "Ignored ancient new block {:?}", hash);
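A minimal sketch of the two sanity steps above: t_nb 1.1 derives the parent total difficulty with a checked subtraction (None if the announced difficulty is inconsistent), and t_nb 1.2 drops blocks that lag the last imported block by more than MAX_NEW_BLOCK_AGE. Plain u64 values and the constant's value of 20 are assumptions for illustration.

const MAX_NEW_BLOCK_AGE: u64 = 20;

fn parent_total_difficulty(announced_td: u64, block_difficulty: u64) -> Option<u64> {
    announced_td.checked_sub(block_difficulty)
}

fn is_too_old(last_imported_number: u64, number: u64) -> bool {
    // compare before subtracting so the unsigned arithmetic cannot underflow
    last_imported_number > number && last_imported_number - number > MAX_NEW_BLOCK_AGE
}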
@@ -1421,7 +1421,7 @@ impl ChainSync {
self.check_resume(io);
}

/// called when block is imported to chain - propagates the blocks and updates transactions sent to peers
// t_nb 11.4 called when block is imported to chain - propagates the blocks and updates transactions sent to peers
pub fn chain_new_blocks(
&mut self,
io: &mut dyn SyncIo,
@@ -1437,7 +1437,9 @@ impl ChainSync {

if !is_syncing || !sealed.is_empty() || !proposed.is_empty() {
trace!(target: "sync", "Propagating blocks, state={:?}", self.state);
// t_nb 11.4.1 propagate latest blocks
SyncPropagator::propagate_latest_blocks(self, io, sealed);
// t_nb 11.4.4 propagate proposed blocks
SyncPropagator::propagate_proposed_blocks(self, io, proposed);
}
if !invalid.is_empty() {
@@ -1446,7 +1448,7 @@ impl ChainSync {
}

if !is_syncing && !enacted.is_empty() && !self.peers.is_empty() {
// Select random peer to re-broadcast transactions to.
// t_nb 11.4.5 Select random peer to re-broadcast transactions to.
let peer = random::new().gen_range(0, self.peers.len());
trace!(target: "sync", "Re-broadcasting transactions to a random peer.");
self.peers.values_mut().nth(peer).map(|peer_info| {
@@ -39,7 +39,7 @@ use super::{
pub struct SyncPropagator;

impl SyncPropagator {
/// propagates latest block to a set of peers
// t_nb 11.4.3 propagates latest block to a set of peers
pub fn propagate_blocks(
sync: &mut ChainSync,
chain_info: &BlockChainInfo,
@@ -72,7 +72,7 @@ impl SyncPropagator {
sent
}

/// propagates new known hashes to all peers
// t_nb 11.4.2 propagates new known hashes to all peers
pub fn propagate_new_hashes(
sync: &mut ChainSync,
chain_info: &BlockChainInfo,
@@ -279,6 +279,7 @@ impl SyncPropagator {
sent_to_peers
}

// t_nb 11.4.1 propagate latest blocks to peers
pub fn propagate_latest_blocks(sync: &mut ChainSync, io: &mut dyn SyncIo, sealed: &[H256]) {
let chain_info = io.chain().chain_info();
if (((chain_info.best_block_number as i64) - (sync.last_sent_block_number as i64)).abs()
@@ -287,15 +288,19 @@ impl SyncPropagator {
{
let peers = sync.get_lagging_peers(&chain_info);
if sealed.is_empty() {
// t_nb 11.4.2
let hashes = SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers);
let peers = ChainSync::select_random_peers(&peers);
// t_nb 11.4.3
let blocks =
SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers);
if blocks != 0 || hashes != 0 {
trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
}
} else {
// t_nb 11.4.3
SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers);
// t_nb 11.4.2
SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers);
trace!(target: "sync", "Sent sealed block to all peers");
};
@@ -303,7 +308,7 @@ impl SyncPropagator {
sync.last_sent_block_number = chain_info.best_block_number;
}

/// Distribute valid proposed blocks to subset of current peers.
// t_nb 11.4.4 Distribute valid proposed blocks to subset of current peers. (if there is any proposed)
pub fn propagate_proposed_blocks(
sync: &mut ChainSync,
io: &mut dyn SyncIo,
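A minimal sketch of the branching in propagate_latest_blocks above (t_nb 11.4.1): with no freshly sealed blocks, new hashes go to all lagging peers and full blocks only to a random subset; with sealed blocks, both go to every lagging peer. Peers and IO are reduced to plain counts here.

fn propagation_targets(sealed_is_empty: bool, lagging_peers: usize, random_subset: usize) -> (usize, usize) {
    // returns (peers receiving hash announcements, peers receiving full blocks)
    if sealed_is_empty {
        // t_nb 11.4.2 hashes to all lagging peers, t_nb 11.4.3 blocks to a random subset
        (lagging_peers, random_subset)
    } else {
        // sealed blocks and their hashes go to every lagging peer
        (lagging_peers, lagging_peers)
    }
}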
@@ -468,7 +468,7 @@ impl From<SignedTransaction> for UnverifiedTransaction {
}

impl SignedTransaction {
/// Try to verify transaction and recover sender.
// t_nb 5.3.1 Try to verify transaction and recover sender.
pub fn new(transaction: UnverifiedTransaction) -> Result<Self, ethkey::Error> {
if transaction.is_unsigned() {
return Err(ethkey::Error::InvalidSignature);
@@ -251,6 +251,7 @@ impl TransactionQueue {
self.pool.write().listener_mut().0.set_in_chain_checker(f)
}

// t_nb 10.2
/// Import a set of transactions to the pool.
///
/// Given blockchain and state access (Client)
@@ -471,7 +472,7 @@ impl TransactionQueue {
(pending_readiness, state_readiness)
}

/// Culls all stalled transactions from the pool.
// t_nb 10.5.1 Culls all stalled transactions from the pool.
pub fn cull<C: client::NonceClient + Clone>(&self, client: C) {
trace_time!("pool::cull");
// We don't care about future transactions, so nonce_cap is not important.
@@ -336,6 +336,7 @@ impl<T: InformantData> Informant<T> {
}

impl ChainNotify for Informant<FullNodeInformantData> {
// t_nb 11.2 Informant. Prints new block inclusion to console/log.
fn new_blocks(&self, new_blocks: NewBlocks) {
if new_blocks.has_more_blocks_to_import {
return;
@@ -175,6 +175,7 @@ impl<C> ChainNotificationHandler<C> {
}

impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> {
// t_nb 11.3 RPC. Notify header/logs subscribers about the new block
fn new_blocks(&self, new_blocks: NewBlocks) {
if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() {
return;
@@ -83,6 +83,7 @@ impl AclStorage for OnChainAclStorage {
}

impl ChainNotify for OnChainAclStorage {
// t_nb 11.5 SecretStore OnChainAclStorage.
fn new_blocks(&self, new_blocks: NewBlocks) {
if new_blocks.has_more_blocks_to_import {
return;
@@ -168,6 +168,7 @@ impl KeyServerSet for OnChainKeyServerSet {
}

impl ChainNotify for OnChainKeyServerSet {
// t_nb 11.6 SecretStore OnChainKeyServerSet.
fn new_blocks(&self, new_blocks: NewBlocks) {
if new_blocks.has_more_blocks_to_import {
return;
@@ -624,6 +624,7 @@ impl Drop for ServiceContractListener {
}

impl ChainNotify for ServiceContractListener {
// t_nb 11.7 SecretStore ServiceContractListener
fn new_blocks(&self, new_blocks: NewBlocks) {
if new_blocks.has_more_blocks_to_import {
return;
@ -332,6 +332,7 @@ impl JournalDB for OverlayRecentDB {
|
||||
self.journal_overlay.read().earliest_era
|
||||
}
|
||||
|
||||
// t_nb 9.6
|
||||
fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
|
||||
trace!(target: "journaldb", "entry: #{} ({})", now, id);
|
||||
|
||||
|