Less cloning when importing blocks (#11531)

* WIP

* cleanup

* check_and_lock_block() only needs reference

* Cleanup&docs

* Push uncles by ref to clone at the last possible time.

* Missing import

* Review grumbles

* Update util/journaldb/src/overlayrecentdb.rs

Co-Authored-By: Niklas Adolfsson <niklasadolfsson1@gmail.com>

* Update ethcore/types/src/client_types.rs

Co-Authored-By: Andronik Ordian <write@reusable.software>

* deref U256 before adding

* More review grumbles

* review grumbles: pass by value

* cleanup

* Move the block

* Don't clone the header

* Update ethcore/src/client/client.rs

Co-Authored-By: Niklas Adolfsson <niklasadolfsson1@gmail.com>

* Update ethcore/src/client/client.rs

Co-Authored-By: Niklas Adolfsson <niklasadolfsson1@gmail.com>

* Add comment

Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com>
Co-authored-by: Andronik Ordian <write@reusable.software>
Commit 6c0134f2ee by David, 2020-03-04 18:34:29 +01:00, committed by GitHub (parent 729b10e1e0).
12 changed files with 97 additions and 120 deletions
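
Before the per-file diffs, a note on the recurring pattern: the import path used to clone the header and the full RLP bytes of every block, and the rewrite instead borrows where a reference is enough and moves owned data out of the `PreverifiedBlock` at the last possible moment via `std::mem::take`. A minimal compilable sketch of that move-instead-of-clone step, using a toy `Block` type as a stand-in for the real `PreverifiedBlock`:

#[derive(Default)]
struct Block {
    bytes: Vec<u8>, // stand-in for the block's RLP bytes
}

fn import(mut block: Block) -> usize {
    // Move the bytes out, leaving an empty Vec behind -- no copy is made.
    // After this line `block.bytes` is empty, so nothing may rely on it,
    // which is exactly what the NOTE in the client.rs diff below warns about.
    let bytes = std::mem::take(&mut block.bytes);
    bytes.len()
}

fn main() {
    let block = Block { bytes: vec![0u8; 1024] };
    assert_eq!(import(block), 1024);
}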

View File

@@ -1069,8 +1069,8 @@ impl BlockChain {
}
/// Write a pending epoch transition by block hash.
-pub fn insert_pending_transition(&self, batch: &mut DBTransaction, hash: H256, t: PendingEpochTransition) {
-batch.write(db::COL_EXTRA, &hash, &t);
+pub fn insert_pending_transition(&self, batch: &mut DBTransaction, hash: &H256, t: PendingEpochTransition) {
+batch.write(db::COL_EXTRA, hash, &t);
}
/// Get a pending epoch transition by block hash.

View File

@@ -2553,14 +2553,15 @@ mod tests {
// step 3
let mut b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false).unwrap();
-b2.push_transaction(Transaction {
+let signed_tx = Transaction {
action: Action::Create,
nonce: U256::from(0),
gas_price: U256::from(3000),
gas: U256::from(53_000),
value: U256::from(1),
data: vec![],
-}.fake_sign(addr2)).unwrap();
+}.fake_sign(addr2);
+b2.push_transaction(signed_tx).unwrap();
let b2 = b2.close_and_lock().unwrap();
// we will now seal a block with 1tx and include the accumulated empty step message

View File

@@ -85,7 +85,6 @@ use rand::Rng;
use unexpected::{Mismatch, OutOfBounds};
use time_utils::CheckedSystemTime;
use common_types::{
-BlockNumber,
ids::BlockId,
header::Header,
engines::{
@@ -379,8 +378,6 @@ impl Engine for Clique {
engine_info
}
-fn maximum_uncle_count(&self, _block: BlockNumber) -> usize { 0 }
fn on_new_block(
&self,
_block: &mut ExecutedBlock,

View File

@@ -19,7 +19,10 @@
use std::sync::Arc;
use std::collections::HashMap;
-use common_types::errors::{EthcoreError as Error, EngineError};
+use common_types::{
+BlockNumber,
+errors::{EthcoreError as Error, EngineError}
+};
use ethcore::{
block::*,
test_helpers::get_temp_state_db,

View File

@@ -49,7 +49,6 @@ use vm::LastHashes;
use hash::keccak;
use rlp::{RlpStream, Encodable, encode_list};
use types::{
-block::PreverifiedBlock,
errors::{EthcoreError as Error, BlockError},
transaction::{SignedTransaction, Error as TransactionError},
header::Header,
@@ -177,7 +176,7 @@ impl<'x> OpenBlock<'x> {
let outcome = self.block.state.apply(&env_info, self.engine.machine(), &t, self.block.traces.is_enabled())?;
self.block.transactions_set.insert(t.hash());
-self.block.transactions.push(t.into());
+self.block.transactions.push(t);
if let Tracing::Enabled(ref mut traces) = self.block.traces {
traces.push(outcome.trace.into());
}
@@ -405,9 +404,11 @@ impl Drain for SealedBlock {
}
}
-/// Enact the block given by block header, transactions and uncles
+/// Enact the block. Takes the block header, transactions and uncles from a
+/// `PreverifiedBlock` and produces a new `LockedBlock` after applying all
+/// transactions and committing the state to disk.
pub(crate) fn enact(
-header: Header,
+header: &Header,
transactions: Vec<SignedTransaction>,
uncles: Vec<Header>,
engine: &dyn Engine,
@@ -434,7 +435,7 @@ pub(crate) fn enact(
last_hashes,
// Engine such as Clique will calculate author from extra_data.
// this is only important for executing contracts as the 'executive_author'.
-engine.executive_author(&header)?,
+engine.executive_author(header)?,
(3141562.into(), 31415620.into()),
vec![],
is_epoch_begin,
@@ -448,7 +449,7 @@ pub(crate) fn enact(
b.block.header.number(), root, env.author, author_balance);
}
-b.populate_from(&header);
+b.populate_from(header);
b.push_transactions(transactions)?;
for u in uncles {
@@ -458,32 +459,6 @@ pub(crate) fn enact(
b.close_and_lock()
}
-/// Enact the block given by `block_bytes` using `engine` on the database `db` with the given `parent` block header
-pub fn enact_verified(
-block: PreverifiedBlock,
-engine: &dyn Engine,
-tracing: bool,
-db: StateDB,
-parent: &Header,
-last_hashes: Arc<LastHashes>,
-factories: Factories,
-is_epoch_begin: bool,
-) -> Result<LockedBlock, Error> {
-enact(
-block.header,
-block.transactions,
-block.uncles,
-engine,
-tracing,
-db,
-parent,
-last_hashes,
-factories,
-is_epoch_begin,
-)
-}
#[cfg(test)]
mod tests {
use test_helpers::get_temp_state_db;
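
With `enact` now borrowing the header (`header: &Header`), the caller in client.rs can keep the header inside the `PreverifiedBlock` while moving `transactions` and `uncles` out of it: Rust tracks borrows per field, so borrowing one field and moving the others needs no clone. A compilable sketch of that field-level borrow/move split, with toy types standing in for the real `Header`, `SignedTransaction` and `PreverifiedBlock`:

struct Header { number: u64 }

struct Preverified {
    header: Header,
    transactions: Vec<u64>, // stand-in for Vec<SignedTransaction>
    uncles: Vec<Header>,
}

// Mirrors the new shape: `enact(header: &Header, transactions, uncles, ..)`.
fn enact(header: &Header, transactions: Vec<u64>, uncles: Vec<Header>) {
    println!("block #{}: {} txs, {} uncles", header.number, transactions.len(), uncles.len());
}

fn main() {
    let block = Preverified {
        header: Header { number: 1 },
        transactions: vec![1, 2],
        uncles: vec![],
    };
    let header = &block.header;   // borrow one field...
    let txs = block.transactions; // ...while moving the other two out:
    let uncles = block.uncles;    // disjoint paths, so no clone is needed
    enact(header, txs, uncles);
}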

View File

@@ -38,7 +38,7 @@ use trie::{Trie, TrieFactory, TrieSpec};
use account_state::State;
use account_state::state::StateInfo;
-use block::{ClosedBlock, Drain, enact_verified, LockedBlock, OpenBlock, SealedBlock};
+use block::{ClosedBlock, Drain, enact, LockedBlock, OpenBlock, SealedBlock};
use blockchain::{
BlockChain,
BlockChainDB,
@@ -283,10 +283,9 @@ impl Importer {
}
let max_blocks_to_import = client.config.max_round_blocks_to_import;
-let (imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration, has_more_blocks_to_import) = {
+let (imported_blocks, import_results, invalid_blocks, imported, duration, has_more_blocks_to_import) = {
let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
let mut invalid_blocks = HashSet::new();
-let proposed_blocks = Vec::with_capacity(max_blocks_to_import);
let mut import_results = Vec::with_capacity(max_blocks_to_import);
let _import_lock = self.import_lock.lock();
@@ -297,27 +296,32 @@ impl Importer {
trace_time!("import_verified_blocks");
let start = Instant::now();
-for block in blocks {
-let header = block.header.clone();
-let bytes = block.bytes.clone();
-let hash = header.hash();
+for mut block in blocks {
+let hash = block.header.hash();
-let is_invalid = invalid_blocks.contains(header.parent_hash());
+let is_invalid = invalid_blocks.contains(block.header.parent_hash());
if is_invalid {
invalid_blocks.insert(hash);
continue;
}
+// --------------------------------------------------------
+// NOTE: this will remove the RLP bytes from the
+// `PreverifiedBlock` so be careful not to use the bytes
+// anywhere after this, it will be an empty `Vec`.
+// --------------------------------------------------------
+let block_bytes = std::mem::take(&mut block.bytes);
match self.check_and_lock_block(block, client) {
-Ok((closed_block, pending)) => {
+Ok((locked_block, pending)) => {
imported_blocks.push(hash);
-let transactions_len = closed_block.transactions.len();
-let route = self.commit_block(closed_block, &header, encoded::Block::new(bytes), pending, client);
+let transactions_len = locked_block.transactions.len();
+let gas_used = *locked_block.header.gas_used();
+let route = self.commit_block(locked_block, encoded::Block::new(block_bytes), pending, client);
import_results.push(route);
-client.report.write().accrue_block(&header, transactions_len);
-},
+client.report.write().accrue_block(gas_used, transactions_len);
+}
Err(err) => {
-self.bad_blocks.report(bytes, format!("{:?}", err));
+self.bad_blocks.report(block_bytes, err.to_string());
invalid_blocks.insert(hash);
},
}
@@ -330,7 +334,7 @@ impl Importer {
self.block_queue.mark_as_bad(&invalid_blocks);
}
let has_more_blocks_to_import = !self.block_queue.mark_as_good(&imported_blocks);
-(imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, start.elapsed(), has_more_blocks_to_import)
+(imported_blocks, import_results, invalid_blocks, imported, start.elapsed(), has_more_blocks_to_import)
};
{
@@ -348,7 +352,7 @@ impl Importer {
invalid_blocks.clone(),
route.clone(),
Vec::new(),
-proposed_blocks.clone(),
+Vec::new(),
duration,
has_more_blocks_to_import,
)
@@ -364,8 +368,7 @@ impl Importer {
fn check_and_lock_block(&self, block: PreverifiedBlock, client: &Client) -> EthcoreResult<(LockedBlock, Option<PendingTransition>)> {
let engine = &*self.engine;
-let header = block.header.clone();
+let header = &block.header;
// Check the block isn't so old we won't be able to enact it.
let best_block_number = client.chain.read().best_block_number();
if client.pruning_info().earliest_state > header.number() {
@@ -385,7 +388,7 @@
let chain = client.chain.read();
// Verify Block Family
let verify_family_result = verification::verify_block_family(
-&header,
+header,
&parent,
engine,
verification::FullFamilyParams {
@@ -412,8 +415,10 @@
let is_epoch_begin = chain.epoch_transition(parent.number(), *header.parent_hash()).is_some();
-let enact_result = enact_verified(
-block,
+let enact_result = enact(
+header,
+block.transactions,
+block.uncles,
engine,
client.tracedb.read().tracing_enabled(),
db,
@@ -489,13 +494,14 @@
fn commit_block<B>(
&self,
block: B,
-header: &Header,
block_data: encoded::Block,
pending: Option<PendingTransition>,
client: &Client
) -> ImportRoute
where B: Drain
{
+let block = block.drain();
+let header = block.header;
let hash = &header.hash();
let number = header.number();
let parent = header.parent_hash();
@@ -503,8 +509,7 @@
let mut is_finalized = false;
// Commit results
-let block = block.drain();
-debug_assert_eq!(header.hash(), block_data.header_view().hash());
+debug_assert_eq!(*hash, block_data.header_view().hash());
let mut batch = DBTransaction::new();
@@ -542,7 +547,7 @@
// check epoch end signal, potentially generating a proof on the current
// state.
if let Some(pending) = pending {
-chain.insert_pending_transition(&mut batch, header.hash(), pending);
+chain.insert_pending_transition(&mut batch, hash, pending);
}
state.journal_under(&mut batch, number, hash).expect("DB commit failed");
@@ -550,7 +555,7 @@
let finalized: Vec<_> = ancestry_actions.into_iter().map(|ancestry_action| {
let AncestryAction::MarkFinalized(a) = ancestry_action;
-if a != header.hash() {
+if a != *hash {
chain.mark_finalized(&mut batch, a).expect("Engine's ancestry action must be known blocks; qed");
} else {
// we're finalizing the current block
@@ -1058,14 +1063,14 @@ impl Client {
};
self.block_header(id).and_then(|header| {
-let db = self.state_db.read().boxed_clone();
+let state_db = self.state_db.read();
// early exit for pruned blocks
-if db.is_prunable() && self.pruning_info().earliest_state > block_number {
+if state_db.is_prunable() && self.pruning_info().earliest_state > block_number {
trace!(target: "client", "State for block #{} is pruned. Earliest state: {:?}", block_number, self.pruning_info().earliest_state);
return None;
}
+let db = state_db.boxed_clone();
let root = header.state_root();
State::from_existing(db, root, self.engine.account_start_nonce(block_number), self.factories.clone()).ok()
})
@@ -2345,7 +2350,7 @@ impl PrepareOpenBlock for Client {
chain
.find_uncle_headers(&h, MAX_UNCLE_AGE)
.unwrap_or_else(Vec::new)
-.into_iter()
+.iter()
.take(engine.maximum_uncle_count(open_block.header.number()))
.for_each(|h| {
open_block.push_uncle(h.decode().expect("decoding failure")).expect("pushing maximum_uncle_count;
@@ -2401,7 +2406,6 @@ impl ImportSealedBlock for Client {
)?;
let route = self.importer.commit_block(
block,
-&header,
encoded::Block::new(block_bytes),
pending,
self
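
Two things in this file are worth calling out. First, `std::mem::take(&mut block.bytes)` moves the RLP bytes out without copying, as flagged by the NOTE above. Second, the state-lookup change reorders work so the comparatively expensive `boxed_clone` of the state DB only happens after the cheap pruning check. A small sketch of that ordering, with a toy `StateDb` standing in for the real one and `std::sync::RwLock` in place of the parking_lot lock the codebase uses:

use std::sync::RwLock;

struct StateDb { prunable: bool }

impl StateDb {
    // Stand-in for the real `boxed_clone`, which is comparatively costly.
    fn boxed_clone(&self) -> StateDb { StateDb { prunable: self.prunable } }
}

fn state_at(db: &RwLock<StateDb>, block_is_pruned: bool) -> Option<StateDb> {
    let state_db = db.read().unwrap();
    // Early exit for pruned blocks *before* paying for the clone.
    if state_db.prunable && block_is_pruned {
        return None;
    }
    Some(state_db.boxed_clone())
}

fn main() {
    let db = RwLock::new(StateDb { prunable: true });
    assert!(state_at(&db, true).is_none());
    assert!(state_at(&db, false).is_some());
}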

View File

@@ -183,14 +183,15 @@ pub fn generate_dummy_client_with_spec_and_data<F>(
// first block we don't have any balance, so can't send any transactions.
for _ in 0..txs_per_block {
-b.push_transaction(Transaction {
+let signed_tx = Transaction {
nonce: n.into(),
gas_price: tx_gas_prices[n % tx_gas_prices.len()],
gas: 100000.into(),
action: Action::Create,
data: vec![],
value: U256::zero(),
-}.sign(kp.secret(), Some(test_spec.chain_id()))).unwrap();
+}.sign(kp.secret(), Some(test_spec.chain_id()));
+b.push_transaction(signed_tx).unwrap();
n += 1;
}

View File

@@ -150,18 +150,16 @@ fn can_trace_block_and_uncle_reward() {
rolling_timestamp += 10;
block.set_timestamp(rolling_timestamp);
-let mut n = 0;
-for _ in 0..1 {
-block.push_transaction(Transaction {
-nonce: n.into(),
-gas_price: 10000.into(),
-gas: 100000.into(),
-action: Action::Create,
-data: vec![],
-value: U256::zero(),
-}.sign(kp.secret(), Some(spec.network_id()))).unwrap();
-n += 1;
-}
+let signed_tx = Transaction {
+nonce: 0.into(),
+gas_price: 10000.into(),
+gas: 100000.into(),
+action: Action::Create,
+data: vec![],
+value: U256::zero(),
+}.sign(kp.secret(), Some(spec.network_id()));
+block.push_transaction(signed_tx).unwrap();
let mut uncle = Header::new();
let uncle_author = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap();

View File

@@ -222,7 +222,7 @@ impl StateDB {
self.db.mark_canonical(batch, end_era, canon_id)
}
-/// Propagate local cache into the global cache and synchonize
+/// Propagate local cache into the global cache and synchronize
/// the global cache with the best block state.
/// This function updates the global cache by removing entries
/// that are invalidated by chain reorganization. `sync_cache`

View File

@@ -131,7 +131,7 @@ pub struct NewBlocks {
pub route: ChainRoute,
/// Sealed
pub sealed: Vec<H256>,
-/// Block bytes.
+/// Block bytes. Only used in EthSync.
pub proposed: Vec<Bytes>,
/// Duration
pub duration: Duration,

View File

@@ -24,7 +24,6 @@ use std::{
};
use ethereum_types::U256;
-use crate::header::Header;
/// Operating mode for the client.
#[derive(Debug, Eq, PartialEq, Clone)]
@@ -67,10 +66,10 @@ pub struct ClientReport {
impl ClientReport {
/// Alter internal reporting to reflect the additional `block` has been processed.
-pub fn accrue_block(&mut self, header: &Header, transactions: usize) {
+pub fn accrue_block(&mut self, gas_used: U256, transactions: usize) {
self.blocks_imported += 1;
self.transactions_applied += transactions;
-self.gas_processed = self.gas_processed + *header.gas_used();
+self.gas_processed += gas_used;
}
}
@@ -98,4 +97,3 @@ pub enum StateResult<T> {
/// State is some
Some(T),
}
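
`accrue_block` now receives the gas figure by value: `U256` is a small `Copy` type, so passing it costs nothing, and `+=` via `AddAssign` replaces the old read-modify-write `self.gas_processed = self.gas_processed + *header.gas_used()`. A minimal sketch of the new shape, assuming the `ethereum-types` crate:

use ethereum_types::U256;

#[derive(Default)]
struct Report {
    blocks_imported: usize,
    transactions_applied: usize,
    gas_processed: U256,
}

impl Report {
    // Mirrors the new signature: a Copy `U256` by value instead of `&Header`.
    fn accrue_block(&mut self, gas_used: U256, transactions: usize) {
        self.blocks_imported += 1;
        self.transactions_applied += transactions;
        self.gas_processed += gas_used; // AddAssign; no deref needed
    }
}

fn main() {
    let mut report = Report::default();
    report.accrue_block(U256::from(21_000u64), 1);
    assert_eq!(report.gas_processed, U256::from(21_000u64));
}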

View File

@@ -186,7 +186,7 @@ impl OverlayRecentDB {
index: 0usize,
};
while let Some(rlp_data) = db.get(col, &encode(&db_key)).expect("Low-level database error.") {
-trace!("read_overlay: era={}, index={}", era, db_key.index);
+trace!(target: "journaldb", "read_overlay: era={}, index={}", era, db_key.index);
let value = decode::<DatabaseValue>(&rlp_data).unwrap_or_else(|e| {
panic!("read_overlay: Error decoding DatabaseValue era={}, index={}, error={}",
era, db_key.index, e
@@ -264,32 +264,9 @@ impl JournalDB for OverlayRecentDB {
self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none()
}
-fn backing(&self) -> &Arc<dyn KeyValueDB> {
-&self.backing
-}
-fn latest_era(&self) -> Option<u64> { self.journal_overlay.read().latest_era }
fn earliest_era(&self) -> Option<u64> { self.journal_overlay.read().earliest_era }
-fn state(&self, key: &H256) -> Option<Bytes> {
-let key = to_short_key(key);
-// Hold the read lock for shortest possible amount of time.
-let maybe_state_data = {
-let journal_overlay = self.journal_overlay.read();
-journal_overlay
-.backing_overlay
-.get(&key, EMPTY_PREFIX)
-.or_else(|| journal_overlay.pending_overlay.get(&key).cloned())
-};
-maybe_state_data.or_else(|| {
-let pkey = &key[..DB_PREFIX_LEN];
-self.backing
-.get_by_prefix(self.column, &pkey)
-.map(|b| b.into_vec())
-})
-}
+fn latest_era(&self) -> Option<u64> { self.journal_overlay.read().latest_era }
fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
trace!(target: "journaldb", "entry: #{} ({})", now, id);
@@ -416,10 +393,6 @@
Ok(ops as u32)
}
-fn flush(&self) {
-self.journal_overlay.write().pending_overlay.clear();
-}
fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
let mut ops = 0;
for (key, (value, rc)) in self.transaction_overlay.drain() {
@@ -443,6 +416,33 @@
Ok(ops)
}
+fn state(&self, key: &H256) -> Option<Bytes> {
+let key = to_short_key(key);
+// Hold the read lock for shortest possible amount of time.
+let maybe_state_data = {
+let journal_overlay = self.journal_overlay.read();
+journal_overlay
+.backing_overlay
+.get(&key, EMPTY_PREFIX)
+.or_else(|| journal_overlay.pending_overlay.get(&key).cloned())
+};
+maybe_state_data.or_else(|| {
+let pkey = &key[..DB_PREFIX_LEN];
+self.backing
+.get_by_prefix(self.column, &pkey)
+.map(|b| b.into_vec())
+})
+}
+fn backing(&self) -> &Arc<dyn KeyValueDB> {
+&self.backing
+}
+fn flush(&self) {
+self.journal_overlay.write().pending_overlay.clear();
+}
fn consolidate(&mut self, with: super::MemoryDB) {
self.transaction_overlay.consolidate(with);
}
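
The relocated `state` lookup keeps its locking discipline: take the `journal_overlay` read lock, probe the in-memory overlays, and drop the guard before falling back to the backing store. A sketch of that guard-scoping pattern with toy types, using `std::sync::RwLock` and a `HashMap` in place of the real overlay and `KeyValueDB`:

use std::collections::HashMap;
use std::sync::RwLock;

struct Db {
    overlay: RwLock<HashMap<u32, Vec<u8>>>,
    backing: HashMap<u32, Vec<u8>>,
}

impl Db {
    fn state(&self, key: u32) -> Option<Vec<u8>> {
        // Hold the read lock for the shortest possible time: the guard is
        // dropped at the end of this block, before the backing lookup runs.
        let maybe_state_data = {
            let overlay = self.overlay.read().unwrap();
            overlay.get(&key).cloned()
        };
        maybe_state_data.or_else(|| self.backing.get(&key).cloned())
    }
}

fn main() {
    let mut backing = HashMap::new();
    backing.insert(1u32, vec![42u8]);
    let db = Db { overlay: RwLock::new(HashMap::new()), backing };
    assert_eq!(db.state(1), Some(vec![42u8]));
    assert!(db.state(2).is_none());
}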