Misc fixes (#11510)
* Misc fixes: did some code reading and made random changes as I went.

* Update ethcore/src/client/client.rs

Co-authored-by: Andronik Ordian <write@reusable.software>
parent 2018f5b0ab
commit bbcd094906
@@ -434,7 +434,7 @@ impl<B: Backend> State<B> {
 	/// Get the nonce of account `a`.
 	pub fn nonce(&self, a: &Address) -> TrieResult<U256> {
 		self.ensure_cached(a, RequireCache::None, true,
-			|a| a.as_ref().map_or(self.account_start_nonce, |account| *account.nonce()))
+			|a| a.map_or(self.account_start_nonce, |account| *account.nonce()))
 	}
 
 	/// Whether the base storage root of an account remains unchanged.
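
For reference, a minimal standalone sketch (hypothetical types, not the ethcore ones) of why the `as_ref()` call could be dropped: assuming the closure receives an `Option<&Account>` from `ensure_cached`, `map_or` already works on it directly, and the extra `as_ref()` only added another layer of reference.

struct Account { nonce: u64 }

fn nonce_or_default(cached: Option<&Account>, start_nonce: u64) -> u64 {
    // `map_or` works directly on `Option<&Account>`; an extra `.as_ref()`
    // would only wrap the value in another reference (`&&Account`).
    cached.map_or(start_nonce, |account| account.nonce)
}

fn main() {
    let acc = Account { nonce: 7 };
    assert_eq!(nonce_or_default(Some(&acc), 0), 7);
    assert_eq!(nonce_or_default(None, 0), 0);
}
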
@@ -1014,13 +1014,13 @@ impl<B: Backend> State<B> {
 	}
 
 	/// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too.
-	pub fn require<'a>(&'a self, a: &Address, require_code: bool) -> TrieResult<RefMut<'a, Account>> {
+	pub fn require(&self, a: &Address, require_code: bool) -> TrieResult<RefMut<Account>> {
 		self.require_or_from(a, require_code, || Account::new_basic(0u8.into(), self.account_start_nonce), |_| {})
 	}
 
 	/// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too.
 	/// If it doesn't exist, make account equal the evaluation of `default`.
-	pub fn require_or_from<'a, F, G>(&'a self, a: &Address, require_code: bool, default: F, not_default: G) -> TrieResult<RefMut<'a, Account>>
+	pub fn require_or_from<F, G>(&self, a: &Address, require_code: bool, default: F, not_default: G) -> TrieResult<RefMut<Account>>
 		where F: FnOnce() -> Account, G: FnOnce(&mut Account),
 	{
 		let contains_key = self.cache.borrow().contains_key(a);
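
The dropped `<'a>` annotations rely on Rust's lifetime elision: with a single `&self` input, the borrow in the returned `RefMut` is tied to `self` automatically, so the explicit and elided forms mean the same thing. A small self-contained sketch (toy `Cache` type, not the ethcore `State`):

use std::cell::{RefCell, RefMut};

struct Cache { value: RefCell<String> }

impl Cache {
    // Explicit form: the returned borrow is tied to `&'a self`.
    fn get_explicit<'a>(&'a self) -> RefMut<'a, String> {
        self.value.borrow_mut()
    }

    // Elided form: identical meaning, the lifetime comes from `&self`.
    fn get(&self) -> RefMut<String> {
        self.value.borrow_mut()
    }
}

fn main() {
    let cache = Cache { value: RefCell::new("state".to_owned()) };
    cache.get().push_str(" root");
    assert_eq!(cache.get_explicit().as_str(), "state root");
}
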
@@ -1137,8 +1137,8 @@ impl<B: Backend> State<B> {
 	}
 }
 
-//// TODO: cloning for `State` shouldn't be possible in general; Remove this and use
-//// checkpoints where possible.
+// TODO: cloning for `State` shouldn't be possible in general; Remove this and use
+// checkpoints where possible.
 impl<B: Backend + Clone> Clone for State<B> {
 	fn clone(&self) -> State<B> {
 		let cache = {
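
Note that the old `////` prefix was never a doc comment in the first place: exactly three slashes make a doc comment, while four or more are an ordinary line comment, so this hunk is purely cosmetic. A quick self-contained illustration:

/// A doc comment: picked up by rustdoc and attached to the item below.
pub fn documented() {}

//// Four or more slashes form an ordinary comment that rustdoc ignores,
//// so `//` and `////` behave the same here.
pub fn not_documented() {}

fn main() {
    documented();
    not_documented();
}
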
@@ -2558,7 +2558,7 @@ mod tests {
 			gas: U256::from(53_000),
 			value: U256::from(1),
 			data: vec![],
-		}.fake_sign(addr2), None).unwrap();
+		}.fake_sign(addr2)).unwrap();
 		let b2 = b2.close_and_lock().unwrap();
 
 		// we will now seal a block with 1tx and include the accumulated empty step message
@@ -100,7 +100,7 @@ impl From<ethjson::spec::Account> for PodAccount {
 	}
 }
 
-/// Determine difference between two optionally existant `Account`s. Returns None
+/// Determine difference between two optionally existent `Account`s. Returns None
 /// if they are the same.
 pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option<AccountDiff> {
 	match (pre, post) {
@@ -35,7 +35,7 @@ use std::{cmp, ops};
 use std::sync::Arc;
 
 use bytes::Bytes;
-use ethereum_types::{H256, U256, Address, Bloom};
+use ethereum_types::{U256, Address, Bloom};
 
 use engine::Engine;
 use trie_vm_factories::Factories;
@@ -168,7 +168,7 @@ impl<'x> OpenBlock<'x> {
 	/// Push a transaction into the block.
 	///
 	/// If valid, it will be executed, and archived together with the receipt.
-	pub fn push_transaction(&mut self, t: SignedTransaction, h: Option<H256>) -> Result<&Receipt, Error> {
+	pub fn push_transaction(&mut self, t: SignedTransaction) -> Result<&Receipt, Error> {
 		if self.block.transactions_set.contains(&t.hash()) {
 			return Err(TransactionError::AlreadyImported.into());
 		}
@@ -176,7 +176,7 @@ impl<'x> OpenBlock<'x> {
 		let env_info = self.block.env_info();
 		let outcome = self.block.state.apply(&env_info, self.engine.machine(), &t, self.block.traces.is_enabled())?;
 
-		self.block.transactions_set.insert(h.unwrap_or_else(||t.hash()));
+		self.block.transactions_set.insert(t.hash());
 		self.block.transactions.push(t.into());
 		if let Tracing::Enabled(ref mut traces) = self.block.traces {
 			traces.push(outcome.trace.into());
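
Since every caller passed `None` for the dropped hash parameter (see the call-site hunks below), `h.unwrap_or_else(|| t.hash())` always fell through to `t.hash()`, so the simplification is behaviour-preserving. A standalone sketch with stand-in types (not the ethcore `SignedTransaction`):

use std::collections::HashSet;

struct Tx { payload: Vec<u8> }

impl Tx {
    // Stand-in for the real keccak-based transaction hash.
    fn hash(&self) -> u64 {
        self.payload.iter().map(|b| u64::from(*b)).sum()
    }
}

// Old shape: optional pre-computed hash, falling back to `t.hash()`.
fn insert_old(t: &Tx, h: Option<u64>, set: &mut HashSet<u64>) -> bool {
    set.insert(h.unwrap_or_else(|| t.hash()))
}

// New shape: the hash always comes from the transaction itself.
fn insert_new(t: &Tx, set: &mut HashSet<u64>) -> bool {
    set.insert(t.hash())
}

fn main() {
    let t = Tx { payload: vec![1, 2, 3] };
    let (mut a, mut b) = (HashSet::new(), HashSet::new());
    // With `None`, the old and new forms do exactly the same thing.
    assert_eq!(insert_old(&t, None, &mut a), insert_new(&t, &mut b));
    assert_eq!(a, b);
}
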
@@ -189,7 +189,7 @@ impl<'x> OpenBlock<'x> {
 	#[cfg(not(feature = "slow-blocks"))]
 	fn push_transactions(&mut self, transactions: Vec<SignedTransaction>) -> Result<(), Error> {
 		for t in transactions {
-			self.push_transaction(t, None)?;
+			self.push_transaction(t)?;
 		}
 		Ok(())
 	}
@@ -203,7 +203,7 @@ impl<'x> OpenBlock<'x> {
 		for t in transactions {
 			let hash = t.hash();
 			let start = time::Instant::now();
-			self.push_transaction(t, None)?;
+			self.push_transaction(t)?;
 			let took = start.elapsed();
 			let took_ms = took.as_secs() * 1000 + took.subsec_nanos() as u64 / 1000000;
 			if took > time::Duration::from_millis(slow_tx) {
@@ -742,7 +742,7 @@ impl Client {
 		let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
 		let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));
 
-		trace!("Cleanup journal: DB Earliest = {:?}, Latest = {:?}", state_db.journal_db().earliest_era(), state_db.journal_db().latest_era());
+		debug!(target: "client", "Cleanup journal: DB Earliest = {:?}, Latest = {:?}", state_db.journal_db().earliest_era(), state_db.journal_db().latest_era());
 
 		let history = if config.history < MIN_HISTORY_SIZE {
 			info!(target: "client", "Ignoring pruning history parameter of {}\
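
Besides raising the level, the new log line gains an explicit `target: "client"`, so the message is grouped under that target rather than the module path. A minimal sketch, assuming the standard `log` macros plus `env_logger` as a dev dependency (the client's actual logger wiring differs):

// Cargo.toml (assumed): log = "0.4", env_logger = "0.10"
use log::{debug, trace, LevelFilter};

fn main() {
    // Enable debug-level output for the "client" target only.
    env_logger::Builder::new()
        .filter_module("client", LevelFilter::Debug)
        .init();

    let (earliest, latest) = (Some(1u64), Some(42u64));

    // Attributed to this module's path; filtered out by the setup above.
    trace!("Cleanup journal: DB Earliest = {:?}, Latest = {:?}", earliest, latest);

    // Attributed to the explicit "client" target; shown at debug level.
    debug!(target: "client", "Cleanup journal: DB Earliest = {:?}, Latest = {:?}", earliest, latest);
}
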
@@ -942,7 +942,6 @@ impl Client {
 			let (result, items) = self.prove_transaction(tx, id)
 				.ok_or_else(|| "Unable to make call. State unavailable?".to_string())?;
 
-			let items = items.into_iter().map(|x| x.to_vec()).collect();
 			Ok((result, items))
 		};
 
@@ -1070,6 +1069,7 @@ impl Client {
 
 		// early exit for pruned blocks
 		if db.is_prunable() && self.pruning_info().earliest_state > block_number {
+			trace!(target: "client", "State for block #{} is pruned. Earliest state: {:?}", block_number, self.pruning_info().earliest_state);
 			return None;
 		}
 
@@ -78,7 +78,7 @@ pub struct ClientConfig {
 	pub spec_name: String,
 	/// Type of block verifier used by client.
 	pub verifier_type: VerifierType,
-	/// State db cache-size.
+	/// State db cache-size. Default: 25Mb.
 	pub state_cache_size: usize,
 	/// EVM jump-tables cache size.
 	pub jump_table_size: usize,
@@ -535,7 +535,7 @@ impl Miner {
 			let result = client.verify_for_pending_block(&transaction, &open_block.header)
 				.map_err(|e| e.into())
 				.and_then(|_| {
					open_block.push_transaction(transaction)
 				});
 
 			let took = start.elapsed();
@@ -190,7 +190,7 @@ pub fn generate_dummy_client_with_spec_and_data<F>(
 			action: Action::Create,
 			data: vec![],
 			value: U256::zero(),
-		}.sign(kp.secret(), Some(test_spec.chain_id())), None).unwrap();
+		}.sign(kp.secret(), Some(test_spec.chain_id()))).unwrap();
 		n += 1;
 	}
 
@@ -247,7 +247,7 @@ pub fn push_block_with_transactions(client: &Arc<Client>, transactions: &[Signed
 	b.set_timestamp(block_number * 10);
 
 	for t in transactions {
-		b.push_transaction(t.clone(), None).unwrap();
+		b.push_transaction(t.clone()).unwrap();
 	}
 	let b = b.close_and_lock().unwrap().seal(test_engine, vec![]).unwrap();
 
@@ -159,7 +159,7 @@ fn can_trace_block_and_uncle_reward() {
 			action: Action::Create,
 			data: vec![],
 			value: U256::zero(),
-		}.sign(kp.secret(), Some(spec.network_id())), None).unwrap();
+		}.sign(kp.secret(), Some(spec.network_id()))).unwrap();
 		n += 1;
 	}
 
@@ -52,7 +52,9 @@ impl Default for CacheConfig {
 }
 
 impl CacheConfig {
-	/// Creates new cache config with cumulative size equal `total`.
+	/// Creates new cache config with cumulative size equal to `total`, distributed as follows: 70%
+	/// to rocksdb, 10% to the blockchain cache and 20% to the state cache. The transaction queue
+	/// cache size is set to 40Mb and the trace cache to 20Mb.
 	pub fn new_with_total_cache_size(total: u32) -> Self {
 		CacheConfig {
 			db: total * 7 / 10,
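
The expanded doc comment spells out the split that `new_with_total_cache_size` applies. A standalone stand-in (not the real `CacheConfig`, whose other fields and defaults aren't shown in this diff) with a worked example of the 70/10/20 split:

struct CacheConfig { db: u32, blockchain: u32, queue: u32, state: u32, traces: u32 }

impl CacheConfig {
    fn new_with_total_cache_size(total: u32) -> Self {
        CacheConfig {
            db: total * 7 / 10,     // 70% to rocksdb, as in the diff
            blockchain: total / 10, // 10% to the blockchain cache, per the doc comment
            state: total * 2 / 10,  // 20% to the state cache, per the doc comment
            queue: 40,              // fixed 40Mb transaction queue cache
            traces: 20,             // fixed 20Mb trace cache
        }
    }
}

fn main() {
    // A 200Mb total splits into 140 / 20 / 40, with the fixed caches unchanged.
    let c = CacheConfig::new_with_total_cache_size(200);
    assert_eq!((c.db, c.blockchain, c.state, c.queue, c.traces), (140, 20, 40, 40, 20));
}
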
@@ -63,14 +65,14 @@ impl CacheConfig {
 		}
 	}
 
-	/// Creates new cache config with gitven details.
+	/// Creates new cache config with given details.
 	pub fn new(db: u32, blockchain: u32, queue: u32, state: u32) -> Self {
 		CacheConfig {
-			db: db,
-			blockchain: blockchain,
-			queue: queue,
+			db,
+			blockchain,
+			queue,
 			traces: DEFAULT_TRACE_CACHE_SIZE,
-			state: state,
+			state,
 		}
 	}
 
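
The `db: db` → `db` changes in the last hunk use Rust's field init shorthand, which is purely syntactic. A tiny self-contained example:

struct Caches { db: u32, blockchain: u32, queue: u32, state: u32 }

// When a binding has the same name as the field, `db: db` can be shortened
// to `db`; both forms compile to the same code.
fn new(db: u32, blockchain: u32, queue: u32, state: u32) -> Caches {
    Caches { db, blockchain, queue, state }
}

fn main() {
    let c = new(140, 20, 40, 40);
    assert_eq!(c.db + c.blockchain + c.queue + c.state, 240);
}
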