openethereum/ethcore/src/miner/miner.rs

// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use rayon::prelude::*;
use std::time::{Instant, Duration};
use util::*;
use util::using_queue::{UsingQueue, GetAction};
use account_provider::AccountProvider;
use views::{BlockView, HeaderView};
use state::State;
use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics};
use executive::contract_address;
use block::{ClosedBlock, SealedBlock, IsBlock, Block};
use error::*;
use transaction::{Action, SignedTransaction};
use receipt::{Receipt, RichReceipt};
use spec::Spec;
use engines::Engine;
use miner::{MinerService, MinerStatus, TransactionQueue, AccountDetails, TransactionOrigin};
use miner::work_notify::WorkPoster;
use client::TransactionImportResult;
use miner::price_info::PriceInfo;
use header::BlockNumber;
/// Different possible definitions for pending transaction set.
#[derive(Debug, PartialEq)]
pub enum PendingSet {
/// Always just the transactions in the queue. These have had only cheap checks.
AlwaysQueue,
/// Always just the transactions in the sealing block. These have had full checks but
/// may be empty if the node is not actively mining or has force_sealing enabled.
AlwaysSealing,
/// Try the sealing block, but if it is not currently sealing, fall back to the queue.
SealingOrElseQueue,
}
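// Illustrative behaviour sketch (summarising how the pending set is consumed elsewhere in this
// file): with `AlwaysQueue`, `pending_transactions()` reports the whole transaction queue; with
// `AlwaysSealing`, only the transactions that made it into the current sealing block (possibly
// nothing); `SealingOrElseQueue` prefers the sealing block and falls back to the queue when no
// block is being sealed.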
/// Configures the behaviour of the miner.
#[derive(Debug, PartialEq)]
pub struct MinerOptions {
/// URLs to notify when there is new work.
pub new_work_notify: Vec<String>,
/// Force the miner to reseal, even when nobody has asked for work.
pub force_sealing: bool,
/// Reseal on receipt of new external transactions.
pub reseal_on_external_tx: bool,
/// Reseal on receipt of new local transactions.
pub reseal_on_own_tx: bool,
/// Minimum period between transaction-inspired reseals.
pub reseal_min_period: Duration,
/// Maximum amount of gas to bother considering for block insertion.
pub tx_gas_limit: U256,
/// Maximum size of the transaction queue.
pub tx_queue_size: usize,
/// Whether we should fall back to providing all the queue's transactions or just the pending ones.
pub pending_set: PendingSet,
/// How many historical work packages can we store before running out?
pub work_queue_size: usize,
/// Can we submit two different solutions for the same block and expect both to result in an import?
pub enable_resubmission: bool,
}
impl Default for MinerOptions {
fn default() -> Self {
MinerOptions {
new_work_notify: vec![],
force_sealing: false,
reseal_on_external_tx: false,
reseal_on_own_tx: true,
tx_gas_limit: !U256::zero(),
tx_queue_size: 1024,
pending_set: PendingSet::AlwaysQueue,
reseal_min_period: Duration::from_secs(2),
work_queue_size: 20,
enable_resubmission: true,
}
}
}
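// Illustrative sketch: since `MinerOptions` implements `Default`, callers can override only the
// fields they care about via struct-update syntax. The values below are arbitrary examples, not
// recommended settings.
//
//     let opts = MinerOptions {
//         force_sealing: true,
//         reseal_min_period: Duration::from_secs(4),
//         ..Default::default()
//     };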
/// Options for the dynamic gas price recalibrator.
#[derive(Debug, PartialEq)]
pub struct GasPriceCalibratorOptions {
/// Base transaction price to match against.
pub usd_per_tx: f32,
/// How frequently we should recalibrate.
pub recalibration_period: Duration,
}
/// The gas price validator variant for a `GasPricer`.
#[derive(Debug, PartialEq)]
pub struct GasPriceCalibrator {
options: GasPriceCalibratorOptions,
next_calibration: Instant,
}
impl GasPriceCalibrator {
fn recalibrate<F: Fn(U256) + Sync + Send + 'static>(&mut self, set_price: F) {
trace!(target: "miner", "Recalibrating {:?} versus {:?}", Instant::now(), self.next_calibration);
if Instant::now() >= self.next_calibration {
let usd_per_tx = self.options.usd_per_tx;
trace!(target: "miner", "Getting price info");
if let Ok(_) = PriceInfo::get(move |price: PriceInfo| {
trace!(target: "miner", "Price info arrived: {:?}", price);
let usd_per_eth = price.ethusd;
let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
let gas_per_tx: f32 = 21000.0;
let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx;
info!(target: "miner", "Updated conversion rate to Ξ1 = {} ({} wei/gas)", Colour::White.bold().paint(format!("US${}", usd_per_eth)), Colour::Yellow.bold().paint(format!("{}", wei_per_gas)));
set_price(U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap());
}) {
self.next_calibration = Instant::now() + self.options.recalibration_period;
} else {
warn!(target: "miner", "Unable to update Ether price.");
}
}
}
}
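// Worked example of the conversion above (figures are hypothetical): with ETH at US$10 and
// `usd_per_tx` = 0.005, `wei_per_usd` = 1.0e18 / 10.0 = 1.0e17, so
// `wei_per_gas` = 1.0e17 * 0.005 / 21000.0 ≈ 2.38e10 wei, i.e. the minimal gas price is set to
// roughly 24 gwei.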
/// Struct to look after updating the acceptable gas price of a miner.
#[derive(Debug, PartialEq)]
pub enum GasPricer {
/// A fixed gas price in terms of Wei - always the argument given.
Fixed(U256),
/// Gas price is calibrated according to a fixed amount of USD.
Calibrated(GasPriceCalibrator),
}
impl GasPricer {
/// Create a new Calibrated `GasPricer`.
pub fn new_calibrated(options: GasPriceCalibratorOptions) -> GasPricer {
GasPricer::Calibrated(GasPriceCalibrator {
options: options,
next_calibration: Instant::now(),
})
}
/// Create a new Fixed `GasPricer`.
pub fn new_fixed(gas_price: U256) -> GasPricer {
GasPricer::Fixed(gas_price)
}
fn recalibrate<F: Fn(U256) + Sync + Send + 'static>(&mut self, set_price: F) {
match *self {
GasPricer::Fixed(ref max) => set_price(max.clone()),
GasPricer::Calibrated(ref mut cal) => cal.recalibrate(set_price),
}
}
}
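// Illustrative sketch of constructing either variant (the figures are arbitrary examples):
//
//     let fixed = GasPricer::new_fixed(20_000_000_000u64.into());
//     let calibrated = GasPricer::new_calibrated(GasPriceCalibratorOptions {
//         usd_per_tx: 0.005,
//         recalibration_period: Duration::from_secs(3600),
//     });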
struct SealingWork {
queue: UsingQueue<ClosedBlock>,
enabled: bool,
}
/// Keeps track of transactions using a priority queue and holds the currently mined block.
/// Handles preparing work for "work sealing", or seals "internally" if the Engine does not require work.
pub struct Miner {
// NOTE [ToDr] When locking always lock in this order!
transaction_queue: Arc<Mutex<TransactionQueue>>,
sealing_work: Mutex<SealingWork>,
next_allowed_reseal: Mutex<Instant>,
sealing_block_last_request: Mutex<u64>,
// for sealing...
options: MinerOptions,
seals_internally: bool,
gas_range_target: RwLock<(U256, U256)>,
author: RwLock<Address>,
extra_data: RwLock<Bytes>,
engine: Arc<Engine>,
accounts: Option<Arc<AccountProvider>>,
work_poster: Option<WorkPoster>,
gas_pricer: Mutex<GasPricer>,
}
impl Miner {
/// Creates a new instance of the miner without accounts, but with the given spec.
pub fn with_spec(spec: &Spec) -> Miner {
let author = Address::default();
let is_sealer = spec.engine.is_sealer(&author);
Miner {
transaction_queue: Arc::new(Mutex::new(TransactionQueue::new())),
options: Default::default(),
next_allowed_reseal: Mutex::new(Instant::now()),
sealing_block_last_request: Mutex::new(0),
sealing_work: Mutex::new(SealingWork{
queue: UsingQueue::new(20),
enabled: is_sealer.unwrap_or(false)
}),
seals_internally: is_sealer.is_some(),
gas_range_target: RwLock::new((U256::zero(), U256::zero())),
author: RwLock::new(author),
extra_data: RwLock::new(Vec::new()),
accounts: None,
engine: spec.engine.clone(),
work_poster: None,
gas_pricer: Mutex::new(GasPricer::new_fixed(20_000_000_000u64.into())),
}
}
/// Creates a new instance of the miner.
pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option<Arc<AccountProvider>>) -> Arc<Miner> {
let work_poster = if !options.new_work_notify.is_empty() { Some(WorkPoster::new(&options.new_work_notify)) } else { None };
let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit)));
let author = Address::default();
let is_sealer = spec.engine.is_sealer(&author);
Arc::new(Miner {
transaction_queue: txq,
next_allowed_reseal: Mutex::new(Instant::now()),
sealing_block_last_request: Mutex::new(0),
sealing_work: Mutex::new(SealingWork{
queue: UsingQueue::new(options.work_queue_size),
enabled: options.force_sealing
|| !options.new_work_notify.is_empty()
|| is_sealer.unwrap_or(false)
}),
seals_internally: is_sealer.is_some(),
gas_range_target: RwLock::new((U256::zero(), U256::zero())),
author: RwLock::new(author),
extra_data: RwLock::new(Vec::new()),
options: options,
accounts: accounts,
engine: spec.engine.clone(),
work_poster: work_poster,
gas_pricer: Mutex::new(gas_pricer),
})
}
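// Illustrative sketch of wiring a `Miner` up; `spec` and `accounts` are assumed to be supplied by
// the surrounding client setup code:
//
//     let miner = Miner::new(
//         MinerOptions { force_sealing: true, ..Default::default() },
//         GasPricer::new_fixed(20_000_000_000u64.into()),
//         &spec,
//         Some(accounts),
//     );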
fn forced_sealing(&self) -> bool {
self.options.force_sealing || !self.options.new_work_notify.is_empty()
}
/// Clear all pending block states
pub fn clear(&self) {
self.sealing_work.lock().queue.reset();
}
/// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing.
pub fn pending_state(&self) -> Option<State> {
self.sealing_work.lock().queue.peek_last_ref().map(|b| b.block().fields().state.clone())
}
/// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing.
pub fn pending_block(&self) -> Option<Block> {
self.sealing_work.lock().queue.peek_last_ref().map(|b| b.base().clone())
}
/// Prepares a new block for sealing, including top transactions from the queue.
fn prepare_block(&self, chain: &MiningBlockChainClient) -> (ClosedBlock, Option<H256>) {
{
trace!(target: "miner", "prepare_block: recalibrating...");
let txq = self.transaction_queue.clone();
self.gas_pricer.lock().recalibrate(move |price| {
trace!(target: "miner", "prepare_block: Got gas price! {}", price);
txq.lock().set_minimal_gas_price(price);
});
trace!(target: "miner", "prepare_block: done recalibration.");
}
let (transactions, mut open_block, original_work_hash) = {
let transactions = {self.transaction_queue.lock().top_transactions()};
let mut sealing_work = self.sealing_work.lock();
let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash());
let best_hash = chain.best_block_header().sha3();
/*
// check to see if last ClosedBlock in would_seals is actually same parent block.
// if so
// duplicate, re-open and push any new transactions.
// if at least one was pushed successfully, close and enqueue new ClosedBlock;
// otherwise, leave everything alone.
// otherwise, author a fresh block.
*/
let open_block = match sealing_work.queue.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
Some(old_block) => {
trace!(target: "miner", "prepare_block: Already have previous work; updating and returning");
// add transactions to old_block
old_block.reopen(&*self.engine)
}
None => {
// block not found - create it.
trace!(target: "miner", "prepare_block: No existing work - making new block");
chain.prepare_open_block(
self.author(),
(self.gas_floor_target(), self.gas_ceil_target()),
self.extra_data()
)
}
};
(transactions, open_block, last_work_hash)
};
let mut invalid_transactions = HashSet::new();
let block_number = open_block.block().fields().header.number();
// TODO: push new uncles, too.
for tx in transactions {
let hash = tx.hash();
match open_block.push_transaction(tx, None) {
Err(Error::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, gas })) => {
debug!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?} (limit: {:?}, used: {:?}, gas: {:?})", hash, gas_limit, gas_used, gas);
// Exit early if gas left is smaller than min_tx_gas
let min_tx_gas: U256 = 21000.into(); // TODO: figure this out properly.
if gas_limit - gas_used < min_tx_gas {
break;
}
},
// An invalid nonce error can happen only if the previous transaction was skipped because of the gas limit.
// If the transaction queue is in an erroneous state, it will be fixed when the next block is imported.
Err(Error::Execution(ExecutionError::InvalidNonce { expected, got })) => {
debug!(target: "miner", "Skipping adding transaction to block because of invalid nonce: {:?} (expected: {:?}, got: {:?})", hash, expected, got);
},
// already have transaction - ignore
Err(Error::Transaction(TransactionError::AlreadyImported)) => {},
Err(e) => {
invalid_transactions.insert(hash);
debug!(target: "miner",
"Error adding transaction to block: number={}. transaction_hash={:?}, Error: {:?}",
block_number, hash, e);
},
_ => {} // imported ok
}
}
let block = open_block.close();
let fetch_account = |a: &Address| AccountDetails {
nonce: chain.latest_nonce(a),
balance: chain.latest_balance(a),
};
{
let mut queue = self.transaction_queue.lock();
for hash in invalid_transactions.into_iter() {
queue.remove_invalid(&hash, &fetch_account);
}
}
(block, original_work_hash)
}
/// Check if reseal is allowed and necessary.
fn requires_reseal(&self, best_block: BlockNumber) -> bool {
let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions();
let mut sealing_work = self.sealing_work.lock();
if sealing_work.enabled {
trace!(target: "miner", "requires_reseal: sealing enabled");
let last_request = *self.sealing_block_last_request.lock();
let should_disable_sealing = !self.forced_sealing()
&& !has_local_transactions
&& best_block > last_request
&& best_block - last_request > SEALING_TIMEOUT_IN_BLOCKS;
trace!(target: "miner", "requires_reseal: should_disable_sealing={}; best_block={}, last_request={}", should_disable_sealing, best_block, last_request);
if should_disable_sealing {
trace!(target: "miner", "Miner sleeping (current {}, last {})", best_block, last_request);
sealing_work.enabled = false;
sealing_work.queue.reset();
false
} else {
// sealing enabled and we don't want to sleep.
*self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period;
true
}
} else {
trace!(target: "miner", "requires_reseal: sealing is disabled");
false
}
}
/// Attempts to perform internal sealing (one that does not require work) and returns Ok(sealed);
/// Err(Some(block)) is returned for unsuccessful sealing, while Err(None) indicates a misspecified engine.
fn seal_block_internally(&self, block: ClosedBlock) -> Result<SealedBlock, Option<ClosedBlock>> {
trace!(target: "miner", "seal_block_internally: block has transaction - attempting internal seal.");
let s = self.engine.generate_seal(block.block(), match self.accounts {
Some(ref x) => Some(&**x),
None => None,
});
if let Some(seal) = s {
trace!(target: "miner", "seal_block_internally: managed internal seal. importing...");
block.lock().try_seal(&*self.engine, seal).or_else(|_| {
warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal. WTF?");
Err(None)
})
} else {
trace!(target: "miner", "seal_block_internally: unable to generate seal internally");
Err(Some(block))
}
}
/// Uses the Engine to seal the block internally and then imports it to the chain.
fn seal_and_import_block_internally(&self, chain: &MiningBlockChainClient, block: ClosedBlock) -> bool {
if !block.transactions().is_empty() {
if let Ok(sealed) = self.seal_block_internally(block) {
if chain.import_block(sealed.rlp_bytes()).is_ok() {
return true
}
}
}
false
}
/// Prepares work which has to be done to seal.
fn prepare_work(&self, block: ClosedBlock, original_work_hash: Option<H256>) {
let (work, is_new) = {
let mut sealing_work = self.sealing_work.lock();
let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash());
trace!(target: "miner", "prepare_work: Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash());
let (work, is_new) = if last_work_hash.map_or(true, |h| h != block.block().fields().header.hash()) {
trace!(target: "miner", "prepare_work: Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
let pow_hash = block.block().fields().header.hash();
let number = block.block().fields().header.number();
let difficulty = *block.block().fields().header.difficulty();
let is_new = original_work_hash.map_or(true, |h| block.block().fields().header.hash() != h);
sealing_work.queue.push(block);
// If push notifications are enabled we assume all work items are used.
if self.work_poster.is_some() && is_new {
sealing_work.queue.use_last_ref();
}
(Some((pow_hash, difficulty, number)), is_new)
} else {
(None, false)
};
trace!(target: "miner", "prepare_work: leaving (last={:?})", sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash()));
(work, is_new)
};
if is_new {
work.map(|(pow_hash, difficulty, number)| self.work_poster.as_ref().map(|p| p.notify(pow_hash, difficulty, number)));
}
}
fn update_gas_limit(&self, chain: &MiningBlockChainClient) {
let gas_limit = HeaderView::new(&chain.best_block_header()).gas_limit();
let mut queue = self.transaction_queue.lock();
queue.set_gas_limit(gas_limit);
}
/// Returns true if we had to prepare new pending block.
fn prepare_work_sealing(&self, chain: &MiningBlockChainClient) -> bool {
trace!(target: "miner", "prepare_work_sealing: entering");
let prepare_new = {
let mut sealing_work = self.sealing_work.lock();
let have_work = sealing_work.queue.peek_last_ref().is_some();
trace!(target: "miner", "prepare_work_sealing: have_work={}", have_work);
if !have_work {
sealing_work.enabled = true;
true
} else {
false
}
};
if prepare_new {
// --------------------------------------------------------------------------
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
let (block, original_work_hash) = self.prepare_block(chain);
self.prepare_work(block, original_work_hash);
}
let mut sealing_block_last_request = self.sealing_block_last_request.lock();
let best_number = chain.chain_info().best_block_number;
if *sealing_block_last_request != best_number {
trace!(target: "miner", "prepare_work_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number);
*sealing_block_last_request = best_number;
}
// Return whether we had to prepare a new pending block.
prepare_new
}
fn add_transactions_to_queue(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, origin: TransactionOrigin, transaction_queue: &mut TransactionQueue) ->
Vec<Result<TransactionImportResult, Error>> {
let fetch_account = |a: &Address| AccountDetails {
nonce: chain.latest_nonce(a),
balance: chain.latest_balance(a),
};
transactions.into_iter()
.map(|tx| transaction_queue.add(tx, &fetch_account, origin))
.collect()
}
/// Are we allowed to do a non-mandatory reseal?
fn tx_reseal_allowed(&self) -> bool { Instant::now() > *self.next_allowed_reseal.lock() }
}
const SEALING_TIMEOUT_IN_BLOCKS : u64 = 5;
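// In other words: once the best block is more than SEALING_TIMEOUT_IN_BLOCKS ahead of the last
// work request, `requires_reseal` turns sealing off again, unless sealing is forced or local
// transactions are still pending.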
impl MinerService for Miner {
fn clear_and_reset(&self, chain: &MiningBlockChainClient) {
self.transaction_queue.lock().clear();
// --------------------------------------------------------------------------
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
self.update_sealing(chain);
}
fn status(&self) -> MinerStatus {
let status = self.transaction_queue.lock().status();
let sealing_work = self.sealing_work.lock();
MinerStatus {
transactions_in_pending_queue: status.pending,
transactions_in_future_queue: status.future,
transactions_in_pending_block: sealing_work.queue.peek_last_ref().map_or(0, |b| b.transactions().len()),
}
}
fn call(&self, chain: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, CallError> {
let sealing_work = self.sealing_work.lock();
match sealing_work.queue.peek_last_ref() {
Some(work) => {
let block = work.block();
// TODO: merge this code with client.rs's fn call somehow.
let header = block.header();
let last_hashes = Arc::new(chain.last_hashes());
let env_info = EnvInfo {
number: header.number(),
author: *header.author(),
timestamp: header.timestamp(),
difficulty: *header.difficulty(),
last_hashes: last_hashes,
gas_used: U256::zero(),
gas_limit: U256::max_value(),
};
// that's just a copy of the state.
let mut state = block.state().clone();
let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };
let sender = try!(t.sender().map_err(|e| {
let message = format!("Transaction malformed: {:?}", e);
ExecutionError::TransactionMalformed(message)
}));
let balance = state.balance(&sender);
let needed_balance = t.value + t.gas * t.gas_price;
if balance < needed_balance {
// give the sender a sufficient balance
state.add_balance(&sender, &(needed_balance - balance));
}
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
let mut ret = try!(Executive::new(&mut state, &env_info, &*self.engine, chain.vm_factory()).transact(t, options));
// TODO gav move this into Executive.
ret.state_diff = original_state.map(|original| state.diff_from(original));
Ok(ret)
},
None => {
chain.call(t, BlockID::Latest, analytics)
}
}
}
fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 {
let sealing_work = self.sealing_work.lock();
sealing_work.queue.peek_last_ref().map_or_else(
|| chain.latest_balance(address),
|b| b.block().fields().state.balance(address)
)
}
fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 {
let sealing_work = self.sealing_work.lock();
sealing_work.queue.peek_last_ref().map_or_else(
|| chain.latest_storage_at(address, position),
|b| b.block().fields().state.storage_at(address, position)
)
}
fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 {
let sealing_work = self.sealing_work.lock();
sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_nonce(address), |b| b.block().fields().state.nonce(address))
}
fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<Bytes> {
let sealing_work = self.sealing_work.lock();
sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_code(address), |b| b.block().fields().state.code(address))
}
fn set_author(&self, author: Address) {
if self.seals_internally {
let mut sealing_work = self.sealing_work.lock();
sealing_work.enabled = self.engine.is_sealer(&author).unwrap_or(false);
}
*self.author.write() = author;
}
fn set_extra_data(&self, extra_data: Bytes) {
*self.extra_data.write() = extra_data;
}
/// Set the gas limit we wish to target when sealing a new block.
fn set_gas_floor_target(&self, target: U256) {
self.gas_range_target.write().0 = target;
}
fn set_gas_ceil_target(&self, target: U256) {
self.gas_range_target.write().1 = target;
}
fn set_minimal_gas_price(&self, min_gas_price: U256) {
self.transaction_queue.lock().set_minimal_gas_price(min_gas_price);
}
fn minimal_gas_price(&self) -> U256 {
*self.transaction_queue.lock().minimal_gas_price()
}
fn sensible_gas_price(&self) -> U256 {
// 10% above our minimum.
*self.transaction_queue.lock().minimal_gas_price() * 110.into() / 100.into()
}
fn sensible_gas_limit(&self) -> U256 {
self.gas_range_target.read().0 / 5.into()
}
fn transactions_limit(&self) -> usize {
self.transaction_queue.lock().limit()
}
fn set_transactions_limit(&self, limit: usize) {
self.transaction_queue.lock().set_limit(limit)
}
fn set_tx_gas_limit(&self, limit: U256) {
self.transaction_queue.lock().set_tx_gas_limit(limit)
}
/// Get the author that we will seal blocks as.
fn author(&self) -> Address {
*self.author.read()
}
/// Get the extra_data that we will seal blocks with.
fn extra_data(&self) -> Bytes {
self.extra_data.read().clone()
}
/// Get the gas limit we wish to target when sealing a new block.
fn gas_floor_target(&self) -> U256 {
self.gas_range_target.read().0
}
/// Get the ceiling of the gas limit we wish to target when sealing a new block.
fn gas_ceil_target(&self) -> U256 {
self.gas_range_target.read().1
}
fn import_external_transactions(
&self,
chain: &MiningBlockChainClient,
transactions: Vec<SignedTransaction>
) -> Vec<Result<TransactionImportResult, Error>> {
let results = {
let mut transaction_queue = self.transaction_queue.lock();
self.add_transactions_to_queue(
chain, transactions, TransactionOrigin::External, &mut transaction_queue
)
};
if !results.is_empty() && self.options.reseal_on_external_tx && self.tx_reseal_allowed() {
// --------------------------------------------------------------------------
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
self.update_sealing(chain);
}
results
}
fn import_own_transaction(
&self,
chain: &MiningBlockChainClient,
transaction: SignedTransaction,
) -> Result<TransactionImportResult, Error> {
let hash = transaction.hash();
trace!(target: "own_tx", "Importing transaction: {:?}", transaction);
let imported = {
// Be sure to release the lock before we call prepare_work_sealing
let mut transaction_queue = self.transaction_queue.lock();
let import = self.add_transactions_to_queue(
chain, vec![transaction], TransactionOrigin::Local, &mut transaction_queue
).pop().unwrap();
match import {
Ok(ref res) => {
trace!(target: "own_tx", "Imported transaction to {:?} (hash: {:?})", res, hash);
trace!(target: "own_tx", "Status: {:?}", transaction_queue.status());
},
Err(ref e) => {
trace!(target: "own_tx", "Failed to import transaction {:?} (hash: {:?})", e, hash);
trace!(target: "own_tx", "Status: {:?}", transaction_queue.status());
warn!(target: "own_tx", "Error importing transaction: {:?}", e);
},
}
import
};
// --------------------------------------------------------------------------
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
if imported.is_ok() && self.options.reseal_on_own_tx && self.tx_reseal_allowed() {
// Make sure to do it after the transaction is imported and the lock is dropped.
// We need to create pending block and enable sealing.
if self.seals_internally || !self.prepare_work_sealing(chain) {
// If a new block has not been prepared (meaning we already had one),
// or the Engine might be able to seal internally,
// we need to update sealing.
self.update_sealing(chain);
}
}
imported
}
fn all_transactions(&self) -> Vec<SignedTransaction> {
let queue = self.transaction_queue.lock();
queue.top_transactions()
}
fn pending_transactions(&self) -> Vec<SignedTransaction> {
let queue = self.transaction_queue.lock();
let sw = self.sealing_work.lock();
// TODO: should only use the sealing_work when it's current (it could be an old block)
let sealing_set = match sw.enabled {
true => sw.queue.peek_last_ref(),
false => None,
};
match (&self.options.pending_set, sealing_set) {
(&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => queue.top_transactions(),
(_, sealing) => sealing.map_or_else(Vec::new, |s| s.transactions().to_owned()),
}
}
fn pending_transactions_hashes(&self) -> Vec<H256> {
let queue = self.transaction_queue.lock();
let sw = self.sealing_work.lock();
let sealing_set = match sw.enabled {
true => sw.queue.peek_last_ref(),
false => None,
};
match (&self.options.pending_set, sealing_set) {
(&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => queue.pending_hashes(),
(_, sealing) => sealing.map_or_else(Vec::new, |s| s.transactions().iter().map(|t| t.hash()).collect()),
}
}
fn transaction(&self, hash: &H256) -> Option<SignedTransaction> {
let queue = self.transaction_queue.lock();
let sw = self.sealing_work.lock();
let sealing_set = match sw.enabled {
true => sw.queue.peek_last_ref(),
false => None,
};
match (&self.options.pending_set, sealing_set) {
(&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => queue.find(hash),
(_, sealing) => sealing.and_then(|s| s.transactions().iter().find(|t| &t.hash() == hash).cloned()),
}
}
fn pending_receipt(&self, hash: &H256) -> Option<RichReceipt> {
let sealing_work = self.sealing_work.lock();
match (sealing_work.enabled, sealing_work.queue.peek_last_ref()) {
(true, Some(pending)) => {
let txs = pending.transactions();
txs.iter()
.map(|t| t.hash())
.position(|t| t == *hash)
.map(|index| {
let prev_gas = if index == 0 { Default::default() } else { pending.receipts()[index - 1].gas_used };
let tx = &txs[index];
let receipt = &pending.receipts()[index];
RichReceipt {
transaction_hash: hash.clone(),
transaction_index: index,
cumulative_gas_used: receipt.gas_used,
gas_used: receipt.gas_used - prev_gas,
contract_address: match tx.action {
Action::Call(_) => None,
Action::Create => Some(contract_address(&tx.sender().unwrap(), &tx.nonce)),
},
logs: receipt.logs.clone(),
}
})
},
_ => None
}
}
fn pending_receipts(&self) -> BTreeMap<H256, Receipt> {
let sealing_work = self.sealing_work.lock();
match (sealing_work.enabled, sealing_work.queue.peek_last_ref()) {
(true, Some(pending)) => {
let hashes = pending.transactions()
.iter()
.map(|t| t.hash());
let receipts = pending.receipts().iter().cloned();
hashes.zip(receipts).collect()
},
_ => BTreeMap::new()
}
}
fn last_nonce(&self, address: &Address) -> Option<U256> {
self.transaction_queue.lock().last_nonce(address)
}
/// Update sealing if required.
/// Prepare the block and work if the Engine does not seal internally.
fn update_sealing(&self, chain: &MiningBlockChainClient) {
trace!(target: "miner", "update_sealing");
if self.requires_reseal(chain.chain_info().best_block_number) {
// --------------------------------------------------------------------------
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
trace!(target: "miner", "update_sealing: preparing a block");
let (block, original_work_hash) = self.prepare_block(chain);
if self.seals_internally {
trace!(target: "miner", "update_sealing: engine indicates internal sealing");
self.seal_and_import_block_internally(chain, block);
} else {
trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work");
self.prepare_work(block, original_work_hash);
}
}
}
fn is_sealing(&self) -> bool {
self.sealing_work.lock().queue.is_in_use()
}
fn map_sealing_work<F, T>(&self, chain: &MiningBlockChainClient, f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T {
trace!(target: "miner", "map_sealing_work: entering");
self.prepare_work_sealing(chain);
trace!(target: "miner", "map_sealing_work: sealing prepared");
let mut sealing_work = self.sealing_work.lock();
let ret = sealing_work.queue.use_last_ref();
trace!(target: "miner", "map_sealing_work: leaving use_last_ref={:?}", ret.as_ref().map(|b| b.block().fields().header.hash()));
ret.map(f)
}
fn submit_seal(&self, chain: &MiningBlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
let result = if let Some(b) = self.sealing_work.lock().queue.get_used_if(if self.options.enable_resubmission { GetAction::Clone } else { GetAction::Take }, |b| &b.hash() == &pow_hash) {
b.lock().try_seal(&*self.engine, seal).or_else(|_| {
warn!(target: "miner", "Mined solution rejected: Invalid.");
Err(Error::PowInvalid)
})
} else {
warn!(target: "miner", "Mined solution rejected: Block unknown or out of date.");
Err(Error::PowHashInvalid)
};
result.and_then(|sealed| {
let n = sealed.header().number();
let h = sealed.header().hash();
try!(chain.import_sealed_block(sealed));
info!(target: "miner", "Mined block imported OK. #{}: {}", Colour::White.bold().paint(format!("{}", n)), Colour::White.bold().paint(h.hex()));
Ok(())
})
}
fn chain_new_blocks(&self, chain: &MiningBlockChainClient, _imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) {
trace!(target: "miner", "chain_new_blocks");
fn fetch_transactions(chain: &MiningBlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
let block = chain
.block(BlockID::Hash(*hash))
// Client should send message after commit to db and inserting to chain.
.expect("Expected in-chain blocks.");
let block = BlockView::new(&block);
let txs = block.transactions();
// populate sender
for tx in &txs {
let _sender = tx.sender();
}
txs
}
// 1. We ignore blocks that were `imported` (it means they are not in the canon chain, so their
//    transactions should still be available in the queue).
// 2. We ignore blocks that are `invalid` because they carry no meaning in terms of the transactions
//    that are in those blocks.
// First update gas limit in transaction queue
self.update_gas_limit(chain);
// Then import all transactions...
{
let out_of_chain = retracted
.par_iter()
.map(|h| fetch_transactions(chain, h));
out_of_chain.for_each(|txs| {
let mut transaction_queue = self.transaction_queue.lock();
let _ = self.add_transactions_to_queue(
chain, txs, TransactionOrigin::External, &mut transaction_queue
);
});
}
// ...and at the end remove old ones
{
let in_chain = enacted
.par_iter()
.map(|h: &H256| fetch_transactions(chain, h));
in_chain.for_each(|mut txs| {
let mut transaction_queue = self.transaction_queue.lock();
let to_remove = txs.drain(..)
.map(|tx| {
tx.sender().expect("Transaction is in block, so sender has to be defined.")
})
.collect::<HashSet<Address>>();
for sender in to_remove.into_iter() {
transaction_queue.remove_all(sender, chain.latest_nonce(&sender));
}
});
}
if enacted.len() > 0 {
// --------------------------------------------------------------------------
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
self.update_sealing(chain);
}
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use super::super::MinerService;
use super::*;
use util::*;
use ethkey::{Generator, Random};
use client::{BlockChainClient, TestBlockChainClient, EachBlockWith, TransactionImportResult};
use header::BlockNumber;
use types::transaction::{Transaction, SignedTransaction, Action};
use block::*;
use spec::Spec;
use tests::helpers::{generate_dummy_client};
#[test]
fn should_prepare_block_to_seal() {
// given
let client = TestBlockChainClient::default();
let miner = Miner::with_spec(&Spec::new_test());
// when
let sealing_work = miner.map_sealing_work(&client, |_| ());
assert!(sealing_work.is_some(), "Expected closed block");
}
#[test]
fn should_still_work_after_a_couple_of_blocks() {
// given
let client = TestBlockChainClient::default();
let miner = Miner::with_spec(&Spec::new_test());
let res = miner.map_sealing_work(&client, |b| b.block().fields().header.hash());
assert!(res.is_some());
assert!(miner.submit_seal(&client, res.unwrap(), vec![]).is_ok());
// two more blocks mined, work requested.
client.add_blocks(1, EachBlockWith::Uncle);
miner.map_sealing_work(&client, |b| b.block().fields().header.hash());
client.add_blocks(1, EachBlockWith::Uncle);
miner.map_sealing_work(&client, |b| b.block().fields().header.hash());
// solution to original work submitted.
assert!(miner.submit_seal(&client, res.unwrap(), vec![]).is_ok());
}
fn miner() -> Miner {
Arc::try_unwrap(Miner::new(
MinerOptions {
new_work_notify: Vec::new(),
force_sealing: false,
reseal_on_external_tx: false,
reseal_on_own_tx: true,
reseal_min_period: Duration::from_secs(5),
tx_gas_limit: !U256::zero(),
tx_queue_size: 1024,
pending_set: PendingSet::AlwaysSealing,
work_queue_size: 5,
enable_resubmission: true,
},
GasPricer::new_fixed(0u64.into()),
&Spec::new_test(),
None, // accounts provider
)).ok().expect("Miner was just created.")
}
fn transaction() -> SignedTransaction {
let keypair = Random.generate().unwrap();
Transaction {
action: Action::Create,
value: U256::zero(),
data: "3331600055".from_hex().unwrap(),
gas: U256::from(100_000),
gas_price: U256::zero(),
nonce: U256::zero(),
}.sign(keypair.secret())
}
#[test]
fn should_make_pending_block_when_importing_own_transaction() {
// given
let client = TestBlockChainClient::default();
let miner = miner();
let transaction = transaction();
// when
let res = miner.import_own_transaction(&client, transaction);
// then
assert_eq!(res.unwrap(), TransactionImportResult::Current);
assert_eq!(miner.all_transactions().len(), 1);
assert_eq!(miner.pending_transactions().len(), 1);
assert_eq!(miner.pending_transactions_hashes().len(), 1);
assert_eq!(miner.pending_receipts().len(), 1);
// This method will let us know if pending block was created (before calling that method)
assert!(!miner.prepare_work_sealing(&client));
}
#[test]
fn should_import_external_transaction() {
// given
let client = TestBlockChainClient::default();
let miner = miner();
let transaction = transaction();
// when
let res = miner.import_external_transactions(&client, vec![transaction]).pop().unwrap();
// then
assert_eq!(res.unwrap(), TransactionImportResult::Current);
assert_eq!(miner.all_transactions().len(), 1);
assert_eq!(miner.pending_transactions_hashes().len(), 0);
assert_eq!(miner.pending_transactions().len(), 0);
assert_eq!(miner.pending_receipts().len(), 0);
// This method will let us know if pending block was created (before calling that method)
assert!(miner.prepare_work_sealing(&client));
}
#[test]
fn should_not_seal_unless_enabled() {
let miner = miner();
let client = TestBlockChainClient::default();
// By default resealing is not required.
assert!(!miner.requires_reseal(1u8.into()));
miner.import_external_transactions(&client, vec![transaction()]).pop().unwrap().unwrap();
assert!(miner.prepare_work_sealing(&client));
// Unless asked to prepare work.
assert!(miner.requires_reseal(1u8.into()));
}
#[test]
fn internal_seals_without_work() {
let miner = Miner::with_spec(&Spec::new_test_instant());
let c = generate_dummy_client(2);
let client = c.reference().as_ref();
assert_eq!(miner.import_external_transactions(client, vec![transaction()]).pop().unwrap().unwrap(), TransactionImportResult::Current);
miner.update_sealing(client);
client.flush_queue();
assert!(miner.pending_block().is_none());
assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber);
assert_eq!(miner.import_own_transaction(client, transaction()).unwrap(), TransactionImportResult::Current);
miner.update_sealing(client);
client.flush_queue();
assert!(miner.pending_block().is_none());
assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber);
}
}