// openethereum/miner/src/miner.rs

// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use rayon::prelude::*;
use std::sync::atomic::AtomicBool;

use util::*;
use ethcore::views::{BlockView, HeaderView};
use ethcore::client::{BlockChainClient, BlockId};
use ethcore::block::{ClosedBlock, IsBlock};
use ethcore::error::*;
use ethcore::transaction::SignedTransaction;
use super::{MinerService, MinerStatus, TransactionQueue, AccountDetails, TransactionImportResult};

/// Keeps track of transactions using a priority queue and holds the currently mined block.
pub struct Miner {
    transaction_queue: Mutex<TransactionQueue>,

    // for sealing...
    force_sealing: bool,
    sealing_enabled: AtomicBool,
    sealing_block_last_request: Mutex<u64>,
    sealing_work: Mutex<UsingQueue<ClosedBlock>>,
    gas_floor_target: RwLock<U256>,
    author: RwLock<Address>,
    extra_data: RwLock<Bytes>,
}

impl Default for Miner {
    fn default() -> Miner {
        Miner {
            transaction_queue: Mutex::new(TransactionQueue::new()),
            force_sealing: false,
            sealing_enabled: AtomicBool::new(false),
            sealing_block_last_request: Mutex::new(0),
            sealing_work: Mutex::new(UsingQueue::new(5)),
            gas_floor_target: RwLock::new(U256::zero()),
            author: RwLock::new(Address::default()),
            extra_data: RwLock::new(Vec::new()),
        }
    }
}
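
// Example (sketch): a `Miner` is typically created once, shared behind the returned `Arc`,
// configured, and then driven by the client's new-block notifications. The surrounding
// variables (`client`, `imported`, `invalid`, `enacted`, `retracted`) are assumed here for
// illustration only and are not defined in this file.
//
//     let miner = Miner::new(false);
//     miner.set_author(Address::default());
//     miner.set_minimal_gas_price(x!(20_000_000_000u64)); // 20 gwei
//     // on every new chain head:
//     miner.chain_new_blocks(&*client, &imported, &invalid, &enacted, &retracted);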

impl Miner {
    /// Creates a new instance of the miner.
    pub fn new(force_sealing: bool) -> Arc<Miner> {
        Arc::new(Miner {
            transaction_queue: Mutex::new(TransactionQueue::new()),
            force_sealing: force_sealing,
            sealing_enabled: AtomicBool::new(force_sealing),
            sealing_block_last_request: Mutex::new(0),
            sealing_work: Mutex::new(UsingQueue::new(5)),
            gas_floor_target: RwLock::new(U256::zero()),
            author: RwLock::new(Address::default()),
            extra_data: RwLock::new(Vec::new()),
        })
    }

    /// Prepares a new block for sealing, including the top transactions from the queue.
    #[cfg_attr(feature="dev", allow(match_same_arms))]
    fn prepare_sealing(&self, chain: &BlockChainClient) {
        trace!(target: "miner", "prepare_sealing: entering");
        let transactions = self.transaction_queue.lock().unwrap().top_transactions();
        let mut sealing_work = self.sealing_work.lock().unwrap();
        let best_hash = chain.best_block_header().sha3();

        /*
        // Check whether the last ClosedBlock in `would_seals` is built on the current best block (i.e. has it as parent).
        // if so:
        //   duplicate it, re-open it and push any new transactions;
        //   if at least one was pushed successfully, close and enqueue the new ClosedBlock,
        //   otherwise leave everything alone.
        // otherwise, author a fresh block.
        */
        let (b, invalid_transactions) = match sealing_work.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
            Some(old_block) => {
                trace!(target: "miner", "Already have previous work; updating and returning");
                // add transactions to old_block
                let e = chain.engine();
                let mut invalid_transactions = HashSet::new();
                let mut block = old_block.reopen(e);
                let block_number = block.block().fields().header.number();

                // TODO: push new uncles, too.
                // TODO: refactor with chain.prepare_sealing
                for tx in transactions {
                    let hash = tx.hash();
                    let res = block.push_transaction(tx, None);
                    match res {
                        Err(Error::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, .. })) => {
                            trace!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?}", hash);
                            // Exit early if the gas left is smaller than min_tx_gas.
                            let min_tx_gas: U256 = x!(21000); // TODO: figure this out properly.
                            if gas_limit - gas_used < min_tx_gas {
                                break;
                            }
                        },
                        Err(Error::Transaction(TransactionError::AlreadyImported)) => {} // already have transaction - ignore
                        Err(e) => {
                            invalid_transactions.insert(hash);
                            trace!(target: "miner",
                                "Error adding transaction to block: number={}. transaction_hash={:?}, Error: {:?}",
                                block_number, hash, e);
                        },
                        _ => {} // imported ok
                    }
                }
                (Some(block.close()), invalid_transactions)
            }
            None => {
                // block not found - create it.
                trace!(target: "miner", "No existing work - making new block");
                chain.prepare_sealing(
                    self.author(),
                    self.gas_floor_target(),
                    self.extra_data(),
                    transactions,
                )
            }
        };

        let mut queue = self.transaction_queue.lock().unwrap();
        let fetch_account = |a: &Address| AccountDetails {
            nonce: chain.nonce(a),
            balance: chain.balance(a),
        };
        for hash in invalid_transactions.into_iter() {
            queue.remove_invalid(&hash, &fetch_account);
        }

        if let Some(block) = b {
            if sealing_work.peek_last_ref().map_or(true, |pb| pb.block().fields().header.hash() != block.block().fields().header.hash()) {
                trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
                sealing_work.push(block);
            }
        }
        trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.peek_last_ref().map(|b| b.block().fields().header.hash()));
    }

    fn update_gas_limit(&self, chain: &BlockChainClient) {
        let gas_limit = HeaderView::new(&chain.best_block_header()).gas_limit();
        let mut queue = self.transaction_queue.lock().unwrap();
        queue.set_gas_limit(gas_limit);
    }
}
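
/// Number of blocks that may pass after the last work request (`map_sealing_work`) before
/// `update_sealing` switches sealing off again, unless `force_sealing` is set.
/// For example, with the value 5, if work was last requested at block 100 and the chain
/// reaches block 106 without another request, the miner goes to sleep and resets its work queue.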
const SEALING_TIMEOUT_IN_BLOCKS: u64 = 5;

impl MinerService for Miner {
    fn clear_and_reset(&self, chain: &BlockChainClient) {
        self.transaction_queue.lock().unwrap().clear();
        self.update_sealing(chain);
    }

    fn status(&self) -> MinerStatus {
        let status = self.transaction_queue.lock().unwrap().status();
        let sealing_work = self.sealing_work.lock().unwrap();
        MinerStatus {
            transactions_in_pending_queue: status.pending,
            transactions_in_future_queue: status.future,
            transactions_in_pending_block: sealing_work.peek_last_ref().map_or(0, |b| b.transactions().len()),
        }
    }

    fn set_author(&self, author: Address) {
        *self.author.write().unwrap() = author;
    }

    fn set_extra_data(&self, extra_data: Bytes) {
        *self.extra_data.write().unwrap() = extra_data;
    }

    /// Set the gas limit we wish to target when sealing a new block.
    fn set_gas_floor_target(&self, target: U256) {
        *self.gas_floor_target.write().unwrap() = target;
    }

    fn set_minimal_gas_price(&self, min_gas_price: U256) {
        self.transaction_queue.lock().unwrap().set_minimal_gas_price(min_gas_price);
    }

    fn minimal_gas_price(&self) -> U256 {
        *self.transaction_queue.lock().unwrap().minimal_gas_price()
    }

    fn sensible_gas_price(&self) -> U256 {
        // 10% above our minimum.
        *self.transaction_queue.lock().unwrap().minimal_gas_price() * x!(110) / x!(100)
    }

    fn sensible_gas_limit(&self) -> U256 {
        *self.gas_floor_target.read().unwrap() / x!(5)
    }

    fn transactions_limit(&self) -> usize {
        self.transaction_queue.lock().unwrap().limit()
    }

    fn set_transactions_limit(&self, limit: usize) {
        self.transaction_queue.lock().unwrap().set_limit(limit)
    }

    /// Get the author that we will seal blocks as.
    fn author(&self) -> Address {
        *self.author.read().unwrap()
    }

    /// Get the extra_data that we will seal blocks with.
    fn extra_data(&self) -> Bytes {
        self.extra_data.read().unwrap().clone()
    }

    /// Get the gas limit we wish to target when sealing a new block.
    fn gas_floor_target(&self) -> U256 {
        *self.gas_floor_target.read().unwrap()
    }

    fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_account: T) ->
        Vec<Result<TransactionImportResult, Error>>
        where T: Fn(&Address) -> AccountDetails {
        let mut transaction_queue = self.transaction_queue.lock().unwrap();
        transaction_queue.add_all(transactions, fetch_account)
    }

    fn import_own_transaction<T>(&self, transaction: SignedTransaction, fetch_account: T) ->
        Result<TransactionImportResult, Error>
        where T: Fn(&Address) -> AccountDetails {
        let hash = transaction.hash();
        trace!(target: "own_tx", "Importing transaction: {:?}", transaction);

        let mut transaction_queue = self.transaction_queue.lock().unwrap();
        let import = transaction_queue.add(transaction, &fetch_account);
        match import {
            Ok(ref res) => {
                trace!(target: "own_tx", "Imported transaction to {:?} (hash: {:?})", res, hash);
                trace!(target: "own_tx", "Status: {:?}", transaction_queue.status());
            },
            Err(ref e) => {
                trace!(target: "own_tx", "Failed to import transaction {:?} (hash: {:?})", e, hash);
                trace!(target: "own_tx", "Status: {:?}", transaction_queue.status());
            },
        }
        import
    }

    fn pending_transactions_hashes(&self) -> Vec<H256> {
        let transaction_queue = self.transaction_queue.lock().unwrap();
        transaction_queue.pending_hashes()
    }

    fn transaction(&self, hash: &H256) -> Option<SignedTransaction> {
        let queue = self.transaction_queue.lock().unwrap();
        queue.find(hash)
    }

    fn pending_transactions(&self) -> Vec<SignedTransaction> {
        let queue = self.transaction_queue.lock().unwrap();
        queue.top_transactions()
    }

    fn last_nonce(&self, address: &Address) -> Option<U256> {
        self.transaction_queue.lock().unwrap().last_nonce(address)
    }

    fn update_sealing(&self, chain: &BlockChainClient) {
        if self.sealing_enabled.load(atomic::Ordering::Relaxed) {
            let current_no = chain.chain_info().best_block_number;
            let last_request = *self.sealing_block_last_request.lock().unwrap();
            let should_disable_sealing = !self.force_sealing && current_no > last_request && current_no - last_request > SEALING_TIMEOUT_IN_BLOCKS;

            if should_disable_sealing {
                trace!(target: "miner", "Miner sleeping (current {}, last {})", current_no, last_request);
                self.sealing_enabled.store(false, atomic::Ordering::Relaxed);
                self.sealing_work.lock().unwrap().reset();
            } else if self.sealing_enabled.load(atomic::Ordering::Relaxed) {
                self.prepare_sealing(chain);
            }
        }
    }

    fn map_sealing_work<F, T>(&self, chain: &BlockChainClient, f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T {
        trace!(target: "miner", "map_sealing_work: entering");
        let have_work = self.sealing_work.lock().unwrap().peek_last_ref().is_some();
        trace!(target: "miner", "map_sealing_work: have_work={}", have_work);
        if !have_work {
            self.sealing_enabled.store(true, atomic::Ordering::Relaxed);
            self.prepare_sealing(chain);
        }
        let mut sealing_block_last_request = self.sealing_block_last_request.lock().unwrap();
        let best_number = chain.chain_info().best_block_number;
        if *sealing_block_last_request != best_number {
            trace!(target: "miner", "map_sealing_work: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number);
            *sealing_block_last_request = best_number;
        }
        let mut sealing_work = self.sealing_work.lock().unwrap();
        let ret = sealing_work.use_last_ref();
        trace!(target: "miner", "map_sealing_work: leaving use_last_ref={:?}", ret.as_ref().map(|b| b.block().fields().header.hash()));
        ret.map(f)
    }

    fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
        if let Some(b) = self.sealing_work.lock().unwrap().take_used_if(|b| &b.hash() == &pow_hash) {
            match chain.try_seal(b.lock(), seal) {
                Err(_) => {
                    Err(Error::PowInvalid)
                }
                Ok(sealed) => {
                    // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice.
                    try!(chain.import_block(sealed.rlp_bytes()));
                    Ok(())
                }
            }
        } else {
            Err(Error::PowHashInvalid)
        }
    }

    fn chain_new_blocks(&self, chain: &BlockChainClient, _imported: &[H256], _invalid: &[H256], enacted: &[H256], retracted: &[H256]) {
        fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
            let block = chain
                .block(BlockId::Hash(*hash))
                // The client should send this message only after committing to the db and inserting into the chain.
                .expect("Expected in-chain blocks.");
            let block = BlockView::new(&block);
            block.transactions()
        }

        // 1. We ignore blocks that were `imported`, because it means that they are not in the canonical chain
        //    and their transactions should still be available in the queue.
        // 2. We ignore blocks that are `invalid`, because they carry no meaning in terms of the transactions
        //    that are in those blocks.

        // First update the gas limit in the transaction queue.
        self.update_gas_limit(chain);

        // Then import all transactions...
        {
            let out_of_chain = retracted
                .par_iter()
                .map(|h| fetch_transactions(chain, h));
            out_of_chain.for_each(|txs| {
                // populate sender
                for tx in &txs {
                    let _sender = tx.sender();
                }
                let mut transaction_queue = self.transaction_queue.lock().unwrap();
                let _ = transaction_queue.add_all(txs, |a| AccountDetails {
                    nonce: chain.nonce(a),
                    balance: chain.balance(a)
                });
            });
        }

        // ...and at the end remove old ones.
        {
            let in_chain = enacted
                .par_iter()
                .map(|h: &H256| fetch_transactions(chain, h));
            in_chain.for_each(|mut txs| {
                let mut transaction_queue = self.transaction_queue.lock().unwrap();
                let to_remove = txs.drain(..)
                    .map(|tx| {
                        tx.sender().expect("Transaction is in block, so sender has to be defined.")
                    })
                    .collect::<HashSet<Address>>();
                for sender in to_remove.into_iter() {
                    transaction_queue.remove_all(sender, chain.nonce(&sender));
                }
            });
        }

        self.update_sealing(chain);
    }
}
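
// Example (sketch): the two sealing entry points above are normally exercised by the
// `eth_getWork`/`eth_submitWork` RPC handlers, roughly as below. The `chain` and `seal`
// variables belong to the RPC plumbing and are assumed here for illustration only.
//
//     // getWork: borrow the current pending block and hand out its PoW hash.
//     let pow_hash = miner.map_sealing_work(&*chain, |b| b.hash());
//
//     // submitWork: hand the seal back for the block with that PoW hash.
//     if let Some(pow_hash) = pow_hash {
//         let _ = miner.submit_seal(&*chain, pow_hash, seal);
//     }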

#[cfg(test)]
mod tests {
    use MinerService;
    use super::{Miner};
    use util::*;
    use ethcore::client::{TestBlockChainClient, EachBlockWith};
    use ethcore::block::*;

    // TODO [ToDr] To uncomment when TestBlockChainClient can actually return a ClosedBlock.
    #[ignore]
    #[test]
    fn should_prepare_block_to_seal() {
        // given
        let client = TestBlockChainClient::default();
        let miner = Miner::default();

        // when
        let sealing_work = miner.map_sealing_work(&client, |_| ());
        assert!(sealing_work.is_some(), "Expected closed block");
    }
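
    // A minimal sketch of the 10%-above-minimum rule in `sensible_gas_price`, using only the
    // setters and getters defined in this file; assumes a freshly constructed queue accepts
    // the new minimal gas price immediately.
    #[test]
    fn should_quote_sensible_gas_price_ten_percent_above_minimum() {
        // given
        let miner = Miner::default();
        let min_price: U256 = x!(20);
        miner.set_minimal_gas_price(min_price);

        // then: 20 * 110 / 100 == 22
        let expected: U256 = x!(22);
        assert_eq!(miner.minimal_gas_price(), min_price);
        assert_eq!(miner.sensible_gas_price(), expected);
    }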

    #[ignore]
    #[test]
    fn should_still_work_after_a_couple_of_blocks() {
        // given
        let client = TestBlockChainClient::default();
        let miner = Miner::default();

        let res = miner.map_sealing_work(&client, |b| b.block().fields().header.hash());
        assert!(res.is_some());
        assert!(miner.submit_seal(&client, res.unwrap(), vec![]).is_ok());

        // two more blocks mined, work requested.
        client.add_blocks(1, EachBlockWith::Uncle);
        miner.map_sealing_work(&client, |b| b.block().fields().header.hash());
        client.add_blocks(1, EachBlockWith::Uncle);
        miner.map_sealing_work(&client, |b| b.block().fields().header.hash());

        // solution to original work submitted.
        assert!(miner.submit_seal(&client, res.unwrap(), vec![]).is_ok());
    }
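
    // A minimal sketch exercising the transaction-limit setter/getter and the status report
    // of a default `Miner`; assumes a fresh transaction queue starts out empty.
    #[test]
    fn should_report_transaction_limit_and_empty_status() {
        // given
        let miner = Miner::default();

        // when
        miner.set_transactions_limit(10);

        // then
        assert_eq!(miner.transactions_limit(), 10);
        let status = miner.status();
        assert_eq!(status.transactions_in_pending_queue, 0);
        assert_eq!(status.transactions_in_future_queue, 0);
        assert_eq!(status.transactions_in_pending_block, 0);
    }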
}