2016-02-05 13:40:41 +01:00
|
|
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
|
|
|
// This file is part of Parity.
|
|
|
|
|
|
|
|
// Parity is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
|
|
|
// Parity is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
|
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2016-02-02 15:29:53 +01:00
|
|
|
//! Blockchain database client.
|
|
|
|
|
2016-01-09 18:50:45 +01:00
|
|
|
use util::*;
|
2016-02-10 12:50:27 +01:00
|
|
|
use util::panics::*;
|
2016-02-25 14:09:39 +01:00
|
|
|
use blockchain::{BlockChain, BlockProvider};
|
2016-02-10 15:06:13 +01:00
|
|
|
use views::BlockView;
|
2016-01-11 12:28:59 +01:00
|
|
|
use error::*;
|
2016-02-29 15:30:08 +01:00
|
|
|
use header::{BlockNumber};
|
2016-01-26 15:00:22 +01:00
|
|
|
use state::State;
|
2016-01-11 12:28:59 +01:00
|
|
|
use spec::Spec;
|
2016-01-11 11:51:31 +01:00
|
|
|
use engine::Engine;
|
2016-01-26 15:00:22 +01:00
|
|
|
use views::HeaderView;
|
2016-02-25 14:09:39 +01:00
|
|
|
use block_queue::BlockQueue;
|
2016-02-06 23:15:53 +01:00
|
|
|
use service::{NetSyncMessage, SyncMessage};
|
2016-01-14 01:28:37 +01:00
|
|
|
use env_info::LastHashes;
|
|
|
|
use verification::*;
|
|
|
|
use block::*;
|
2016-02-09 15:17:01 +01:00
|
|
|
use transaction::LocalizedTransaction;
|
2016-02-10 19:29:27 +01:00
|
|
|
use extras::TransactionAddress;
|
2016-02-17 12:35:37 +01:00
|
|
|
use filter::Filter;
|
|
|
|
use log_entry::LocalizedLogEntry;
|
2016-02-25 14:09:39 +01:00
|
|
|
pub use block_queue::{BlockQueueConfig, BlockQueueInfo};
|
|
|
|
pub use blockchain::{TreeRoute, BlockChainConfig, CacheSize as BlockChainCacheSize};
|
2016-01-07 21:35:06 +01:00
|
|
|
|
2016-02-10 19:29:27 +01:00
|
|
|
/// Uniquely identifies block.
#[derive(Debug, PartialEq, Clone)]
pub enum BlockId {
	/// Block's sha3.
	/// Querying by hash is always faster.
	Hash(H256),
	/// Block number within canon blockchain.
	Number(BlockNumber),
	/// Earliest block (genesis).
	Earliest,
	/// Latest mined block.
	Latest
}
|
|
|
|
|
|
|
|
/// Uniquely identifies transaction.
#[derive(Debug, PartialEq, Clone)]
pub enum TransactionId {
	/// Transaction's sha3.
	Hash(H256),
	/// Block id and transaction index within this block.
	/// Querying by block position is always faster.
	Location(BlockId, usize)
}
|
|
|
|
|
2016-01-07 21:35:06 +01:00
|
|
|
/// General block status
#[derive(Debug, Eq, PartialEq)]
pub enum BlockStatus {
	/// Part of the blockchain.
	InChain,
	/// Queued for import.
	Queued,
	/// Known as bad.
	Bad,
	/// Unknown.
	Unknown,
}
|
|
|
|
|
2016-02-25 14:09:39 +01:00
|
|
|
/// Client configuration. Includes configs for all sub-systems.
#[derive(Debug, Default)]
pub struct ClientConfig {
	/// Block queue configuration.
	pub queue: BlockQueueConfig,
	/// Blockchain configuration.
	pub blockchain: BlockChainConfig,
}
|
|
|
|
|
|
|
|
/// Information about the blockchain gathered together.
#[derive(Debug)]
pub struct BlockChainInfo {
	/// Blockchain difficulty.
	pub total_difficulty: U256,
	/// Block queue difficulty.
	pub pending_total_difficulty: U256,
	/// Genesis block hash.
	pub genesis_hash: H256,
	/// Best blockchain block hash.
	pub best_block_hash: H256,
	/// Best blockchain block number.
	pub best_block_number: BlockNumber
}
|
|
|
|
|
2016-01-18 19:23:28 +01:00
|
|
|
impl fmt::Display for BlockChainInfo {
	// Renders as `#<best block number>.<best block hash>`.
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "#{}.{}", self.best_block_number, self.best_block_hash)
	}
}
|
|
|
|
|
2016-01-07 21:35:06 +01:00
|
|
|
/// Blockchain database client. Owns and manages a blockchain and a block queue.
pub trait BlockChainClient : Sync + Send {
	/// Get raw block header data by block id. Returns `None` if the block is not known.
	fn block_header(&self, id: BlockId) -> Option<Bytes>;

	/// Get raw block body data by block id.
	/// Block body is an RLP list of two items: uncles and transactions.
	fn block_body(&self, id: BlockId) -> Option<Bytes>;

	/// Get raw block data by block id.
	fn block(&self, id: BlockId) -> Option<Bytes>;

	/// Get block status by block id.
	fn block_status(&self, id: BlockId) -> BlockStatus;

	/// Get block total difficulty, if the block is known.
	fn block_total_difficulty(&self, id: BlockId) -> Option<U256>;

	/// Get address code.
	fn code(&self, address: &Address) -> Option<Bytes>;

	/// Get transaction with given id, localized to its containing block.
	fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction>;

	/// Get a tree route between `from` and `to`.
	/// See `BlockChain::tree_route`.
	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;

	/// Get latest state node
	fn state_data(&self, hash: &H256) -> Option<Bytes>;

	/// Get raw block receipts data by block header hash.
	fn block_receipts(&self, hash: &H256) -> Option<Bytes>;

	/// Import a block into the blockchain.
	fn import_block(&self, bytes: Bytes) -> ImportResult;

	/// Get block queue information.
	fn queue_info(&self) -> BlockQueueInfo;

	/// Clear block queue and abort all import activity.
	fn clear_queue(&self);

	/// Get blockchain information.
	fn chain_info(&self) -> BlockChainInfo;

	/// Get the best block header.
	// Panics if the best block's header cannot be fetched — the best block is
	// expected to always be present in the chain.
	fn best_block_header(&self) -> Bytes {
		self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap()
	}

	/// Returns numbers of blocks containing given bloom.
	/// `None` when either endpoint of the range cannot be resolved to a block number.
	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>>;

	/// Returns logs matching given filter.
	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;
}
|
|
|
|
|
2016-01-18 23:23:32 +01:00
|
|
|
#[derive(Default, Clone, Debug, Eq, PartialEq)]
/// Report on the status of a client.
pub struct ClientReport {
	/// How many blocks have been imported so far.
	pub blocks_imported: usize,
	/// How many transactions have been applied so far.
	pub transactions_applied: usize,
	/// How much gas has been processed so far.
	pub gas_processed: U256,
}
|
|
|
|
|
|
|
|
impl ClientReport {
	/// Alter internal reporting to reflect the additional `block` has been processed.
	pub fn accrue_block(&mut self, block: &PreVerifiedBlock) {
		self.blocks_imported += 1;
		self.transactions_applied += block.transactions.len();
		// NOTE(review): written as `a = a + b` rather than `+=` — presumably U256
		// does not implement AddAssign here; confirm before "simplifying".
		self.gas_processed = self.gas_processed + block.header.gas_used;
	}
}
|
|
|
|
|
2016-01-07 21:35:06 +01:00
|
|
|
/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
pub struct Client {
	// Canonical chain storage, shared behind a read-write lock.
	chain: Arc<RwLock<BlockChain>>,
	// Consensus engine built from the chain `Spec` (see `Client::new`).
	engine: Arc<Box<Engine>>,
	// Journalled state database; cloned for each block enactment.
	state_db: Mutex<JournalDB>,
	// Queue of blocks awaiting verification and import.
	block_queue: RwLock<BlockQueue>,
	// Running import statistics (see `ClientReport`).
	report: RwLock<ClientReport>,
	// Held for the duration of `import_verified_blocks` so only one
	// import batch runs at a time.
	import_lock: Mutex<()>,
	// Forwards panics raised in the block queue to registered listeners.
	panic_handler: Arc<PanicHandler>,

	// for sealing...
	_sealing_block: Mutex<Option<ClosedBlock>>,
}
|
|
|
|
|
2016-02-04 21:33:30 +01:00
|
|
|
// Number of recent blocks before a block is treated as "ancient" at state-DB
// commit time (see `import_verified_blocks`).
const HISTORY: u64 = 1000;
// Database format version; embedded in the client DB directory name
// (see `Client::new`), so changing it selects a different directory.
const CLIENT_DB_VER_STR: &'static str = "4.0";
|
2016-01-18 13:54:46 +01:00
|
|
|
|
2016-01-07 21:35:06 +01:00
|
|
|
impl Client {
|
2016-01-15 01:03:29 +01:00
|
|
|
/// Create a new client with given spec and DB path.
|
2016-02-25 14:09:39 +01:00
|
|
|
pub fn new(config: ClientConfig, spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage> ) -> Result<Arc<Client>, Error> {
|
2016-02-05 15:08:18 +01:00
|
|
|
let mut dir = path.to_path_buf();
|
|
|
|
dir.push(H64::from(spec.genesis_header().hash()).hex());
|
|
|
|
//TODO: sec/fat: pruned/full versioning
|
|
|
|
dir.push(format!("v{}-sec-pruned", CLIENT_DB_VER_STR));
|
|
|
|
let path = dir.as_path();
|
2016-01-26 15:00:22 +01:00
|
|
|
let gb = spec.genesis_block();
|
2016-02-25 14:09:39 +01:00
|
|
|
let chain = Arc::new(RwLock::new(BlockChain::new(config.blockchain, &gb, path)));
|
2016-01-14 01:28:37 +01:00
|
|
|
let mut state_path = path.to_path_buf();
|
|
|
|
state_path.push("state");
|
2016-02-10 12:50:27 +01:00
|
|
|
|
2016-01-16 11:52:28 +01:00
|
|
|
let engine = Arc::new(try!(spec.to_engine()));
|
2016-02-18 03:46:24 +01:00
|
|
|
let mut state_db = JournalDB::new(state_path.to_str().unwrap());
|
2016-02-05 01:49:06 +01:00
|
|
|
if state_db.is_empty() && engine.spec().ensure_db_good(&mut state_db) {
|
|
|
|
state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
|
2016-01-18 13:54:46 +01:00
|
|
|
}
|
2016-02-10 12:50:27 +01:00
|
|
|
|
2016-02-25 14:09:39 +01:00
|
|
|
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel);
|
2016-02-10 16:35:52 +01:00
|
|
|
let panic_handler = PanicHandler::new_in_arc();
|
|
|
|
panic_handler.forward_from(&block_queue);
|
2016-02-10 12:50:27 +01:00
|
|
|
|
2016-01-22 04:57:02 +01:00
|
|
|
Ok(Arc::new(Client {
|
2016-01-16 11:52:28 +01:00
|
|
|
chain: chain,
|
2016-02-10 12:50:27 +01:00
|
|
|
engine: engine,
|
2016-02-07 23:07:36 +01:00
|
|
|
state_db: Mutex::new(state_db),
|
2016-02-10 12:50:27 +01:00
|
|
|
block_queue: RwLock::new(block_queue),
|
2016-01-21 23:33:52 +01:00
|
|
|
report: RwLock::new(Default::default()),
|
|
|
|
import_lock: Mutex::new(()),
|
2016-02-29 14:57:41 +01:00
|
|
|
panic_handler: panic_handler,
|
|
|
|
_sealing_block: Mutex::new(None),
|
2016-01-22 04:57:02 +01:00
|
|
|
}))
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
2016-01-13 23:15:44 +01:00
|
|
|
|
2016-01-25 18:56:36 +01:00
|
|
|
/// Flush the block import queue.
|
|
|
|
pub fn flush_queue(&self) {
|
2016-01-25 19:20:34 +01:00
|
|
|
self.block_queue.write().unwrap().flush();
|
2016-01-25 18:56:36 +01:00
|
|
|
}
|
|
|
|
|
2016-02-29 14:57:41 +01:00
|
|
|
fn build_last_hashes(&self, parent_hash: H256) -> LastHashes {
|
2016-02-23 18:44:13 +01:00
|
|
|
let mut last_hashes = LastHashes::new();
|
|
|
|
last_hashes.resize(256, H256::new());
|
2016-02-29 14:57:41 +01:00
|
|
|
last_hashes[0] = parent_hash;
|
2016-02-23 18:44:13 +01:00
|
|
|
let chain = self.chain.read().unwrap();
|
|
|
|
for i in 0..255 {
|
|
|
|
match chain.block_details(&last_hashes[i]) {
|
|
|
|
Some(details) => {
|
|
|
|
last_hashes[i + 1] = details.parent.clone();
|
|
|
|
},
|
|
|
|
None => break,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
last_hashes
|
|
|
|
}
|
|
|
|
|
2016-02-24 10:55:34 +01:00
|
|
|
fn check_and_close_block(&self, block: &PreVerifiedBlock) -> Result<ClosedBlock, ()> {
|
|
|
|
let engine = self.engine.deref().deref();
|
|
|
|
let header = &block.header;
|
|
|
|
|
|
|
|
// Verify Block Family
|
|
|
|
let verify_family_result = verify_block_family(&header, &block.bytes, engine, self.chain.read().unwrap().deref());
|
|
|
|
if let Err(e) = verify_family_result {
|
|
|
|
warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
|
|
|
return Err(());
|
|
|
|
};
|
|
|
|
|
|
|
|
// Check if Parent is in chain
|
|
|
|
let chain_has_parent = self.chain.read().unwrap().block_header(&header.parent_hash);
|
|
|
|
if let None = chain_has_parent {
|
|
|
|
warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
|
|
|
|
return Err(());
|
|
|
|
};
|
|
|
|
|
|
|
|
// Enact Verified Block
|
|
|
|
let parent = chain_has_parent.unwrap();
|
2016-02-29 14:57:41 +01:00
|
|
|
let last_hashes = self.build_last_hashes(header.parent_hash.clone());
|
2016-02-24 10:55:34 +01:00
|
|
|
let db = self.state_db.lock().unwrap().clone();
|
|
|
|
|
|
|
|
let enact_result = enact_verified(&block, engine, db, &parent, last_hashes);
|
|
|
|
if let Err(e) = enact_result {
|
|
|
|
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
|
|
|
return Err(());
|
|
|
|
};
|
|
|
|
|
|
|
|
// Final Verification
|
|
|
|
let closed_block = enact_result.unwrap();
|
|
|
|
if let Err(e) = verify_block_final(&header, closed_block.block().header()) {
|
|
|
|
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
|
|
|
return Err(());
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(closed_block)
|
|
|
|
}
|
|
|
|
|
2016-01-15 01:03:29 +01:00
|
|
|
/// This is triggered by a message coming from a block queue when the block is ready for insertion
|
2016-02-06 23:15:53 +01:00
|
|
|
pub fn import_verified_blocks(&self, io: &IoChannel<NetSyncMessage>) -> usize {
|
2016-02-23 18:44:13 +01:00
|
|
|
let max_blocks_to_import = 128;
|
|
|
|
|
|
|
|
let mut good_blocks = Vec::with_capacity(max_blocks_to_import);
|
|
|
|
let mut bad_blocks = HashSet::new();
|
|
|
|
|
2016-01-21 23:33:52 +01:00
|
|
|
let _import_lock = self.import_lock.lock();
|
2016-02-23 18:44:13 +01:00
|
|
|
let blocks = self.block_queue.write().unwrap().drain(max_blocks_to_import);
|
|
|
|
|
2016-02-29 14:57:41 +01:00
|
|
|
let original_best = self.chain_info().best_block_hash;
|
|
|
|
|
2016-01-25 23:24:51 +01:00
|
|
|
for block in blocks {
|
2016-02-24 11:17:25 +01:00
|
|
|
let header = &block.header;
|
2016-02-23 18:44:13 +01:00
|
|
|
|
2016-02-24 10:55:34 +01:00
|
|
|
if bad_blocks.contains(&header.parent_hash) {
|
|
|
|
bad_blocks.insert(header.hash());
|
2016-01-17 23:07:58 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-02-24 10:55:34 +01:00
|
|
|
let closed_block = self.check_and_close_block(&block);
|
|
|
|
if let Err(_) = closed_block {
|
|
|
|
bad_blocks.insert(header.hash());
|
2016-01-27 13:28:15 +01:00
|
|
|
break;
|
2016-01-14 19:03:48 +01:00
|
|
|
}
|
2016-01-14 01:28:37 +01:00
|
|
|
|
2016-02-23 18:44:13 +01:00
|
|
|
// Insert block
|
2016-02-24 10:55:34 +01:00
|
|
|
let closed_block = closed_block.unwrap();
|
2016-02-26 19:43:06 +01:00
|
|
|
self.chain.write().unwrap().insert_block(&block.bytes, closed_block.block().receipts().clone());
|
2016-02-23 18:44:13 +01:00
|
|
|
good_blocks.push(header.hash());
|
|
|
|
|
|
|
|
let ancient = if header.number() >= HISTORY {
|
|
|
|
let n = header.number() - HISTORY;
|
|
|
|
let chain = self.chain.read().unwrap();
|
|
|
|
Some((n, chain.block_hash(n).unwrap()))
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
|
|
|
// Commit results
|
2016-02-24 10:55:34 +01:00
|
|
|
closed_block.drain()
|
|
|
|
.commit(header.number(), &header.hash(), ancient)
|
|
|
|
.expect("State DB commit failed.");
|
2016-02-23 18:44:13 +01:00
|
|
|
|
2016-01-21 23:33:52 +01:00
|
|
|
self.report.write().unwrap().accrue_block(&block);
|
2016-01-18 23:23:32 +01:00
|
|
|
trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
|
2016-01-14 01:28:37 +01:00
|
|
|
}
|
2016-02-23 18:44:13 +01:00
|
|
|
|
2016-02-24 10:55:34 +01:00
|
|
|
let imported = good_blocks.len();
|
|
|
|
let bad_blocks = bad_blocks.into_iter().collect::<Vec<H256>>();
|
|
|
|
|
|
|
|
{
|
2016-02-24 11:17:25 +01:00
|
|
|
let mut block_queue = self.block_queue.write().unwrap();
|
2016-02-24 10:55:34 +01:00
|
|
|
block_queue.mark_as_bad(&bad_blocks);
|
|
|
|
block_queue.mark_as_good(&good_blocks);
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
let block_queue = self.block_queue.read().unwrap();
|
|
|
|
if !good_blocks.is_empty() && block_queue.queue_info().is_empty() {
|
|
|
|
io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
|
|
|
|
good: good_blocks,
|
|
|
|
bad: bad_blocks,
|
|
|
|
})).unwrap();
|
|
|
|
}
|
2016-02-11 22:14:06 +01:00
|
|
|
}
|
2016-02-24 10:55:34 +01:00
|
|
|
|
2016-02-29 14:57:41 +01:00
|
|
|
if self.chain_info().best_block_hash != original_best {
|
|
|
|
self.new_chain_head();
|
|
|
|
}
|
|
|
|
|
2016-02-23 18:44:13 +01:00
|
|
|
imported
|
2016-01-13 23:15:44 +01:00
|
|
|
}
|
2016-01-18 19:23:28 +01:00
|
|
|
|
2016-01-26 15:00:22 +01:00
|
|
|
/// Get a copy of the best block's state.
|
|
|
|
pub fn state(&self) -> State {
|
2016-02-07 23:07:36 +01:00
|
|
|
State::from_existing(self.state_db.lock().unwrap().clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
|
2016-01-26 15:00:22 +01:00
|
|
|
}
|
|
|
|
|
2016-01-18 19:23:28 +01:00
|
|
|
/// Get info on the cache.
|
2016-02-25 14:09:39 +01:00
|
|
|
pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
|
2016-01-18 19:23:28 +01:00
|
|
|
self.chain.read().unwrap().cache_size()
|
|
|
|
}
|
|
|
|
|
2016-01-18 23:23:32 +01:00
|
|
|
/// Get the report.
|
|
|
|
pub fn report(&self) -> ClientReport {
|
2016-01-21 23:33:52 +01:00
|
|
|
self.report.read().unwrap().clone()
|
2016-01-18 23:23:32 +01:00
|
|
|
}
|
|
|
|
|
2016-01-18 19:23:28 +01:00
|
|
|
/// Tick the client.
|
|
|
|
pub fn tick(&self) {
|
2016-02-02 01:59:14 +01:00
|
|
|
self.chain.read().unwrap().collect_garbage();
|
2016-02-25 14:09:39 +01:00
|
|
|
self.block_queue.read().unwrap().collect_garbage();
|
2016-02-02 01:59:14 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Set up the cache behaviour.
|
|
|
|
pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
|
|
|
|
self.chain.write().unwrap().configure_cache(pref_cache_size, max_cache_size);
|
2016-01-18 19:23:28 +01:00
|
|
|
}
|
2016-02-10 19:29:27 +01:00
|
|
|
|
2016-02-11 21:10:41 +01:00
|
|
|
fn block_hash(chain: &BlockChain, id: BlockId) -> Option<H256> {
|
2016-02-10 19:29:27 +01:00
|
|
|
match id {
|
|
|
|
BlockId::Hash(hash) => Some(hash),
|
2016-02-11 21:10:41 +01:00
|
|
|
BlockId::Number(number) => chain.block_hash(number),
|
|
|
|
BlockId::Earliest => chain.block_hash(0),
|
|
|
|
BlockId::Latest => Some(chain.best_block_hash())
|
2016-02-10 19:29:27 +01:00
|
|
|
}
|
|
|
|
}
|
2016-02-13 13:05:28 +01:00
|
|
|
|
|
|
|
fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
|
|
|
|
match id {
|
|
|
|
BlockId::Number(number) => Some(number),
|
|
|
|
BlockId::Hash(ref hash) => self.chain.read().unwrap().block_number(hash),
|
|
|
|
BlockId::Earliest => Some(0),
|
|
|
|
BlockId::Latest => Some(self.chain.read().unwrap().best_block_number())
|
|
|
|
}
|
|
|
|
}
|
2016-02-29 14:57:41 +01:00
|
|
|
|
|
|
|
/// New chain head event.
|
|
|
|
pub fn new_chain_head(&self) {
|
|
|
|
let h = self.chain.read().unwrap().best_block_hash();
|
|
|
|
info!("NEW CHAIN HEAD: #{}: {}", self.chain.read().unwrap().best_block_number(), h);
|
|
|
|
|
|
|
|
info!("Preparing to seal.");
|
|
|
|
let b = OpenBlock::new(
|
2016-02-29 15:30:08 +01:00
|
|
|
self.engine.deref().deref(),
|
|
|
|
self.state_db.lock().unwrap().clone(),
|
|
|
|
match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => {return;} },
|
2016-02-29 14:57:41 +01:00
|
|
|
self.build_last_hashes(h.clone()),
|
|
|
|
x!("0037a6b811ffeb6e072da21179d11b1406371c63"),
|
2016-02-29 15:30:08 +01:00
|
|
|
b"Parity".to_vec()
|
2016-02-29 14:57:41 +01:00
|
|
|
);
|
|
|
|
let b = b.close();
|
2016-02-29 15:30:08 +01:00
|
|
|
info!("Sealed: hash={}, diff={}, number={}", b.hash(), b.block().header().difficulty(), b.block().header().number());
|
2016-02-29 14:57:41 +01:00
|
|
|
*self._sealing_block.lock().unwrap() = Some(b);
|
|
|
|
}
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
|
2016-02-29 14:57:41 +01:00
|
|
|
// TODO: need MinerService MinerIoHandler
|
|
|
|
|
2016-01-07 21:35:06 +01:00
|
|
|
impl BlockChainClient for Client {
|
2016-02-10 19:29:27 +01:00
|
|
|
fn block_header(&self, id: BlockId) -> Option<Bytes> {
|
2016-02-11 21:10:41 +01:00
|
|
|
let chain = self.chain.read().unwrap();
|
|
|
|
Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
|
2016-02-10 19:29:27 +01:00
|
|
|
fn block_body(&self, id: BlockId) -> Option<Bytes> {
|
2016-02-11 21:10:41 +01:00
|
|
|
let chain = self.chain.read().unwrap();
|
|
|
|
Self::block_hash(&chain, id).and_then(|hash| {
|
|
|
|
chain.block(&hash).map(|bytes| {
|
2016-02-10 19:29:27 +01:00
|
|
|
let rlp = Rlp::new(&bytes);
|
2016-02-11 10:20:15 +01:00
|
|
|
let mut body = RlpStream::new_list(2);
|
2016-02-10 19:29:27 +01:00
|
|
|
body.append_raw(rlp.at(1).as_raw(), 1);
|
|
|
|
body.append_raw(rlp.at(2).as_raw(), 1);
|
|
|
|
body.out()
|
|
|
|
})
|
2016-01-07 21:35:06 +01:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2016-02-10 19:29:27 +01:00
|
|
|
fn block(&self, id: BlockId) -> Option<Bytes> {
|
2016-02-11 21:10:41 +01:00
|
|
|
let chain = self.chain.read().unwrap();
|
|
|
|
Self::block_hash(&chain, id).and_then(|hash| {
|
|
|
|
chain.block(&hash)
|
2016-02-10 19:29:27 +01:00
|
|
|
})
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
|
2016-02-10 19:29:27 +01:00
|
|
|
fn block_status(&self, id: BlockId) -> BlockStatus {
|
2016-02-11 21:10:41 +01:00
|
|
|
let chain = self.chain.read().unwrap();
|
|
|
|
match Self::block_hash(&chain, id) {
|
|
|
|
Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain,
|
2016-02-10 19:29:27 +01:00
|
|
|
Some(hash) => self.block_queue.read().unwrap().block_status(&hash),
|
|
|
|
None => BlockStatus::Unknown
|
2016-02-02 12:12:32 +01:00
|
|
|
}
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
2016-02-23 18:44:13 +01:00
|
|
|
|
2016-02-10 19:29:27 +01:00
|
|
|
fn block_total_difficulty(&self, id: BlockId) -> Option<U256> {
|
2016-02-11 21:10:41 +01:00
|
|
|
let chain = self.chain.read().unwrap();
|
|
|
|
Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
|
2016-01-27 12:31:54 +01:00
|
|
|
}
|
2016-01-07 21:35:06 +01:00
|
|
|
|
2016-02-08 10:58:08 +01:00
|
|
|
fn code(&self, address: &Address) -> Option<Bytes> {
|
|
|
|
self.state().code(address)
|
|
|
|
}
|
|
|
|
|
2016-02-10 11:28:40 +01:00
|
|
|
fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction> {
|
2016-02-11 21:10:41 +01:00
|
|
|
let chain = self.chain.read().unwrap();
|
2016-02-10 19:29:27 +01:00
|
|
|
match id {
|
2016-02-11 21:10:41 +01:00
|
|
|
TransactionId::Hash(ref hash) => chain.transaction_address(hash),
|
|
|
|
TransactionId::Location(id, index) => Self::block_hash(&chain, id).map(|hash| TransactionAddress {
|
2016-02-10 19:29:27 +01:00
|
|
|
block_hash: hash,
|
|
|
|
index: index
|
|
|
|
})
|
2016-02-11 21:10:41 +01:00
|
|
|
}.and_then(|address| chain.transaction(&address))
|
2016-02-09 13:17:44 +01:00
|
|
|
}
|
|
|
|
|
2016-01-10 23:37:09 +01:00
|
|
|
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
|
2016-02-27 01:37:12 +01:00
|
|
|
let chain = self.chain.read().unwrap();
|
|
|
|
match chain.is_known(from) && chain.is_known(to) {
|
|
|
|
true => Some(chain.tree_route(from.clone(), to.clone())),
|
|
|
|
false => None
|
|
|
|
}
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
fn state_data(&self, _hash: &H256) -> Option<Bytes> {
|
2016-02-16 01:13:13 +01:00
|
|
|
None
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
fn block_receipts(&self, _hash: &H256) -> Option<Bytes> {
|
2016-02-16 01:13:13 +01:00
|
|
|
None
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
|
2016-01-21 23:33:52 +01:00
|
|
|
fn import_block(&self, bytes: Bytes) -> ImportResult {
|
2016-01-17 23:07:58 +01:00
|
|
|
let header = BlockView::new(&bytes).header();
|
2016-01-14 01:28:37 +01:00
|
|
|
if self.chain.read().unwrap().is_known(&header.hash()) {
|
|
|
|
return Err(ImportError::AlreadyInChain);
|
|
|
|
}
|
2016-02-10 19:29:27 +01:00
|
|
|
if self.block_status(BlockId::Hash(header.parent_hash)) == BlockStatus::Unknown {
|
2016-02-02 12:12:32 +01:00
|
|
|
return Err(ImportError::UnknownParent);
|
|
|
|
}
|
2016-01-21 23:33:52 +01:00
|
|
|
self.block_queue.write().unwrap().import_block(bytes)
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
|
2016-01-22 04:54:38 +01:00
|
|
|
fn queue_info(&self) -> BlockQueueInfo {
|
|
|
|
self.block_queue.read().unwrap().queue_info()
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
|
2016-01-21 23:33:52 +01:00
|
|
|
fn clear_queue(&self) {
|
|
|
|
self.block_queue.write().unwrap().clear();
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
fn chain_info(&self) -> BlockChainInfo {
|
2016-01-11 13:42:32 +01:00
|
|
|
let chain = self.chain.read().unwrap();
|
2016-01-07 21:35:06 +01:00
|
|
|
BlockChainInfo {
|
2016-01-11 13:42:32 +01:00
|
|
|
total_difficulty: chain.best_block_total_difficulty(),
|
|
|
|
pending_total_difficulty: chain.best_block_total_difficulty(),
|
|
|
|
genesis_hash: chain.genesis_hash(),
|
|
|
|
best_block_hash: chain.best_block_hash(),
|
|
|
|
best_block_number: From::from(chain.best_block_number())
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
|
|
|
}
|
2016-02-13 13:05:28 +01:00
|
|
|
|
|
|
|
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>> {
|
|
|
|
match (self.block_number(from_block), self.block_number(to_block)) {
|
|
|
|
(Some(from), Some(to)) => Some(self.chain.read().unwrap().blocks_with_bloom(bloom, from, to)),
|
|
|
|
_ => None
|
|
|
|
}
|
|
|
|
}
|
2016-02-17 12:35:37 +01:00
|
|
|
|
|
|
|
fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
|
|
|
|
let mut blocks = filter.bloom_possibilities().iter()
|
2016-02-24 10:23:25 +01:00
|
|
|
.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
|
2016-02-17 12:35:37 +01:00
|
|
|
.flat_map(|m| m)
|
|
|
|
// remove duplicate elements
|
|
|
|
.collect::<HashSet<u64>>()
|
|
|
|
.into_iter()
|
|
|
|
.collect::<Vec<u64>>();
|
|
|
|
|
|
|
|
blocks.sort();
|
|
|
|
|
|
|
|
blocks.into_iter()
|
2016-02-24 10:23:25 +01:00
|
|
|
.filter_map(|number| self.chain.read().unwrap().block_hash(number).map(|hash| (number, hash)))
|
|
|
|
.filter_map(|(number, hash)| self.chain.read().unwrap().block_receipts(&hash).map(|r| (number, hash, r.receipts)))
|
|
|
|
.filter_map(|(number, hash, receipts)| self.chain.read().unwrap().block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
|
|
|
|
.flat_map(|(number, hash, receipts, hashes)| {
|
2016-02-17 12:35:37 +01:00
|
|
|
let mut log_index = 0;
|
|
|
|
receipts.into_iter()
|
|
|
|
.enumerate()
|
2016-02-24 10:23:25 +01:00
|
|
|
.flat_map(|(index, receipt)| {
|
2016-02-17 12:35:37 +01:00
|
|
|
log_index += receipt.logs.len();
|
|
|
|
receipt.logs.into_iter()
|
|
|
|
.enumerate()
|
|
|
|
.filter(|tuple| filter.matches(&tuple.1))
|
|
|
|
.map(|(i, log)| LocalizedLogEntry {
|
|
|
|
entry: log,
|
|
|
|
block_hash: hash.clone(),
|
|
|
|
block_number: number as usize,
|
2016-02-17 14:57:54 +01:00
|
|
|
transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new),
|
2016-02-17 12:35:37 +01:00
|
|
|
transaction_index: index,
|
2016-02-17 14:13:51 +01:00
|
|
|
log_index: log_index + i
|
2016-02-17 12:35:37 +01:00
|
|
|
})
|
|
|
|
.collect::<Vec<LocalizedLogEntry>>()
|
|
|
|
})
|
|
|
|
.collect::<Vec<LocalizedLogEntry>>()
|
2016-02-26 19:43:06 +01:00
|
|
|
|
2016-02-17 12:35:37 +01:00
|
|
|
})
|
|
|
|
.collect()
|
|
|
|
}
|
2016-01-07 21:35:06 +01:00
|
|
|
}
|
2016-02-10 12:50:27 +01:00
|
|
|
|
2016-02-10 15:28:43 +01:00
|
|
|
impl MayPanic for Client {
	/// Register `closure` to be invoked when a panic is forwarded through
	/// this client's panic handler (e.g. from the block queue's threads).
	fn on_panic<F>(&self, closure: F) where F: OnPanicListener {
		self.panic_handler.on_panic(closure);
	}
}
|