openethereum/src/client.rs

393 lines
12 KiB
Rust
Raw Normal View History

use util::*;
2016-01-16 11:52:28 +01:00
use rocksdb::{Options, DB};
2016-01-18 19:23:28 +01:00
use blockchain::{BlockChain, BlockProvider, CacheSize};
2016-01-07 21:35:06 +01:00
use views::BlockView;
use error::*;
use header::BlockNumber;
use spec::Spec;
2016-01-11 11:51:31 +01:00
use engine::Engine;
2016-01-21 23:33:52 +01:00
use block_queue::BlockQueue;
use db_queue::{DbQueue, StateDBCommit};
use service::NetSyncMessage;
2016-01-14 01:28:37 +01:00
use env_info::LastHashes;
use verification::*;
use block::*;
2016-01-07 21:35:06 +01:00
/// General block status
// Copy/Eq derives added so callers can compare and match statuses cheaply.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BlockStatus {
    /// Part of the blockchain.
    InChain,
    /// Queued for import.
    Queued,
    /// Known as bad.
    Bad,
    /// Unknown.
    Unknown,
}
/// Information about the blockchain gthered together.
2016-01-16 13:30:27 +01:00
#[derive(Debug)]
2016-01-07 21:35:06 +01:00
pub struct BlockChainInfo {
/// Blockchain difficulty.
pub total_difficulty: U256,
/// Block queue difficulty.
pub pending_total_difficulty: U256,
/// Genesis block hash.
pub genesis_hash: H256,
/// Best blockchain block hash.
pub best_block_hash: H256,
/// Best blockchain block number.
pub best_block_number: BlockNumber
}
2016-01-18 19:23:28 +01:00
impl fmt::Display for BlockChainInfo {
    fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
        // Concise `#<best number>.<best hash>` form for log output.
        write!(out, "#{}.{}", self.best_block_number, self.best_block_hash)
    }
}
2016-01-07 21:35:06 +01:00
/// Block queue status
#[derive(Debug)]
pub struct BlockQueueStatus {
    /// Indicates whether the block queue is full and cannot accept more blocks.
    // NOTE(review): `Client::queue_status` currently always reports `false` — confirm intended.
    pub full: bool,
}
2016-01-19 17:02:01 +01:00
/// TODO [arkpar] Please document me
2016-01-07 21:35:06 +01:00
pub type TreeRoute = ::blockchain::TreeRoute;
/// Blockchain database client. Owns and manages a blockchain and a block queue.
2016-01-14 19:03:48 +01:00
pub trait BlockChainClient : Sync + Send {
2016-01-07 21:35:06 +01:00
/// Get raw block header data by block header hash.
fn block_header(&self, hash: &H256) -> Option<Bytes>;
/// Get raw block body data by block header hash.
/// Block body is an RLP list of two items: uncles and transactions.
fn block_body(&self, hash: &H256) -> Option<Bytes>;
/// Get raw block data by block header hash.
fn block(&self, hash: &H256) -> Option<Bytes>;
/// Get block status by block header hash.
fn block_status(&self, hash: &H256) -> BlockStatus;
/// Get raw block header data by block number.
fn block_header_at(&self, n: BlockNumber) -> Option<Bytes>;
/// Get raw block body data by block number.
/// Block body is an RLP list of two items: uncles and transactions.
fn block_body_at(&self, n: BlockNumber) -> Option<Bytes>;
/// Get raw block data by block number.
fn block_at(&self, n: BlockNumber) -> Option<Bytes>;
/// Get block status by block number.
fn block_status_at(&self, n: BlockNumber) -> BlockStatus;
/// Get a tree route between `from` and `to`.
/// See `BlockChain::tree_route`.
2016-01-10 23:37:09 +01:00
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;
2016-01-07 21:35:06 +01:00
/// Get latest state node
fn state_data(&self, hash: &H256) -> Option<Bytes>;
/// Get raw block receipts data by block header hash.
fn block_receipts(&self, hash: &H256) -> Option<Bytes>;
/// Import a block into the blockchain.
2016-01-21 23:33:52 +01:00
fn import_block(&self, bytes: Bytes) -> ImportResult;
2016-01-07 21:35:06 +01:00
/// Get block queue information.
fn queue_status(&self) -> BlockQueueStatus;
2016-01-09 10:16:35 +01:00
/// Clear block queue and abort all import activity.
2016-01-21 23:33:52 +01:00
fn clear_queue(&self);
2016-01-07 21:35:06 +01:00
/// Get blockchain information.
fn chain_info(&self) -> BlockChainInfo;
}
2016-01-18 23:23:32 +01:00
#[derive(Default, Clone, Debug, Eq, PartialEq)]
2016-01-19 17:02:01 +01:00
/// TODO [Gav Wood] Please document me
2016-01-18 23:23:32 +01:00
pub struct ClientReport {
2016-01-19 17:02:01 +01:00
/// TODO [Gav Wood] Please document me
2016-01-18 23:23:32 +01:00
pub blocks_imported: usize,
2016-01-19 17:02:01 +01:00
/// TODO [Gav Wood] Please document me
2016-01-18 23:23:32 +01:00
pub transactions_applied: usize,
2016-01-19 17:02:01 +01:00
/// TODO [Gav Wood] Please document me
2016-01-18 23:23:32 +01:00
pub gas_processed: U256,
}
impl ClientReport {
2016-01-19 17:02:01 +01:00
/// TODO [Gav Wood] Please document me
2016-01-18 23:23:32 +01:00
pub fn accrue_block(&mut self, block: &PreVerifiedBlock) {
self.blocks_imported += 1;
self.transactions_applied += block.transactions.len();
self.gas_processed += block.header.gas_used;
}
}
2016-01-07 21:35:06 +01:00
/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
pub struct Client {
chain: Arc<RwLock<BlockChain>>,
2016-01-14 01:28:37 +01:00
engine: Arc<Box<Engine>>,
2016-01-18 13:54:46 +01:00
state_db: JournalDB,
2016-01-21 23:33:52 +01:00
block_queue: RwLock<BlockQueue>,
db_queue: RwLock<DbQueue>,
report: RwLock<ClientReport>,
uncommited_states: RwLock<HashMap<H256, JournalDB>>,
import_lock: Mutex<()>
2016-01-07 21:35:06 +01:00
}
2016-01-18 13:54:46 +01:00
/// Number of recent blocks whose journalled state is retained before entries
/// are considered ancient and eligible for pruning at commit time.
const HISTORY: u64 = 1000;
2016-01-07 21:35:06 +01:00
impl Client {
2016-01-15 01:03:29 +01:00
/// Create a new client with given spec and DB path.
2016-01-21 23:33:52 +01:00
pub fn new(spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage> ) -> Result<Arc<Client>, Error> {
let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path)));
2016-01-16 11:52:28 +01:00
let mut opts = Options::new();
2016-01-21 16:48:37 +01:00
opts.set_max_open_files(256);
2016-01-16 11:52:28 +01:00
opts.create_if_missing(true);
2016-01-21 23:33:52 +01:00
opts.set_disable_data_sync(true);
opts.set_disable_auto_compactions(true);
2016-01-18 14:44:06 +01:00
/*opts.set_use_fsync(false);
2016-01-16 11:52:28 +01:00
opts.set_bytes_per_sync(8388608);
opts.set_disable_data_sync(false);
opts.set_block_cache_size_mb(1024);
opts.set_table_cache_num_shard_bits(6);
opts.set_max_write_buffer_number(32);
opts.set_write_buffer_size(536870912);
opts.set_target_file_size_base(1073741824);
opts.set_min_write_buffer_number_to_merge(4);
opts.set_level_zero_stop_writes_trigger(2000);
opts.set_level_zero_slowdown_writes_trigger(0);
opts.set_compaction_style(DBUniversalCompaction);
opts.set_max_background_compactions(4);
opts.set_max_background_flushes(4);
opts.set_filter_deletes(false);
2016-01-18 14:44:06 +01:00
opts.set_disable_auto_compactions(false);*/
2016-01-16 11:52:28 +01:00
2016-01-14 01:28:37 +01:00
let mut state_path = path.to_path_buf();
state_path.push("state");
2016-01-21 23:33:52 +01:00
let db = Arc::new(DB::open(&opts, state_path.to_str().unwrap()).unwrap());
2016-01-16 11:52:28 +01:00
let engine = Arc::new(try!(spec.to_engine()));
2016-01-21 23:33:52 +01:00
{
let mut state_db = JournalDB::new_with_arc(db.clone());
if engine.spec().ensure_db_good(&mut state_db) {
state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
}
2016-01-18 13:54:46 +01:00
}
2016-01-21 23:33:52 +01:00
let state_db = JournalDB::new_with_arc(db);
2016-01-14 01:28:37 +01:00
2016-01-21 23:33:52 +01:00
let client = Arc::new(Client {
2016-01-16 11:52:28 +01:00
chain: chain,
2016-01-14 01:28:37 +01:00
engine: engine.clone(),
2016-01-14 19:03:48 +01:00
state_db: state_db,
2016-01-21 23:33:52 +01:00
block_queue: RwLock::new(BlockQueue::new(engine, message_channel)),
db_queue: RwLock::new(DbQueue::new()),
report: RwLock::new(Default::default()),
uncommited_states: RwLock::new(HashMap::new()),
import_lock: Mutex::new(()),
});
let weak = Arc::downgrade(&client);
client.db_queue.read().unwrap().start(weak);
Ok(client)
2016-01-07 21:35:06 +01:00
}
2016-01-13 23:15:44 +01:00
2016-01-15 01:03:29 +01:00
/// This is triggered by a message coming from a block queue when the block is ready for insertion
2016-01-21 23:33:52 +01:00
pub fn import_verified_blocks(&self, _io: &IoChannel<NetSyncMessage>) {
2016-01-17 23:07:58 +01:00
let mut bad = HashSet::new();
2016-01-21 23:33:52 +01:00
let _import_lock = self.import_lock.lock();
let blocks = self.block_queue.write().unwrap().drain(128);
2016-01-17 23:07:58 +01:00
if blocks.is_empty() {
2016-01-14 01:28:37 +01:00
return;
2016-01-17 23:07:58 +01:00
}
for block in blocks {
if bad.contains(&block.header.parent_hash) {
2016-01-21 23:33:52 +01:00
self.block_queue.write().unwrap().mark_as_bad(&block.header.hash());
2016-01-17 23:07:58 +01:00
bad.insert(block.header.hash());
continue;
}
let header = &block.header;
if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) {
warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
2016-01-21 23:33:52 +01:00
self.block_queue.write().unwrap().mark_as_bad(&header.hash());
2016-01-17 23:07:58 +01:00
bad.insert(block.header.hash());
2016-01-14 01:28:37 +01:00
return;
2016-01-17 23:07:58 +01:00
};
let parent = match self.chain.read().unwrap().block_header(&header.parent_hash) {
Some(p) => p,
None => {
warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
2016-01-21 23:33:52 +01:00
self.block_queue.write().unwrap().mark_as_bad(&header.hash());
2016-01-17 23:07:58 +01:00
bad.insert(block.header.hash());
return;
2016-01-14 01:28:37 +01:00
},
2016-01-17 23:07:58 +01:00
};
// build last hashes
let mut last_hashes = LastHashes::new();
last_hashes.resize(256, H256::new());
last_hashes[0] = header.parent_hash.clone();
for i in 0..255 {
match self.chain.read().unwrap().block_details(&last_hashes[i]) {
Some(details) => {
last_hashes[i + 1] = details.parent.clone();
},
None => break,
}
2016-01-14 01:28:37 +01:00
}
2016-01-21 23:33:52 +01:00
let db = match self.uncommited_states.read().unwrap().get(&header.parent_hash) {
Some(db) => db.clone(),
None => self.state_db.clone(),
};
let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) {
2016-01-17 23:07:58 +01:00
Ok(b) => b,
Err(e) => {
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
bad.insert(block.header.hash());
2016-01-21 23:33:52 +01:00
self.block_queue.write().unwrap().mark_as_bad(&header.hash());
2016-01-17 23:07:58 +01:00
return;
}
};
if let Err(e) = verify_block_final(&header, result.block().header()) {
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
2016-01-21 23:33:52 +01:00
self.block_queue.write().unwrap().mark_as_bad(&header.hash());
2016-01-14 19:03:48 +01:00
return;
}
2016-01-14 01:28:37 +01:00
2016-01-17 23:07:58 +01:00
self.chain.write().unwrap().insert_block(&block.bytes); //TODO: err here?
2016-01-18 16:20:35 +01:00
let ancient = if header.number() >= HISTORY { Some(header.number() - HISTORY) } else { None };
match result.drain().commit(header.number(), &header.hash(), ancient.map(|n|(n, self.chain.read().unwrap().block_hash(n).unwrap()))) {
2016-01-17 23:07:58 +01:00
Ok(_) => (),
Err(e) => {
warn!(target: "client", "State DB commit failed: {:?}", e);
return;
}
2016-01-14 19:03:48 +01:00
}
2016-01-21 23:33:52 +01:00
/*
let db = result.drain();
self.uncommited_states.write().unwrap().insert(header.hash(), db.clone());
self.db_queue.write().unwrap().queue(StateDBCommit {
now: header.number(),
hash: header.hash().clone(),
end: ancient.map(|n|(n, self.chain.read().unwrap().block_hash(n).unwrap())),
db: db,
});*/
self.report.write().unwrap().accrue_block(&block);
2016-01-18 23:23:32 +01:00
trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
2016-01-14 01:28:37 +01:00
}
2016-01-13 23:15:44 +01:00
}
2016-01-18 19:23:28 +01:00
2016-01-21 23:33:52 +01:00
/// Clear cached state overlay
pub fn clear_state(&self, hash: &H256) {
self.uncommited_states.write().unwrap().remove(hash);
}
2016-01-18 19:23:28 +01:00
/// Get info on the cache.
pub fn cache_info(&self) -> CacheSize {
self.chain.read().unwrap().cache_size()
}
2016-01-18 23:23:32 +01:00
/// Get the report.
pub fn report(&self) -> ClientReport {
2016-01-21 23:33:52 +01:00
self.report.read().unwrap().clone()
2016-01-18 23:23:32 +01:00
}
2016-01-18 19:23:28 +01:00
/// Tick the client.
pub fn tick(&self) {
self.chain.read().unwrap().collect_garbage(false);
}
2016-01-07 21:35:06 +01:00
}
impl BlockChainClient for Client {
fn block_header(&self, hash: &H256) -> Option<Bytes> {
self.chain.read().unwrap().block(hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())
2016-01-07 21:35:06 +01:00
}
fn block_body(&self, hash: &H256) -> Option<Bytes> {
self.chain.read().unwrap().block(hash).map(|bytes| {
2016-01-07 21:35:06 +01:00
let rlp = Rlp::new(&bytes);
let mut body = RlpStream::new();
2016-01-08 16:00:32 +01:00
body.append_raw(rlp.at(1).as_raw(), 1);
body.append_raw(rlp.at(2).as_raw(), 1);
2016-01-07 21:35:06 +01:00
body.out()
})
}
fn block(&self, hash: &H256) -> Option<Bytes> {
self.chain.read().unwrap().block(hash)
2016-01-07 21:35:06 +01:00
}
fn block_status(&self, hash: &H256) -> BlockStatus {
if self.chain.read().unwrap().is_known(&hash) { BlockStatus::InChain } else { BlockStatus::Unknown }
2016-01-07 21:35:06 +01:00
}
fn block_header_at(&self, n: BlockNumber) -> Option<Bytes> {
self.chain.read().unwrap().block_hash(n).and_then(|h| self.block_header(&h))
2016-01-07 21:35:06 +01:00
}
fn block_body_at(&self, n: BlockNumber) -> Option<Bytes> {
self.chain.read().unwrap().block_hash(n).and_then(|h| self.block_body(&h))
2016-01-07 21:35:06 +01:00
}
fn block_at(&self, n: BlockNumber) -> Option<Bytes> {
self.chain.read().unwrap().block_hash(n).and_then(|h| self.block(&h))
2016-01-07 21:35:06 +01:00
}
fn block_status_at(&self, n: BlockNumber) -> BlockStatus {
match self.chain.read().unwrap().block_hash(n) {
2016-01-07 21:35:06 +01:00
Some(h) => self.block_status(&h),
None => BlockStatus::Unknown
}
}
2016-01-10 23:37:09 +01:00
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
self.chain.read().unwrap().tree_route(from.clone(), to.clone())
2016-01-07 21:35:06 +01:00
}
fn state_data(&self, _hash: &H256) -> Option<Bytes> {
unimplemented!();
}
fn block_receipts(&self, _hash: &H256) -> Option<Bytes> {
unimplemented!();
}
2016-01-21 23:33:52 +01:00
fn import_block(&self, bytes: Bytes) -> ImportResult {
2016-01-17 23:07:58 +01:00
let header = BlockView::new(&bytes).header();
2016-01-14 01:28:37 +01:00
if self.chain.read().unwrap().is_known(&header.hash()) {
return Err(ImportError::AlreadyInChain);
}
2016-01-21 23:33:52 +01:00
self.block_queue.write().unwrap().import_block(bytes)
2016-01-07 21:35:06 +01:00
}
fn queue_status(&self) -> BlockQueueStatus {
BlockQueueStatus {
full: false
}
}
2016-01-21 23:33:52 +01:00
fn clear_queue(&self) {
self.block_queue.write().unwrap().clear();
2016-01-07 21:35:06 +01:00
}
fn chain_info(&self) -> BlockChainInfo {
let chain = self.chain.read().unwrap();
2016-01-07 21:35:06 +01:00
BlockChainInfo {
total_difficulty: chain.best_block_total_difficulty(),
pending_total_difficulty: chain.best_block_total_difficulty(),
genesis_hash: chain.genesis_hash(),
best_block_hash: chain.best_block_hash(),
best_block_number: From::from(chain.best_block_number())
2016-01-07 21:35:06 +01:00
}
}
}