2016-02-05 13:40:41 +01:00
|
|
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
|
|
|
// This file is part of Parity.
|
|
|
|
|
|
|
|
// Parity is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
|
|
|
// Parity is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
|
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2016-02-02 15:29:53 +01:00
|
|
|
//! Blockchain database.
|
2015-12-17 17:20:10 +01:00
|
|
|
|
2016-05-26 18:24:51 +02:00
|
|
|
use bloomchain as bc;
|
2016-01-09 12:30:41 +01:00
|
|
|
use util::*;
|
2015-12-21 02:34:41 +01:00
|
|
|
use header::*;
|
2016-05-26 18:24:51 +02:00
|
|
|
use super::extras::*;
|
2015-12-14 17:12:47 +01:00
|
|
|
use transaction::*;
|
2015-12-17 02:13:14 +01:00
|
|
|
use views::*;
|
2016-02-11 14:35:03 +01:00
|
|
|
use receipt::Receipt;
|
2016-05-26 18:24:51 +02:00
|
|
|
use blooms::{Bloom, BloomGroup};
|
2016-04-17 17:18:25 +02:00
|
|
|
use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData};
|
2016-02-27 01:37:12 +01:00
|
|
|
use blockchain::best_block::BestBlock;
|
2016-05-16 18:33:32 +02:00
|
|
|
use types::tree_route::TreeRoute;
|
2016-02-27 02:16:39 +01:00
|
|
|
use blockchain::update::ExtrasUpdate;
|
2016-05-26 18:24:51 +02:00
|
|
|
use blockchain::{CacheSize, ImportRoute, Config};
|
2016-07-25 10:21:02 +02:00
|
|
|
use db::{Writable, Readable, CacheUpdatePolicy};
|
2016-07-28 23:46:24 +02:00
|
|
|
use client::{DB_COL_EXTRA, DB_COL_HEADERS, DB_COL_BODIES};
|
2016-07-31 00:19:27 +02:00
|
|
|
use cache_manager::CacheManager;
|
2016-02-16 14:46:21 +01:00
|
|
|
|
2016-05-26 18:24:51 +02:00
|
|
|
const LOG_BLOOMS_LEVELS: usize = 3;
|
|
|
|
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
|
2015-12-21 02:57:02 +01:00
|
|
|
|
2016-01-12 13:14:01 +01:00
|
|
|
/// Interface for querying blocks by hash and by number.
|
|
|
|
pub trait BlockProvider {
|
|
|
|
/// Returns true if the given block is known
|
|
|
|
/// (though not necessarily a part of the canon chain).
|
|
|
|
fn is_known(&self, hash: &H256) -> bool;
|
|
|
|
|
|
|
|
/// Get raw block data
|
|
|
|
fn block(&self, hash: &H256) -> Option<Bytes>;
|
|
|
|
|
|
|
|
/// Get the familial details concerning a block.
|
|
|
|
fn block_details(&self, hash: &H256) -> Option<BlockDetails>;
|
|
|
|
|
|
|
|
/// Get the hash of given block's number.
|
|
|
|
fn block_hash(&self, index: BlockNumber) -> Option<H256>;
|
|
|
|
|
2016-02-08 15:53:22 +01:00
|
|
|
/// Get the address of transaction with given hash.
|
|
|
|
fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress>;
|
|
|
|
|
2016-02-17 12:35:37 +01:00
|
|
|
/// Get receipts of block with given hash.
|
|
|
|
fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts>;
|
|
|
|
|
2016-01-12 13:14:01 +01:00
|
|
|
/// Get the partial-header of a block.
|
|
|
|
fn block_header(&self, hash: &H256) -> Option<Header> {
|
2016-07-28 23:46:24 +02:00
|
|
|
self.block_header_data(hash).map(|header| decode(&header))
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
2016-07-28 23:46:24 +02:00
|
|
|
/// Get the header RLP of a block.
|
|
|
|
fn block_header_data(&self, hash: &H256) -> Option<Bytes>;
|
|
|
|
|
|
|
|
/// Get the block body (uncles and transactions).
|
|
|
|
fn block_body(&self, hash: &H256) -> Option<Bytes>;
|
|
|
|
|
2016-01-12 13:14:01 +01:00
|
|
|
/// Get a list of uncles for a given block.
|
2016-03-02 18:05:47 +01:00
|
|
|
/// Returns None if block does not exist.
|
2016-01-12 13:14:01 +01:00
|
|
|
fn uncles(&self, hash: &H256) -> Option<Vec<Header>> {
|
2016-07-28 23:46:24 +02:00
|
|
|
self.block_body(hash).map(|bytes| BodyView::new(&bytes).uncles())
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Get a list of uncle hashes for a given block.
|
|
|
|
/// Returns None if block does not exist.
|
|
|
|
fn uncle_hashes(&self, hash: &H256) -> Option<Vec<H256>> {
|
2016-07-28 23:46:24 +02:00
|
|
|
self.block_body(hash).map(|bytes| BodyView::new(&bytes).uncle_hashes())
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Get the number of given block's hash.
|
|
|
|
fn block_number(&self, hash: &H256) -> Option<BlockNumber> {
|
2016-07-28 23:46:24 +02:00
|
|
|
self.block_details(hash).map(|details| details.number)
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
2016-02-08 15:53:22 +01:00
|
|
|
/// Get transaction with given transaction hash.
|
2016-02-10 19:29:27 +01:00
|
|
|
fn transaction(&self, address: &TransactionAddress) -> Option<LocalizedTransaction> {
|
2016-07-28 23:46:24 +02:00
|
|
|
self.block_body(&address.block_hash)
|
|
|
|
.and_then(|bytes| self.block_number(&address.block_hash)
|
|
|
|
.and_then(|n| BodyView::new(&bytes).localized_transaction_at(&address.block_hash, n, address.index)))
|
2016-02-08 15:53:22 +01:00
|
|
|
}
|
|
|
|
|
2016-03-20 17:29:39 +01:00
|
|
|
/// Get transaction receipt.
|
|
|
|
fn transaction_receipt(&self, address: &TransactionAddress) -> Option<Receipt> {
|
|
|
|
self.block_receipts(&address.block_hash).and_then(|br| br.receipts.into_iter().nth(address.index))
|
|
|
|
}
|
|
|
|
|
2016-01-12 13:14:01 +01:00
|
|
|
/// Get a list of transactions for a given block.
|
2016-02-10 11:28:40 +01:00
|
|
|
/// Returns None if block does not exist.
|
2016-02-09 15:17:01 +01:00
|
|
|
fn transactions(&self, hash: &H256) -> Option<Vec<LocalizedTransaction>> {
|
2016-07-28 23:46:24 +02:00
|
|
|
self.block_body(hash)
|
|
|
|
.and_then(|bytes| self.block_number(hash)
|
|
|
|
.map(|n| BodyView::new(&bytes).localized_transactions(hash, n)))
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns reference to genesis hash.
|
|
|
|
fn genesis_hash(&self) -> H256 {
|
|
|
|
self.block_hash(0).expect("Genesis hash should always exist")
|
|
|
|
}
|
2016-01-26 15:00:22 +01:00
|
|
|
|
|
|
|
/// Returns the header of the genesis block.
|
|
|
|
fn genesis_header(&self) -> Header {
|
|
|
|
self.block_header(&self.genesis_hash()).unwrap()
|
|
|
|
}
|
2016-02-12 00:40:45 +01:00
|
|
|
|
|
|
|
/// Returns numbers of blocks containing given bloom.
|
|
|
|
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
2016-01-18 15:48:38 +01:00
|
|
|
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
|
2016-01-18 19:23:28 +01:00
|
|
|
enum CacheID {
|
2016-07-28 23:46:24 +02:00
|
|
|
BlockHeader(H256),
|
|
|
|
BlockBody(H256),
|
2016-05-26 18:24:51 +02:00
|
|
|
BlockDetails(H256),
|
|
|
|
BlockHashes(BlockNumber),
|
|
|
|
TransactionAddresses(H256),
|
|
|
|
BlocksBlooms(LogGroupPosition),
|
|
|
|
BlockReceipts(H256),
|
2016-01-18 15:48:38 +01:00
|
|
|
}
|
|
|
|
|
2016-05-26 18:24:51 +02:00
|
|
|
impl bc::group::BloomGroupDatabase for BlockChain {
|
|
|
|
fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option<bc::group::BloomGroup> {
|
|
|
|
let position = LogGroupPosition::from(position.clone());
|
2016-08-08 16:14:37 +02:00
|
|
|
let result = self.db.read_with_cache(DB_COL_EXTRA, &self.blocks_blooms, &position).map(Into::into);
|
|
|
|
self.note_used(CacheID::BlocksBlooms(position));
|
|
|
|
result
|
2016-05-26 18:24:51 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-17 17:20:10 +01:00
|
|
|
/// Structure providing fast access to blockchain data.
|
2015-12-26 15:47:07 +01:00
|
|
|
///
|
2015-12-21 15:25:58 +01:00
|
|
|
/// **Does not do input data verification.**
|
2015-12-09 19:03:25 +01:00
|
|
|
pub struct BlockChain {
|
2016-02-22 00:36:59 +01:00
|
|
|
// All locks must be captured in the order declared here.
|
2016-05-26 18:24:51 +02:00
|
|
|
blooms_config: bc::Config,
|
2016-02-02 01:59:14 +01:00
|
|
|
|
2016-01-07 16:08:12 +01:00
|
|
|
best_block: RwLock<BestBlock>,
|
2015-12-13 22:39:01 +01:00
|
|
|
|
2015-12-14 14:15:27 +01:00
|
|
|
// block cache
|
2016-07-28 23:46:24 +02:00
|
|
|
block_headers: RwLock<HashMap<H256, Bytes>>,
|
|
|
|
block_bodies: RwLock<HashMap<H256, Bytes>>,
|
2015-12-14 13:32:22 +01:00
|
|
|
|
2015-12-14 14:15:27 +01:00
|
|
|
// extra caches
|
2016-01-07 16:08:12 +01:00
|
|
|
block_details: RwLock<HashMap<H256, BlockDetails>>,
|
2016-01-11 01:07:58 +01:00
|
|
|
block_hashes: RwLock<HashMap<BlockNumber, H256>>,
|
2016-01-07 16:08:12 +01:00
|
|
|
transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
|
2016-05-26 18:24:51 +02:00
|
|
|
blocks_blooms: RwLock<HashMap<LogGroupPosition, BloomGroup>>,
|
2016-02-17 12:35:37 +01:00
|
|
|
block_receipts: RwLock<HashMap<H256, BlockReceipts>>,
|
2015-12-14 14:15:27 +01:00
|
|
|
|
2016-07-28 23:46:24 +02:00
|
|
|
db: Arc<Database>,
|
2016-01-18 15:48:38 +01:00
|
|
|
|
2016-07-31 00:19:27 +02:00
|
|
|
cache_man: RwLock<CacheManager<CacheID>>,
|
2016-08-01 19:10:13 +02:00
|
|
|
|
|
|
|
pending_best_block: RwLock<Option<BestBlock>>,
|
|
|
|
pending_block_hashes: RwLock<HashMap<BlockNumber, H256>>,
|
|
|
|
pending_transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
|
2015-12-09 19:03:25 +01:00
|
|
|
}
|
|
|
|
|
2016-01-12 13:14:01 +01:00
|
|
|
impl BlockProvider for BlockChain {
|
|
|
|
/// Returns true if the given block is known
|
|
|
|
/// (though not necessarily a part of the canon chain).
|
|
|
|
fn is_known(&self, hash: &H256) -> bool {
|
2016-07-28 23:46:24 +02:00
|
|
|
self.db.exists_with_cache(DB_COL_EXTRA, &self.block_details, hash)
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Get raw block data
|
|
|
|
fn block(&self, hash: &H256) -> Option<Bytes> {
|
2016-07-28 23:46:24 +02:00
|
|
|
match (self.block_header_data(hash), self.block_body(hash)) {
|
|
|
|
(Some(header), Some(body)) => {
|
|
|
|
let mut block = RlpStream::new_list(3);
|
|
|
|
let body_rlp = Rlp::new(&body);
|
|
|
|
block.append_raw(&header, 1);
|
|
|
|
block.append_raw(body_rlp.at(0).as_raw(), 1);
|
|
|
|
block.append_raw(body_rlp.at(1).as_raw(), 1);
|
|
|
|
Some(block.out())
|
|
|
|
},
|
|
|
|
_ => None,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Get block header data
|
|
|
|
fn block_header_data(&self, hash: &H256) -> Option<Bytes> {
|
|
|
|
// Check cache first
|
|
|
|
{
|
|
|
|
let read = self.block_headers.read();
|
|
|
|
if let Some(v) = read.get(hash) {
|
|
|
|
return Some(v.clone());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if it's the best block
|
|
|
|
{
|
|
|
|
let best_block = self.best_block.read();
|
|
|
|
if &best_block.hash == hash {
|
|
|
|
return Some(Rlp::new(&best_block.block).at(0).as_raw().to_vec());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read from DB and populate cache
|
|
|
|
let opt = self.db.get(DB_COL_HEADERS, hash)
|
|
|
|
.expect("Low level database error. Some issue with disk?");
|
|
|
|
|
2016-08-08 16:14:37 +02:00
|
|
|
let result = match opt {
|
2016-07-28 23:46:24 +02:00
|
|
|
Some(b) => {
|
|
|
|
let bytes: Bytes = UntrustedRlp::new(&b).decompress(RlpType::Blocks).to_vec();
|
|
|
|
let mut write = self.block_headers.write();
|
|
|
|
write.insert(hash.clone(), bytes.clone());
|
|
|
|
Some(bytes)
|
|
|
|
},
|
|
|
|
None => None
|
2016-08-08 16:14:37 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
self.note_used(CacheID::BlockHeader(hash.clone()));
|
|
|
|
result
|
2016-07-28 23:46:24 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Get block body data
|
|
|
|
fn block_body(&self, hash: &H256) -> Option<Bytes> {
|
|
|
|
// Check cache first
|
2016-01-12 13:14:01 +01:00
|
|
|
{
|
2016-07-28 23:46:24 +02:00
|
|
|
let read = self.block_bodies.read();
|
2016-01-17 15:56:09 +01:00
|
|
|
if let Some(v) = read.get(hash) {
|
|
|
|
return Some(v.clone());
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-28 23:46:24 +02:00
|
|
|
// Check if it's the best block
|
|
|
|
{
|
|
|
|
let best_block = self.best_block.read();
|
|
|
|
if &best_block.hash == hash {
|
|
|
|
return Some(Self::block_to_body(&best_block.block));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read from DB and populate cache
|
|
|
|
let opt = self.db.get(DB_COL_BODIES, hash)
|
2016-01-12 13:14:01 +01:00
|
|
|
.expect("Low level database error. Some issue with disk?");
|
|
|
|
|
2016-08-08 16:14:37 +02:00
|
|
|
let result = match opt {
|
2016-01-12 13:14:01 +01:00
|
|
|
Some(b) => {
|
Blocks and snapshot compression (#1687)
* new Compressible rlp trait
* new Compressible rlp trait
* make compressed rlp iterable
* make compressed rlp iterable
* invalid rlp slice swapper
* switch compress to swapper, add reverse swapper test case
* add basic account compression test
* add new rlp trait
* new Compressible rlp trait
* make compressed rlp iterable
* invalid rlp slice swapper
* invalid rlp slice swapper
* switch compress to swapper, add reverse swapper test case
* switch compress to swapper, add reverse swapper test case
* add account compress/ decompress test
* make compressor cleaner, use hashmaps for swapper
* improve compression tests
* add a DecompressingDecoder, change Decoder to take refernce
* separate rlp compression related stuff
* new Compressible rlp trait
* new Compressible rlp trait
* new Compressible rlp trait
* make compressed rlp iterable
* make compressed rlp iterable
* make compressed rlp iterable
* invalid rlp slice swapper
* invalid rlp slice swapper
* invalid rlp slice swapper
* switch compress to swapper, add reverse swapper test case
* switch compress to swapper, add reverse swapper test case
* switch compress to swapper, add reverse swapper test case
* add basic account compression test
* add new rlp trait
* add account compress/ decompress test
* make compressor cleaner, use hashmaps for swapper
* improve compression tests
* add a DecompressingDecoder, change Decoder to take refernce
* separate rlp compression related stuff
* DecompressingDecoder test
* initial compressing HashDB wrapper
* remove unused test
* change CompressedDB to struct wrapper with overlay
* simplify compressor
* failed RefCell attempt
* use denote to return reference
* compiled compresseddb
* compressdb test, add overlay emplace
* fix overlay reference count handling
* add immutable compresseddb, make account use hashdb
* simplify using trait objects
* enable hashdb for account
* initial state compression attempt
* wrap state db
* add tests for analyzing db
* add account predicate
* try to compress data fields as rlp too
* remove compression for storage trie
* add a compressing migration
* more compression stats tests
* fix migration import
* nested encoding compression test
* fix decompression, move db stats tests to rlpcompression
* added malformed rlp tests, cover a few edge cases
* new CompressingEncoder struct
* extend migrations to state
* first version working on the whole db
* clean up Compressible impl
* tests cleanup
* add a testing migration
* refactor deep compression using option, add simple compression
* put tests in a module
* fix compressed overlay loading
* simple compression for snapshots
* remove unused DecompressingDecoder
* add a general compressing migration
* add more common rlps to compress
* use static slices for swapper
* add precomputed hashes and invalid rlps
* make decoder private again
* cover more cases with tests
* style
* fix weird indentation
* remove possible panic in payload_info
* make prefix checking safe
* fix db existence check
* remove db dir from test
* pass usize by value [ci skip]
* Improve comment on panic removal.
* add common blocks db rlps
* add compression to blockchain db
* add blocks db migration
* fix the migrations
* remove state compression
* add a separate snapshot swapper
* ability to use different swappers and traversal
* update tests to new interface
* clean up code ordering
* update usage
* fix compilation
* remove unnecessary changes
* move methods to functions to reduce interface
* move test to module
* update common rlps to blocks db
* move tests to tests modules
* remove redundant &
2016-07-27 17:11:41 +02:00
|
|
|
let bytes: Bytes = UntrustedRlp::new(&b).decompress(RlpType::Blocks).to_vec();
|
2016-07-28 23:46:24 +02:00
|
|
|
let mut write = self.block_bodies.write();
|
2016-01-12 13:14:01 +01:00
|
|
|
write.insert(hash.clone(), bytes.clone());
|
|
|
|
Some(bytes)
|
|
|
|
},
|
|
|
|
None => None
|
2016-08-08 16:14:37 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
self.note_used(CacheID::BlockBody(hash.clone()));
|
|
|
|
|
|
|
|
result
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Get the familial details concerning a block.
|
|
|
|
fn block_details(&self, hash: &H256) -> Option<BlockDetails> {
|
2016-08-08 16:14:37 +02:00
|
|
|
let result = self.db.read_with_cache(DB_COL_EXTRA, &self.block_details, hash);
|
2016-05-26 18:24:51 +02:00
|
|
|
self.note_used(CacheID::BlockDetails(hash.clone()));
|
2016-08-08 16:14:37 +02:00
|
|
|
result
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Get the hash of given block's number.
|
|
|
|
fn block_hash(&self, index: BlockNumber) -> Option<H256> {
|
2016-08-08 16:14:37 +02:00
|
|
|
let result = self.db.read_with_cache(DB_COL_EXTRA, &self.block_hashes, &index);
|
2016-05-26 18:24:51 +02:00
|
|
|
self.note_used(CacheID::BlockHashes(index));
|
2016-08-08 16:14:37 +02:00
|
|
|
result
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
2016-02-08 15:53:22 +01:00
|
|
|
|
|
|
|
/// Get the address of transaction with given hash.
|
|
|
|
fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress> {
|
2016-08-08 16:14:37 +02:00
|
|
|
let result = self.db.read_with_cache(DB_COL_EXTRA, &self.transaction_addresses, hash);
|
2016-05-26 18:24:51 +02:00
|
|
|
self.note_used(CacheID::TransactionAddresses(hash.clone()));
|
2016-08-08 16:14:37 +02:00
|
|
|
result
|
2016-02-08 15:53:22 +01:00
|
|
|
}
|
2016-02-12 00:40:45 +01:00
|
|
|
|
2016-02-17 12:35:37 +01:00
|
|
|
/// Get receipts of block with given hash.
|
|
|
|
fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
|
2016-08-08 16:14:37 +02:00
|
|
|
let result = self.db.read_with_cache(DB_COL_EXTRA, &self.block_receipts, hash);
|
2016-05-26 18:24:51 +02:00
|
|
|
self.note_used(CacheID::BlockReceipts(hash.clone()));
|
2016-08-08 16:14:37 +02:00
|
|
|
result
|
2016-02-17 12:35:37 +01:00
|
|
|
}
|
|
|
|
|
2016-02-12 00:40:45 +01:00
|
|
|
/// Returns numbers of blocks containing given bloom.
|
|
|
|
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber> {
|
2016-05-26 18:24:51 +02:00
|
|
|
let range = from_block as bc::Number..to_block as bc::Number;
|
|
|
|
let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
|
|
|
|
chain.with_bloom(&range, &Bloom::from(bloom.clone()).into())
|
|
|
|
.into_iter()
|
|
|
|
.map(|b| b as BlockNumber)
|
|
|
|
.collect()
|
2016-02-12 00:40:45 +01:00
|
|
|
}
|
2016-01-12 13:14:01 +01:00
|
|
|
}
|
|
|
|
|
2016-03-02 17:04:44 +01:00
|
|
|
pub struct AncestryIter<'a> {
|
|
|
|
current: H256,
|
|
|
|
chain: &'a BlockChain,
|
|
|
|
}
|
2016-03-02 17:31:42 +01:00
|
|
|
|
2016-03-02 17:04:44 +01:00
|
|
|
impl<'a> Iterator for AncestryIter<'a> {
|
|
|
|
type Item = H256;
|
|
|
|
fn next(&mut self) -> Option<H256> {
|
|
|
|
if self.current.is_zero() {
|
|
|
|
Option::None
|
|
|
|
} else {
|
2016-03-02 17:31:42 +01:00
|
|
|
let mut n = self.chain.block_details(&self.current).unwrap().parent;
|
|
|
|
mem::swap(&mut self.current, &mut n);
|
|
|
|
Some(n)
|
2016-03-02 17:04:44 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-09 19:03:25 +01:00
|
|
|
impl BlockChain {
|
2015-12-12 15:52:37 +01:00
|
|
|
/// Create new instance of blockchain from given Genesis
|
2016-07-28 23:46:24 +02:00
|
|
|
pub fn new(config: Config, genesis: &[u8], db: Arc<Database>) -> BlockChain {
|
2016-07-31 00:19:27 +02:00
|
|
|
// 400 is the avarage size of the key
|
|
|
|
let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400);
|
2016-01-18 19:23:28 +01:00
|
|
|
|
2015-12-17 01:54:24 +01:00
|
|
|
let bc = BlockChain {
|
2016-05-26 18:24:51 +02:00
|
|
|
blooms_config: bc::Config {
|
|
|
|
levels: LOG_BLOOMS_LEVELS,
|
|
|
|
elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX,
|
|
|
|
},
|
2016-02-27 02:16:39 +01:00
|
|
|
best_block: RwLock::new(BestBlock::default()),
|
2016-07-28 23:46:24 +02:00
|
|
|
block_headers: RwLock::new(HashMap::new()),
|
|
|
|
block_bodies: RwLock::new(HashMap::new()),
|
2016-01-07 16:08:12 +01:00
|
|
|
block_details: RwLock::new(HashMap::new()),
|
|
|
|
block_hashes: RwLock::new(HashMap::new()),
|
|
|
|
transaction_addresses: RwLock::new(HashMap::new()),
|
|
|
|
blocks_blooms: RwLock::new(HashMap::new()),
|
2016-02-17 12:35:37 +01:00
|
|
|
block_receipts: RwLock::new(HashMap::new()),
|
2016-07-28 23:46:24 +02:00
|
|
|
db: db.clone(),
|
2016-01-18 19:23:28 +01:00
|
|
|
cache_man: RwLock::new(cache_man),
|
2016-08-01 19:10:13 +02:00
|
|
|
pending_best_block: RwLock::new(None),
|
|
|
|
pending_block_hashes: RwLock::new(HashMap::new()),
|
|
|
|
pending_transaction_addresses: RwLock::new(HashMap::new()),
|
2015-12-17 01:54:24 +01:00
|
|
|
};
|
|
|
|
|
2015-12-17 15:11:42 +01:00
|
|
|
// load best block
|
2016-07-28 23:46:24 +02:00
|
|
|
let best_block_hash = match bc.db.get(DB_COL_EXTRA, b"best").unwrap() {
|
2016-07-04 18:24:14 +02:00
|
|
|
Some(best) => {
|
2016-07-28 23:46:24 +02:00
|
|
|
H256::from_slice(&best)
|
2016-07-04 18:24:14 +02:00
|
|
|
}
|
2015-12-17 15:11:42 +01:00
|
|
|
None => {
|
|
|
|
// best block does not exist
|
|
|
|
// we need to insert genesis into the cache
|
2015-12-17 17:20:10 +01:00
|
|
|
let block = BlockView::new(genesis);
|
2015-12-17 15:11:42 +01:00
|
|
|
let header = block.header_view();
|
|
|
|
let hash = block.sha3();
|
|
|
|
|
|
|
|
let details = BlockDetails {
|
|
|
|
number: header.number(),
|
|
|
|
total_difficulty: header.difficulty(),
|
|
|
|
parent: header.parent_hash(),
|
|
|
|
children: vec![]
|
|
|
|
};
|
|
|
|
|
2016-07-28 23:46:24 +02:00
|
|
|
let batch = DBTransaction::new(&db);
|
|
|
|
batch.put(DB_COL_HEADERS, &hash, block.header_rlp().as_raw()).unwrap();
|
2016-08-03 19:01:48 +02:00
|
|
|
batch.put(DB_COL_BODIES, &hash, &Self::block_to_body(genesis)).unwrap();
|
2015-12-26 15:47:07 +01:00
|
|
|
|
2016-07-28 23:46:24 +02:00
|
|
|
batch.write(DB_COL_EXTRA, &hash, &details);
|
|
|
|
batch.write(DB_COL_EXTRA, &header.number(), &hash);
|
|
|
|
batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
|
|
|
|
bc.db.write(batch).expect("Low level database error. Some issue with disk?");
|
2015-12-17 15:11:42 +01:00
|
|
|
hash
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2015-12-21 02:57:02 +01:00
|
|
|
{
|
2016-07-28 23:46:24 +02:00
|
|
|
// Fetch best block details
|
|
|
|
let best_block_number = bc.block_number(&best_block_hash).unwrap();
|
|
|
|
let best_block_total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty;
|
|
|
|
let best_block_rlp = bc.block(&best_block_hash).unwrap();
|
|
|
|
|
|
|
|
// and write them
|
2016-07-13 19:59:59 +02:00
|
|
|
let mut best_block = bc.best_block.write();
|
2016-07-28 23:46:24 +02:00
|
|
|
*best_block = BestBlock {
|
|
|
|
number: best_block_number,
|
|
|
|
total_difficulty: best_block_total_difficulty,
|
|
|
|
hash: best_block_hash,
|
|
|
|
block: best_block_rlp,
|
|
|
|
};
|
2015-12-21 02:57:02 +01:00
|
|
|
}
|
2015-12-17 15:11:42 +01:00
|
|
|
|
2015-12-17 01:54:24 +01:00
|
|
|
bc
|
2015-12-11 03:51:23 +01:00
|
|
|
}
|
|
|
|
|
2016-07-17 09:18:15 +02:00
|
|
|
/// Returns true if the given parent block has given child
|
|
|
|
/// (though not necessarily a part of the canon chain).
|
|
|
|
fn is_known_child(&self, parent: &H256, hash: &H256) -> bool {
|
2016-07-28 23:46:24 +02:00
|
|
|
self.db.read_with_cache(DB_COL_EXTRA, &self.block_details, parent).map_or(false, |d| d.children.contains(hash))
|
2016-07-17 09:18:15 +02:00
|
|
|
}
|
|
|
|
|
2016-07-17 23:03:29 +02:00
|
|
|
/// Rewind to a previous block
|
2016-07-25 10:21:02 +02:00
|
|
|
#[cfg(test)]
|
|
|
|
fn rewind(&self) -> Option<H256> {
|
|
|
|
use db::Key;
|
2016-07-28 23:46:24 +02:00
|
|
|
let batch = self.db.transaction();
|
2016-07-17 23:03:29 +02:00
|
|
|
// track back to the best block we have in the blocks database
|
2016-07-28 23:46:24 +02:00
|
|
|
if let Some(best_block_hash) = self.db.get(DB_COL_EXTRA, b"best").unwrap() {
|
2016-07-17 23:03:29 +02:00
|
|
|
let best_block_hash = H256::from_slice(&best_block_hash);
|
|
|
|
if best_block_hash == self.genesis_hash() {
|
|
|
|
return None;
|
|
|
|
}
|
2016-07-28 23:46:24 +02:00
|
|
|
if let Some(extras) = self.db.read(DB_COL_EXTRA, &best_block_hash) as Option<BlockDetails> {
|
2016-07-17 23:03:29 +02:00
|
|
|
type DetailsKey = Key<BlockDetails, Target=H264>;
|
2016-07-28 23:46:24 +02:00
|
|
|
batch.delete(DB_COL_EXTRA, &(DetailsKey::key(&best_block_hash))).unwrap();
|
2016-07-17 23:03:29 +02:00
|
|
|
let hash = extras.parent;
|
|
|
|
let range = extras.number as bc::Number .. extras.number as bc::Number;
|
|
|
|
let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
|
|
|
|
let changes = chain.replace(&range, vec![]);
|
|
|
|
for (k, v) in changes.into_iter() {
|
2016-07-28 23:46:24 +02:00
|
|
|
batch.write(DB_COL_EXTRA, &LogGroupPosition::from(k), &BloomGroup::from(v));
|
2016-07-17 23:03:29 +02:00
|
|
|
}
|
2016-07-28 23:46:24 +02:00
|
|
|
batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
|
|
|
|
|
|
|
|
let best_block_total_difficulty = self.block_details(&hash).unwrap().total_difficulty;
|
|
|
|
let best_block_rlp = self.block(&hash).unwrap();
|
|
|
|
|
2016-07-17 23:03:29 +02:00
|
|
|
let mut best_block = self.best_block.write();
|
2016-07-28 23:46:24 +02:00
|
|
|
*best_block = BestBlock {
|
|
|
|
number: extras.number - 1,
|
|
|
|
total_difficulty: best_block_total_difficulty,
|
|
|
|
hash: hash,
|
|
|
|
block: best_block_rlp,
|
|
|
|
};
|
2016-07-17 23:03:29 +02:00
|
|
|
// update parent extras
|
2016-07-28 23:46:24 +02:00
|
|
|
if let Some(mut details) = self.db.read(DB_COL_EXTRA, &hash) as Option<BlockDetails> {
|
2016-07-17 23:03:29 +02:00
|
|
|
details.children.clear();
|
2016-07-28 23:46:24 +02:00
|
|
|
batch.write(DB_COL_EXTRA, &hash, &details);
|
2016-07-17 23:03:29 +02:00
|
|
|
}
|
2016-07-28 23:46:24 +02:00
|
|
|
self.db.write(batch).expect("Writing to db failed");
|
2016-07-17 23:03:29 +02:00
|
|
|
self.block_details.write().clear();
|
|
|
|
self.block_hashes.write().clear();
|
2016-07-28 23:46:24 +02:00
|
|
|
self.block_headers.write().clear();
|
|
|
|
self.block_bodies.write().clear();
|
2016-07-17 23:03:29 +02:00
|
|
|
self.block_receipts.write().clear();
|
|
|
|
return Some(hash);
|
|
|
|
}
|
|
|
|
}
|
2016-07-19 09:25:51 +02:00
|
|
|
|
|
|
|
None
|
2016-07-17 23:03:29 +02:00
|
|
|
}
|
|
|
|
|
2015-12-17 15:11:42 +01:00
|
|
|
/// Returns a tree route between `from` and `to`, which is a tuple of:
|
2015-12-26 15:47:07 +01:00
|
|
|
///
|
2015-12-17 15:11:42 +01:00
|
|
|
/// - a vector of hashes of all blocks, ordered from `from` to `to`.
|
2015-12-17 17:20:10 +01:00
|
|
|
///
|
2015-12-17 15:11:42 +01:00
|
|
|
/// - common ancestor of these blocks.
|
2015-12-17 17:20:10 +01:00
|
|
|
///
|
2015-12-17 15:11:42 +01:00
|
|
|
/// - an index where best common ancestor would be
|
2015-12-26 15:47:07 +01:00
|
|
|
///
|
2015-12-17 17:20:10 +01:00
|
|
|
/// 1.) from newer to older
|
2015-12-26 15:47:07 +01:00
|
|
|
///
|
2015-12-17 17:20:10 +01:00
|
|
|
/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
|
|
|
|
/// - from: A5, to: A4
|
2015-12-26 15:47:07 +01:00
|
|
|
/// - route:
|
2015-12-17 17:20:10 +01:00
|
|
|
///
|
|
|
|
/// ```json
|
|
|
|
/// { blocks: [A5], ancestor: A4, index: 1 }
|
|
|
|
/// ```
|
2015-12-26 15:47:07 +01:00
|
|
|
///
|
2015-12-17 17:20:10 +01:00
|
|
|
/// 2.) from older to newer
|
2015-12-26 15:47:07 +01:00
|
|
|
///
|
2015-12-17 17:20:10 +01:00
|
|
|
/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
|
|
|
|
/// - from: A3, to: A4
|
2015-12-26 15:47:07 +01:00
|
|
|
/// - route:
|
|
|
|
///
|
2015-12-17 17:20:10 +01:00
|
|
|
/// ```json
|
|
|
|
/// { blocks: [A4], ancestor: A3, index: 0 }
|
|
|
|
/// ```
|
2015-12-17 15:11:42 +01:00
|
|
|
///
|
|
|
|
/// 3.) fork:
|
|
|
|
///
|
2015-12-26 15:47:07 +01:00
|
|
|
/// - bc:
|
2015-12-17 17:20:10 +01:00
|
|
|
///
|
|
|
|
/// ```text
|
|
|
|
/// A1 -> A2 -> A3 -> A4
|
2015-12-17 15:11:42 +01:00
|
|
|
/// -> B3 -> B4
|
2015-12-26 15:47:07 +01:00
|
|
|
/// ```
|
2015-12-17 17:20:10 +01:00
|
|
|
/// - from: B4, to: A4
|
2015-12-26 15:47:07 +01:00
|
|
|
/// - route:
|
|
|
|
///
|
2015-12-17 17:20:10 +01:00
|
|
|
/// ```json
|
|
|
|
/// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 }
|
|
|
|
/// ```
|
2016-02-27 01:37:12 +01:00
|
|
|
pub fn tree_route(&self, from: H256, to: H256) -> TreeRoute {
|
2015-12-17 15:11:42 +01:00
|
|
|
let mut from_branch = vec![];
|
|
|
|
let mut to_branch = vec![];
|
|
|
|
|
2016-07-19 09:23:53 +02:00
|
|
|
let mut from_details = self.block_details(&from).unwrap_or_else(|| panic!("0. Expected to find details for block {:?}", from));
|
|
|
|
let mut to_details = self.block_details(&to).unwrap_or_else(|| panic!("1. Expected to find details for block {:?}", to));
|
2016-02-27 01:37:12 +01:00
|
|
|
let mut current_from = from;
|
|
|
|
let mut current_to = to;
|
2015-12-17 15:11:42 +01:00
|
|
|
|
|
|
|
// reset from && to to the same level
|
|
|
|
while from_details.number > to_details.number {
|
|
|
|
from_branch.push(current_from);
|
|
|
|
current_from = from_details.parent.clone();
|
2016-07-19 09:23:53 +02:00
|
|
|
from_details = self.block_details(&from_details.parent).unwrap_or_else(|| panic!("2. Expected to find details for block {:?}", from_details.parent));
|
2015-12-17 15:11:42 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
while to_details.number > from_details.number {
|
|
|
|
to_branch.push(current_to);
|
|
|
|
current_to = to_details.parent.clone();
|
2016-07-19 09:23:53 +02:00
|
|
|
to_details = self.block_details(&to_details.parent).unwrap_or_else(|| panic!("3. Expected to find details for block {:?}", to_details.parent));
|
2015-12-17 15:11:42 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
assert_eq!(from_details.number, to_details.number);
|
|
|
|
|
|
|
|
// move to shared parent
|
2015-12-18 11:34:55 +01:00
|
|
|
while current_from != current_to {
|
2015-12-17 15:11:42 +01:00
|
|
|
from_branch.push(current_from);
|
|
|
|
current_from = from_details.parent.clone();
|
2016-07-19 09:23:53 +02:00
|
|
|
from_details = self.block_details(&from_details.parent).unwrap_or_else(|| panic!("4. Expected to find details for block {:?}", from_details.parent));
|
2015-12-17 15:11:42 +01:00
|
|
|
|
|
|
|
to_branch.push(current_to);
|
|
|
|
current_to = to_details.parent.clone();
|
2016-07-19 09:23:53 +02:00
|
|
|
to_details = self.block_details(&to_details.parent).unwrap_or_else(|| panic!("5. Expected to find details for block {:?}", from_details.parent));
|
2015-12-17 15:11:42 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
let index = from_branch.len();
|
|
|
|
|
2015-12-17 20:37:04 +01:00
|
|
|
from_branch.extend(to_branch.into_iter().rev());
|
2015-12-17 15:11:42 +01:00
|
|
|
|
|
|
|
TreeRoute {
|
|
|
|
blocks: from_branch,
|
2015-12-21 16:31:51 +01:00
|
|
|
ancestor: current_from,
|
2015-12-17 15:11:42 +01:00
|
|
|
index: index
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-05 17:00:46 +02:00
|
|
|
	/// Inserts a verified, known block from the canonical chain.
	///
	/// Can be performed out-of-order, but care must be taken that the final chain is in a correct state.
	/// This is used by snapshot restoration.
	///
	/// Supply a dummy parent total difficulty when the parent block may not be in the chain.
	/// Returns true if the block is disconnected.
	pub fn insert_snapshot_block(&self, bytes: &[u8], receipts: Vec<Receipt>, parent_td: Option<U256>, is_best: bool) -> bool {
		// create views onto the raw block rlp
		let block = BlockView::new(bytes);
		let header = block.header_view();
		let hash = header.sha3();

		// already imported — nothing to do.
		if self.is_known(&hash) {
			return false;
		}

		// a previous insertion must have been committed before starting a new one
		assert!(self.pending_best_block.read().is_none());

		let batch = self.db.transaction();

		// header and body are stored compressed, in separate columns
		let block_rlp = UntrustedRlp::new(bytes);
		let compressed_header = block_rlp.at(0).unwrap().compress(RlpType::Blocks);
		let compressed_body = UntrustedRlp::new(&Self::block_to_body(bytes)).compress(RlpType::Blocks);

		// store block in db
		batch.put(DB_COL_HEADERS, &hash, &compressed_header).unwrap();
		batch.put(DB_COL_BODIES, &hash, &compressed_body).unwrap();

		let maybe_parent = self.block_details(&header.parent_hash());

		if let Some(parent_details) = maybe_parent {
			// parent known to be in chain.
			let info = BlockInfo {
				hash: hash,
				number: header.number(),
				total_difficulty: parent_details.total_difficulty + header.difficulty(),
				location: BlockLocation::CanonChain,
			};

			self.prepare_update(&batch, ExtrasUpdate {
				block_hashes: self.prepare_block_hashes_update(bytes, &info),
				block_details: self.prepare_block_details_update(bytes, &info),
				block_receipts: self.prepare_block_receipts_update(receipts, &info),
				transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
				blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
				info: info,
				block: bytes
			}, is_best);
			self.db.write(batch).unwrap();

			// block is connected to the chain
			false
		} else {
			// parent not in the chain yet. we need the parent difficulty to proceed.
			let d = parent_td
				.expect("parent total difficulty always supplied for first block in chunk. only first block can have missing parent; qed");

			let info = BlockInfo {
				hash: hash,
				number: header.number(),
				total_difficulty: d + header.difficulty(),
				location: BlockLocation::CanonChain,
			};

			// build the details entry by hand (the parent is absent, so the
			// usual `prepare_block_details_update` parent lookup would panic)
			let block_details = BlockDetails {
				number: header.number(),
				total_difficulty: info.total_difficulty,
				parent: header.parent_hash(),
				children: Vec::new(),
			};

			let mut update = HashMap::new();
			update.insert(hash, block_details);

			self.prepare_update(&batch, ExtrasUpdate {
				block_hashes: self.prepare_block_hashes_update(bytes, &info),
				block_details: update,
				block_receipts: self.prepare_block_receipts_update(receipts, &info),
				transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
				blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
				info: info,
				block: bytes,
			}, is_best);
			self.db.write(batch).unwrap();

			// block is disconnected from the chain
			true
		}
	}
|
|
|
|
|
|
|
|
/// Add a child to a given block. Assumes that the block hash is in
|
|
|
|
/// the chain and the child's parent is this block.
|
|
|
|
///
|
|
|
|
/// Used in snapshots to glue the chunks together at the end.
|
|
|
|
pub fn add_child(&self, block_hash: H256, child_hash: H256) {
|
|
|
|
let mut parent_details = self.block_details(&block_hash)
|
|
|
|
.unwrap_or_else(|| panic!("Invalid block hash: {:?}", block_hash));
|
|
|
|
|
|
|
|
let batch = self.db.transaction();
|
|
|
|
parent_details.children.push(child_hash);
|
|
|
|
|
|
|
|
let mut update = HashMap::new();
|
|
|
|
update.insert(block_hash, parent_details);
|
|
|
|
|
|
|
|
|
|
|
|
let mut write_details = self.block_details.write();
|
|
|
|
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_details, update, CacheUpdatePolicy::Overwrite);
|
|
|
|
|
2016-08-08 16:14:37 +02:00
|
|
|
self.note_used(CacheID::BlockDetails(block_hash));
|
|
|
|
|
2016-08-05 17:00:46 +02:00
|
|
|
self.db.write(batch).unwrap();
|
|
|
|
}
|
|
|
|
|
2016-04-06 10:07:24 +02:00
|
|
|
	#[cfg_attr(feature="dev", allow(similar_names))]
	/// Inserts the block into backing cache database.
	/// Expects the block to be valid and already verified.
	/// If the block is already known, does nothing.
	pub fn insert_block(&self, batch: &DBTransaction, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
		// create views onto rlp
		let block = BlockView::new(bytes);
		let header = block.header_view();
		let hash = header.sha3();

		// already a registered child of its parent — nothing to import.
		if self.is_known_child(&header.parent_hash(), &hash) {
			return ImportRoute::none();
		}

		// the previous insertion must have been committed (`commit`) first
		assert!(self.pending_best_block.read().is_none());

		// store block in db
		batch.put_compressed(DB_COL_HEADERS, &hash, block.header_rlp().as_raw().to_vec()).unwrap();
		batch.put_compressed(DB_COL_BODIES, &hash, Self::block_to_body(bytes)).unwrap();

		// classify the block: canon extension, side branch, or reorg
		let info = self.block_info(&header);

		// log chain reorganizations: new head, retracted blocks, common ancestor, enacted blocks
		if let BlockLocation::BranchBecomingCanonChain(ref d) = info.location {
			info!(target: "reorg", "Reorg to {} ({} {} {})",
				Colour::Yellow.bold().paint(format!("#{} {}", info.number, info.hash)),
				Colour::Red.paint(d.retracted.iter().join(" ")),
				Colour::White.paint(format!("#{} {}", self.block_details(&d.ancestor).expect("`ancestor` is in the route; qed").number, d.ancestor)),
				Colour::Green.paint(d.enacted.iter().join(" "))
			);
		}

		// queue all extras (hashes, details, receipts, tx addresses, blooms)
		// into the batch and the pending caches; applied by `commit`.
		self.prepare_update(batch, ExtrasUpdate {
			block_hashes: self.prepare_block_hashes_update(bytes, &info),
			block_details: self.prepare_block_details_update(bytes, &info),
			block_receipts: self.prepare_block_receipts_update(receipts, &info),
			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
			info: info.clone(),
			block: bytes,
		}, true);

		ImportRoute::from(info)
	}
|
2015-12-21 15:22:24 +01:00
|
|
|
|
2016-07-26 00:20:37 +02:00
|
|
|
/// Get inserted block info which is critical to prepare extras updates.
|
2016-08-03 22:03:40 +02:00
|
|
|
fn block_info(&self, header: &HeaderView) -> BlockInfo {
|
|
|
|
let hash = header.sha3();
|
2016-07-26 00:20:37 +02:00
|
|
|
let number = header.number();
|
|
|
|
let parent_hash = header.parent_hash();
|
|
|
|
let parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
|
|
|
|
let total_difficulty = parent_details.total_difficulty + header.difficulty();
|
|
|
|
let is_new_best = total_difficulty > self.best_block_total_difficulty();
|
|
|
|
|
|
|
|
BlockInfo {
|
|
|
|
hash: hash,
|
|
|
|
number: number,
|
|
|
|
total_difficulty: total_difficulty,
|
|
|
|
location: if is_new_best {
|
|
|
|
// on new best block we need to make sure that all ancestors
|
|
|
|
// are moved to "canon chain"
|
|
|
|
// find the route between old best block and the new one
|
|
|
|
let best_hash = self.best_block_hash();
|
|
|
|
let route = self.tree_route(best_hash, parent_hash);
|
|
|
|
|
|
|
|
assert_eq!(number, parent_details.number + 1);
|
|
|
|
|
|
|
|
match route.blocks.len() {
|
|
|
|
0 => BlockLocation::CanonChain,
|
|
|
|
_ => {
|
|
|
|
let retracted = route.blocks.iter().take(route.index).cloned().collect::<Vec<_>>().into_iter().collect::<Vec<_>>();
|
|
|
|
let enacted = route.blocks.into_iter().skip(route.index).collect::<Vec<_>>();
|
|
|
|
BlockLocation::BranchBecomingCanonChain(BranchBecomingCanonChainData {
|
|
|
|
ancestor: route.ancestor,
|
|
|
|
enacted: enacted,
|
|
|
|
retracted: retracted,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
BlockLocation::Branch
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-01 19:10:13 +02:00
|
|
|
	/// Prepares extras update.
	///
	/// Writes all extras from `update` into `batch` and the corresponding
	/// caches. Best block, block hashes and transaction addresses go only
	/// into *pending* caches; they become visible after `commit`.
	fn prepare_update(&self, batch: &DBTransaction, update: ExtrasUpdate, is_best: bool) {
		{
			// remember the keys before `update.block_details` is consumed below
			let block_hashes: Vec<_> = update.block_details.keys().cloned().collect();

			let mut write_details = self.block_details.write();
			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);

			// mark the freshly cached entries as recently used
			for hash in block_hashes.into_iter() {
				self.note_used(CacheID::BlockDetails(hash));
			}
		}

		{
			let mut write_receipts = self.block_receipts.write();
			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
		}

		{
			let mut write_blocks_blooms = self.blocks_blooms.write();
			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove);
		}

		// These cached values must be updated last with all three locks taken to avoid
		// cache decoherence
		{
			let mut best_block = self.pending_best_block.write();
			// update best block
			match update.info.location {
				// a side-branch block never changes the best block
				BlockLocation::Branch => (),
				_ => if is_best {
					batch.put(DB_COL_EXTRA, b"best", &update.info.hash).unwrap();
					*best_block = Some(BestBlock {
						hash: update.info.hash,
						number: update.info.number,
						total_difficulty: update.info.total_difficulty,
						block: update.block.to_vec(),
					});
				},
			}
			let mut write_hashes = self.pending_block_hashes.write();
			let mut write_txs = self.pending_transaction_addresses.write();

			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Overwrite);
			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
		}
	}
|
|
|
|
|
2016-08-01 19:10:13 +02:00
|
|
|
	/// Apply pending insertion updates
	///
	/// Moves the pending best block, block hashes and transaction addresses
	/// (queued by `prepare_update`) into the live caches. Pending locks are
	/// taken before the live ones; this ordering must stay consistent.
	pub fn commit(&self) {
		let mut pending_best_block = self.pending_best_block.write();
		let mut pending_write_hashes = self.pending_block_hashes.write();
		let mut pending_write_txs = self.pending_transaction_addresses.write();

		let mut best_block = self.best_block.write();
		let mut write_hashes = self.block_hashes.write();
		let mut write_txs = self.transaction_addresses.write();
		// update best block
		if let Some(block) = pending_best_block.take() {
			*best_block = block;
		}

		// remember the keys before the pending maps are drained below
		let pending_hashes_keys: Vec<_> = pending_write_hashes.keys().cloned().collect();
		let pending_txs_keys: Vec<_> = pending_write_txs.keys().cloned().collect();

		// drain the pending maps into the live caches
		write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new()));
		write_txs.extend(mem::replace(&mut *pending_write_txs, HashMap::new()));

		// mark the newly committed entries as recently used
		for n in pending_hashes_keys.into_iter() {
			self.note_used(CacheID::BlockHashes(n));
		}

		for hash in pending_txs_keys.into_iter() {
			self.note_used(CacheID::TransactionAddresses(hash));
		}
	}
|
|
|
|
|
2016-03-02 18:32:54 +01:00
|
|
|
/// Iterator that lists `first` and then all of `first`'s ancestors, by hash.
|
2016-03-02 18:05:47 +01:00
|
|
|
pub fn ancestry_iter(&self, first: H256) -> Option<AncestryIter> {
|
2016-03-02 18:32:54 +01:00
|
|
|
if self.is_known(&first) {
|
|
|
|
Some(AncestryIter {
|
|
|
|
current: first,
|
2016-07-26 20:31:25 +02:00
|
|
|
chain: self,
|
2016-03-02 18:32:54 +01:00
|
|
|
})
|
|
|
|
} else {
|
|
|
|
None
|
2016-03-02 17:04:44 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-02 19:38:00 +01:00
|
|
|
/// Given a block's `parent`, find every block header which represents a valid possible uncle.
|
|
|
|
pub fn find_uncle_headers(&self, parent: &H256, uncle_generations: usize) -> Option<Vec<Header>> {
|
2016-05-24 21:56:17 +02:00
|
|
|
self.find_uncle_hashes(parent, uncle_generations).map(|v| v.into_iter().filter_map(|h| self.block_header(&h)).collect())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Given a block's `parent`, find every block hash which represents a valid possible uncle.
|
|
|
|
pub fn find_uncle_hashes(&self, parent: &H256, uncle_generations: usize) -> Option<Vec<H256>> {
|
2016-03-02 18:32:54 +01:00
|
|
|
if !self.is_known(parent) { return None; }
|
2016-03-02 19:38:00 +01:00
|
|
|
|
|
|
|
let mut excluded = HashSet::new();
|
2016-03-02 18:32:54 +01:00
|
|
|
for a in self.ancestry_iter(parent.clone()).unwrap().take(uncle_generations) {
|
2016-03-02 19:38:00 +01:00
|
|
|
excluded.extend(self.uncle_hashes(&a).unwrap().into_iter());
|
|
|
|
excluded.insert(a);
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut ret = Vec::new();
|
|
|
|
for a in self.ancestry_iter(parent.clone()).unwrap().skip(1).take(uncle_generations) {
|
|
|
|
ret.extend(self.block_details(&a).unwrap().children.iter()
|
2016-05-24 21:56:17 +02:00
|
|
|
.filter(|h| !excluded.contains(h))
|
2016-03-02 19:38:00 +01:00
|
|
|
);
|
2016-03-02 18:32:54 +01:00
|
|
|
}
|
2016-03-02 19:38:00 +01:00
|
|
|
Some(ret)
|
2016-03-01 19:59:12 +01:00
|
|
|
}
|
|
|
|
|
2016-02-27 10:19:33 +01:00
|
|
|
/// This function returns modified block hashes.
|
|
|
|
fn prepare_block_hashes_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<BlockNumber, H256> {
|
|
|
|
let mut block_hashes = HashMap::new();
|
2016-02-27 01:37:12 +01:00
|
|
|
let block = BlockView::new(block_bytes);
|
|
|
|
let header = block.header_view();
|
|
|
|
let number = header.number();
|
|
|
|
|
|
|
|
match info.location {
|
2016-02-27 10:19:33 +01:00
|
|
|
BlockLocation::Branch => (),
|
2016-02-27 01:37:12 +01:00
|
|
|
BlockLocation::CanonChain => {
|
2016-02-27 10:19:33 +01:00
|
|
|
block_hashes.insert(number, info.hash.clone());
|
2016-02-27 01:37:12 +01:00
|
|
|
},
|
2016-04-17 17:18:25 +02:00
|
|
|
BlockLocation::BranchBecomingCanonChain(ref data) => {
|
2016-07-28 23:46:24 +02:00
|
|
|
let ancestor_number = self.block_number(&data.ancestor).expect("Block number of ancestor is always in DB");
|
2016-02-27 01:37:12 +01:00
|
|
|
let start_number = ancestor_number + 1;
|
|
|
|
|
2016-04-17 17:18:25 +02:00
|
|
|
for (index, hash) in data.enacted.iter().cloned().enumerate() {
|
2016-02-27 10:19:33 +01:00
|
|
|
block_hashes.insert(start_number + index as BlockNumber, hash);
|
2016-02-27 01:37:12 +01:00
|
|
|
}
|
|
|
|
|
2016-02-27 10:19:33 +01:00
|
|
|
block_hashes.insert(number, info.hash.clone());
|
2016-02-27 01:37:12 +01:00
|
|
|
}
|
2016-02-27 10:19:33 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
block_hashes
|
2016-02-27 01:37:12 +01:00
|
|
|
}
|
|
|
|
|
2016-02-27 10:19:33 +01:00
|
|
|
/// This function returns modified block details.
|
2016-08-05 17:00:46 +02:00
|
|
|
/// Uses the given parent details or attempts to load them from the database.
|
2016-02-27 10:19:33 +01:00
|
|
|
fn prepare_block_details_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, BlockDetails> {
|
2016-02-27 01:37:12 +01:00
|
|
|
let block = BlockView::new(block_bytes);
|
|
|
|
let header = block.header_view();
|
2015-12-17 17:20:10 +01:00
|
|
|
let parent_hash = header.parent_hash();
|
2015-12-17 15:11:42 +01:00
|
|
|
|
2016-02-27 01:37:12 +01:00
|
|
|
// update parent
|
2016-07-19 09:23:53 +02:00
|
|
|
let mut parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
|
2016-02-27 01:37:12 +01:00
|
|
|
parent_details.children.push(info.hash.clone());
|
|
|
|
|
2015-12-17 15:11:42 +01:00
|
|
|
// create current block details
|
|
|
|
let details = BlockDetails {
|
|
|
|
number: header.number(),
|
2016-02-27 01:37:12 +01:00
|
|
|
total_difficulty: info.total_difficulty,
|
2015-12-17 17:20:10 +01:00
|
|
|
parent: parent_hash.clone(),
|
2015-12-17 15:11:42 +01:00
|
|
|
children: vec![]
|
|
|
|
};
|
2015-12-26 15:47:07 +01:00
|
|
|
|
2016-02-27 01:37:12 +01:00
|
|
|
// write to batch
|
2016-02-27 10:19:33 +01:00
|
|
|
let mut block_details = HashMap::new();
|
|
|
|
block_details.insert(parent_hash, parent_details);
|
|
|
|
block_details.insert(info.hash.clone(), details);
|
|
|
|
block_details
|
2016-02-27 01:37:12 +01:00
|
|
|
}
|
2015-12-17 15:11:42 +01:00
|
|
|
|
2016-02-27 10:19:33 +01:00
|
|
|
/// This function returns modified block receipts.
|
|
|
|
fn prepare_block_receipts_update(&self, receipts: Vec<Receipt>, info: &BlockInfo) -> HashMap<H256, BlockReceipts> {
|
|
|
|
let mut block_receipts = HashMap::new();
|
|
|
|
block_receipts.insert(info.hash.clone(), BlockReceipts::new(receipts));
|
|
|
|
block_receipts
|
2016-02-27 01:37:12 +01:00
|
|
|
}
|
2015-12-21 15:22:24 +01:00
|
|
|
|
2016-02-27 10:19:33 +01:00
|
|
|
/// This function returns modified transaction addresses.
|
|
|
|
fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, TransactionAddress> {
|
2016-02-27 01:37:12 +01:00
|
|
|
let block = BlockView::new(block_bytes);
|
2016-02-29 19:49:29 +01:00
|
|
|
let transaction_hashes = block.transaction_hashes();
|
2015-12-17 15:11:42 +01:00
|
|
|
|
2016-02-27 10:19:33 +01:00
|
|
|
transaction_hashes.into_iter()
|
|
|
|
.enumerate()
|
|
|
|
.fold(HashMap::new(), |mut acc, (i ,tx_hash)| {
|
|
|
|
acc.insert(tx_hash, TransactionAddress {
|
|
|
|
block_hash: info.hash.clone(),
|
|
|
|
index: i
|
|
|
|
});
|
|
|
|
acc
|
|
|
|
})
|
2016-02-27 01:37:12 +01:00
|
|
|
}
|
2016-02-12 14:03:23 +01:00
|
|
|
|
2016-02-27 10:19:33 +01:00
|
|
|
	/// This function returns modified blocks blooms.
	///
	/// To accelerate blooms lookups, blooms are stored in multiple
	/// layers (BLOOM_LEVELS, currently 3).
	/// ChainFilter is responsible for building and rebuilding these layers.
	/// It returns them in HashMap, where values are Blooms and
	/// keys are BloomIndexes. BloomIndex represents bloom location on one
	/// of these layers.
	///
	/// To reduce number of queries to database, block blooms are stored
	/// in BlocksBlooms structure which contains info about several
	/// (BLOOM_INDEX_SIZE, currently 16) consecutive blocks blooms.
	///
	/// Later, BloomIndexer is used to map bloom location on filter layer (BloomIndex)
	/// to bloom location in database (BlocksBloomLocation).
	///
	fn prepare_block_blooms_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<LogGroupPosition, BloomGroup> {
		let block = BlockView::new(block_bytes);
		let header = block.header_view();

		let log_blooms = match info.location {
			// side-branch blocks are not indexed for log lookups
			BlockLocation::Branch => HashMap::new(),
			BlockLocation::CanonChain => {
				// append this block's bloom at its number
				let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
				chain.insert(info.number as bc::Number, Bloom::from(header.log_bloom()).into())
			},
			BlockLocation::BranchBecomingCanonChain(ref data) => {
				// reorg: replace the bloom range from the fork point with the
				// blooms of the enacted blocks plus this new head
				let ancestor_number = self.block_number(&data.ancestor).unwrap();
				let start_number = ancestor_number + 1;
				let range = start_number as bc::Number..self.best_block_number() as bc::Number;

				let mut blooms: Vec<bc::Bloom> = data.enacted.iter()
					.map(|hash| self.block_header_data(hash).unwrap())
					.map(|bytes| HeaderView::new(&bytes).log_bloom())
					.map(Bloom::from)
					.map(Into::into)
					.collect();

				blooms.push(Bloom::from(header.log_bloom()).into());

				let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
				chain.replace(&range, blooms)
			}
		};

		// convert bloomchain's key/value types into our extras types
		log_blooms.into_iter()
			.map(|p| (From::from(p.0), From::from(p.1)))
			.collect()
	}
|
|
|
|
|
2015-12-17 17:20:10 +01:00
|
|
|
/// Get best block hash.
|
2015-12-17 15:11:42 +01:00
|
|
|
pub fn best_block_hash(&self) -> H256 {
|
2016-07-13 19:59:59 +02:00
|
|
|
self.best_block.read().hash.clone()
|
2015-12-17 15:11:42 +01:00
|
|
|
}
|
|
|
|
|
2015-12-17 17:20:10 +01:00
|
|
|
/// Get best block number.
|
2016-01-11 01:07:58 +01:00
|
|
|
pub fn best_block_number(&self) -> BlockNumber {
|
2016-07-13 19:59:59 +02:00
|
|
|
self.best_block.read().number
|
2015-12-17 15:11:42 +01:00
|
|
|
}
|
|
|
|
|
2015-12-17 17:20:10 +01:00
|
|
|
/// Get best block total difficulty.
|
2015-12-17 15:11:42 +01:00
|
|
|
pub fn best_block_total_difficulty(&self) -> U256 {
|
2016-07-13 19:59:59 +02:00
|
|
|
self.best_block.read().total_difficulty
|
2015-12-16 17:39:15 +01:00
|
|
|
}
|
|
|
|
|
2016-07-28 23:46:24 +02:00
|
|
|
/// Get best block header
|
|
|
|
pub fn best_block_header(&self) -> Bytes {
|
|
|
|
let block = self.best_block.read();
|
|
|
|
BlockView::new(&block.block).header_view().rlp().as_raw().to_vec()
|
|
|
|
}
|
|
|
|
|
2015-12-17 17:20:10 +01:00
|
|
|
/// Get current cache size.
|
2015-12-16 17:39:15 +01:00
|
|
|
pub fn cache_size(&self) -> CacheSize {
|
|
|
|
CacheSize {
|
2016-07-28 23:46:24 +02:00
|
|
|
blocks: self.block_headers.read().heap_size_of_children() + self.block_bodies.read().heap_size_of_children(),
|
2016-07-13 19:59:59 +02:00
|
|
|
block_details: self.block_details.read().heap_size_of_children(),
|
|
|
|
transaction_addresses: self.transaction_addresses.read().heap_size_of_children(),
|
|
|
|
blocks_blooms: self.blocks_blooms.read().heap_size_of_children(),
|
|
|
|
block_receipts: self.block_receipts.read().heap_size_of_children(),
|
2015-12-16 17:39:15 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-18 19:23:28 +01:00
|
|
|
/// Let the cache system know that a cacheable item has been used.
|
2016-01-18 15:48:38 +01:00
|
|
|
fn note_used(&self, id: CacheID) {
|
2016-07-13 19:59:59 +02:00
|
|
|
let mut cache_man = self.cache_man.write();
|
2016-07-31 00:19:27 +02:00
|
|
|
cache_man.note_used(id);
|
2016-01-18 15:48:38 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
	/// Ticks our cache system and throws out any old data.
	pub fn collect_garbage(&self) {
		let current_size = self.cache_size().total();

		// take write locks on every cache up front; the GC closure below
		// evicts entries from all of them and reports the remaining size
		let mut block_headers = self.block_headers.write();
		let mut block_bodies = self.block_bodies.write();
		let mut block_details = self.block_details.write();
		let mut block_hashes = self.block_hashes.write();
		let mut transaction_addresses = self.transaction_addresses.write();
		let mut blocks_blooms = self.blocks_blooms.write();
		let mut block_receipts = self.block_receipts.write();

		let mut cache_man = self.cache_man.write();
		cache_man.collect_garbage(current_size, | ids | {
			// evict each item the cache manager selected
			for id in &ids {
				match *id {
					CacheID::BlockHeader(ref h) => { block_headers.remove(h); },
					CacheID::BlockBody(ref h) => { block_bodies.remove(h); },
					CacheID::BlockDetails(ref h) => { block_details.remove(h); }
					CacheID::BlockHashes(ref h) => { block_hashes.remove(h); }
					CacheID::TransactionAddresses(ref h) => { transaction_addresses.remove(h); }
					CacheID::BlocksBlooms(ref h) => { blocks_blooms.remove(h); }
					CacheID::BlockReceipts(ref h) => { block_receipts.remove(h); }
				}
			}

			block_headers.shrink_to_fit();
			block_bodies.shrink_to_fit();
			block_details.shrink_to_fit();
			block_hashes.shrink_to_fit();
			transaction_addresses.shrink_to_fit();
			blocks_blooms.shrink_to_fit();
			block_receipts.shrink_to_fit();

			// return the post-eviction heap usage to the cache manager
			block_headers.heap_size_of_children() +
			block_bodies.heap_size_of_children() +
			block_details.heap_size_of_children() +
			block_hashes.heap_size_of_children() +
			transaction_addresses.heap_size_of_children() +
			blocks_blooms.heap_size_of_children() +
			block_receipts.heap_size_of_children()
		});
	}
|
2016-07-28 23:46:24 +02:00
|
|
|
|
|
|
|
/// Create a block body from a block.
|
|
|
|
pub fn block_to_body(block: &[u8]) -> Bytes {
|
|
|
|
let mut body = RlpStream::new_list(2);
|
|
|
|
let block_rlp = Rlp::new(block);
|
|
|
|
body.append_raw(block_rlp.at(1).as_raw(), 1);
|
|
|
|
body.append_raw(block_rlp.at(2).as_raw(), 1);
|
|
|
|
body.out()
|
|
|
|
}
|
2015-12-09 19:03:25 +01:00
|
|
|
}
|
2015-12-13 22:39:01 +01:00
|
|
|
|
2015-12-17 17:20:10 +01:00
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
2016-04-06 10:07:24 +02:00
|
|
|
#![cfg_attr(feature="dev", allow(similar_names))]
|
2015-12-17 17:20:10 +01:00
|
|
|
use std::str::FromStr;
|
2016-07-28 23:46:24 +02:00
|
|
|
use std::sync::Arc;
|
2015-12-17 17:20:10 +01:00
|
|
|
use rustc_serialize::hex::FromHex;
|
2016-07-28 23:46:24 +02:00
|
|
|
use util::{Database, DatabaseConfig};
|
2015-12-17 17:20:10 +01:00
|
|
|
use util::hash::*;
|
2016-03-01 13:44:09 +01:00
|
|
|
use util::sha3::Hashable;
|
2016-07-28 23:46:24 +02:00
|
|
|
use receipt::Receipt;
|
2016-05-26 18:24:51 +02:00
|
|
|
use blockchain::{BlockProvider, BlockChain, Config, ImportRoute};
|
2016-01-27 16:41:50 +01:00
|
|
|
use tests::helpers::*;
|
2016-02-19 15:18:20 +01:00
|
|
|
use devtools::*;
|
2016-03-02 04:25:03 +01:00
|
|
|
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
|
2016-03-01 13:44:09 +01:00
|
|
|
use views::BlockView;
|
2016-07-28 23:46:24 +02:00
|
|
|
use client;
|
|
|
|
|
|
|
|
fn new_db(path: &str) -> Arc<Database> {
|
|
|
|
Arc::new(Database::open(&DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS), path).unwrap())
|
|
|
|
}
|
|
|
|
|
|
|
|
	#[test]
	fn should_cache_best_block() {
		// given
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let first = canon_chain.generate(&mut finalizer).unwrap();

		let temp = RandomTempPath::new();
		let db = new_db(temp.as_str());
		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
		assert_eq!(bc.best_block_number(), 0);

		// when
		let batch = db.transaction();
		bc.insert_block(&batch, &first, vec![]);
		// best block must not change until `commit` is called
		assert_eq!(bc.best_block_number(), 0);
		bc.commit();
		// NOTE no db.write here (we want to check if best block is cached)

		// then
		assert_eq!(bc.best_block_number(), 1);
		assert!(bc.block(&bc.best_block_hash()).is_some(), "Best block should be queryable even without DB write.");
	}
|
2015-12-17 17:20:10 +01:00
|
|
|
|
|
|
|
	#[test]
	fn basic_blockchain_insert() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let first = canon_chain.generate(&mut finalizer).unwrap();
		let genesis_hash = BlockView::new(&genesis).header_view().sha3();
		let first_hash = BlockView::new(&first).header_view().sha3();

		let temp = RandomTempPath::new();
		let db = new_db(temp.as_str());
		let bc = BlockChain::new(Config::default(), &genesis, db.clone());

		// genesis-only chain
		assert_eq!(bc.genesis_hash(), genesis_hash.clone());
		assert_eq!(bc.best_block_hash(), genesis_hash.clone());
		assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
		assert_eq!(bc.block_hash(1), None);
		assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]);

		// insert one block on top of genesis
		let batch = db.transaction();
		bc.insert_block(&batch, &first, vec![]);
		db.write(batch).unwrap();
		bc.commit();

		// chain now has two blocks with correct parent/child links
		assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
		assert_eq!(bc.best_block_number(), 1);
		assert_eq!(bc.best_block_hash(), first_hash.clone());
		assert_eq!(bc.block_hash(1), Some(first_hash.clone()));
		assert_eq!(bc.block_details(&first_hash).unwrap().parent, genesis_hash.clone());
		assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![first_hash.clone()]);
		assert_eq!(bc.block_hash(2), None);
	}
|
2015-12-21 16:31:51 +01:00
|
|
|
|
2016-03-02 17:31:42 +01:00
|
|
|
	#[test]
	fn check_ancestry_iter() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let genesis = canon_chain.generate(&mut finalizer).unwrap();
		let genesis_hash = BlockView::new(&genesis).header_view().sha3();

		let temp = RandomTempPath::new();
		let db = new_db(temp.as_str());
		let bc = BlockChain::new(Config::default(), &genesis, db.clone());

		// build a 10-block canonical chain, recording hashes in order
		let mut block_hashes = vec![genesis_hash.clone()];
		let batch = db.transaction();
		for _ in 0..10 {
			let block = canon_chain.generate(&mut finalizer).unwrap();
			block_hashes.push(BlockView::new(&block).header_view().sha3());
			bc.insert_block(&batch, &block, vec![]);
			bc.commit();
		}
		db.write(batch).unwrap();

		// ancestry runs newest-to-oldest, so reverse the recorded order
		block_hashes.reverse();

		assert_eq!(bc.ancestry_iter(block_hashes[0].clone()).unwrap().collect::<Vec<_>>(), block_hashes)
	}
|
|
|
|
|
2015-12-21 16:31:51 +01:00
|
|
|
#[test]
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn test_find_uncles() {
	// Build a chain where every height N has a canon block (bNa) and a
	// one-block sidechain fork (bNb), then check that the sidechain
	// blocks are reported as uncle candidates for a canon block.
	let mut canon_chain = ChainGenerator::default();
	let mut finalizer = BlockFinalizer::default();
	let genesis = canon_chain.generate(&mut finalizer).unwrap();
	let b1b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
	let b1a = canon_chain.generate(&mut finalizer).unwrap();
	let b2b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
	let b2a = canon_chain.generate(&mut finalizer).unwrap();
	let b3b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
	let b3a = canon_chain.generate(&mut finalizer).unwrap();
	let b4b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
	let b4a = canon_chain.generate(&mut finalizer).unwrap();
	let b5b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
	let b5a = canon_chain.generate(&mut finalizer).unwrap();

	let temp = RandomTempPath::new();
	let db = new_db(temp.as_str());
	let bc = BlockChain::new(Config::default(), &genesis, db.clone());

	let batch = db.transaction();
	// This loop imports every block exactly once. The previous version
	// additionally repeated insert_block calls for b1b..b5b after the
	// loop — redundant duplicate imports into the same batch, removed.
	for b in &[&b1a, &b1b, &b2a, &b2b, &b3a, &b3b, &b4a, &b4b, &b5a, &b5b] {
		bc.insert_block(&batch, b, vec![]);
		bc.commit();
	}
	db.write(batch).unwrap();

	// uncles of b4a within 3 generations: the sidechain heads b4b, b3b, b2b
	assert_eq!(
		[&b4b, &b3b, &b2b].iter().map(|b| BlockView::new(b).header()).collect::<Vec<_>>(),
		bc.find_uncle_headers(&BlockView::new(&b4a).header_view().sha3(), 3).unwrap()
	);

	// TODO: insert block that already includes one of them as an uncle to check it's not allowed.
}
|
|
|
|
|
2015-12-21 16:31:51 +01:00
|
|
|
#[test]
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
// Builds a three-block canon chain with a fork at height 3, then checks
// that import routes, best-block bookkeeping and tree routes all agree
// once the later canon block (b3a) displaces the sidechain block (b3b).
fn test_small_fork() {
	let mut canon_chain = ChainGenerator::default();
	let mut finalizer = BlockFinalizer::default();
	let genesis = canon_chain.generate(&mut finalizer).unwrap();
	let b1 = canon_chain.generate(&mut finalizer).unwrap();
	let b2 = canon_chain.generate(&mut finalizer).unwrap();
	// b3b forks off after b2; b3a is the canon continuation
	let b3b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
	let b3a = canon_chain.generate(&mut finalizer).unwrap();

	let genesis_hash = BlockView::new(&genesis).header_view().sha3();
	let b1_hash= BlockView::new(&b1).header_view().sha3();
	let b2_hash= BlockView::new(&b2).header_view().sha3();
	let b3a_hash= BlockView::new(&b3a).header_view().sha3();
	let b3b_hash= BlockView::new(&b3b).header_view().sha3();

	// b3a is a part of canon chain, whereas b3b is part of sidechain
	let best_block_hash = b3a_hash.clone();

	let temp = RandomTempPath::new();
	let db = new_db(temp.as_str());
	let bc = BlockChain::new(Config::default(), &genesis, db.clone());

	// import b1, b2, b3b in order, committing in-memory state after each
	let batch = db.transaction();
	let ir1 = bc.insert_block(&batch, &b1, vec![]);
	bc.commit();
	let ir2 = bc.insert_block(&batch, &b2, vec![]);
	bc.commit();
	let ir3b = bc.insert_block(&batch, &b3b, vec![]);
	bc.commit();
	db.write(batch).unwrap();
	// before b3a arrives, the sidechain block is canon at height 3
	assert_eq!(bc.block_hash(3).unwrap(), b3b_hash);
	let batch = db.transaction();
	let ir3a = bc.insert_block(&batch, &b3a, vec![]);
	bc.commit();
	db.write(batch).unwrap();

	// each in-order canon import simply enacts the new block
	assert_eq!(ir1, ImportRoute {
		enacted: vec![b1_hash],
		retracted: vec![],
		omitted: vec![],
	});

	assert_eq!(ir2, ImportRoute {
		enacted: vec![b2_hash],
		retracted: vec![],
		omitted: vec![],
	});

	assert_eq!(ir3b, ImportRoute {
		enacted: vec![b3b_hash],
		retracted: vec![],
		omitted: vec![],
	});

	// importing b3a retracts the sidechain block it displaces
	assert_eq!(ir3a, ImportRoute {
		enacted: vec![b3a_hash],
		retracted: vec![b3b_hash],
		omitted: vec![],
	});

	assert_eq!(bc.best_block_hash(), best_block_hash);
	assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0);
	assert_eq!(bc.block_number(&b1_hash).unwrap(), 1);
	assert_eq!(bc.block_number(&b2_hash).unwrap(), 2);
	assert_eq!(bc.block_number(&b3a_hash).unwrap(), 3);
	// both height-3 blocks are known, but only b3a is canon (checked below)
	assert_eq!(bc.block_number(&b3b_hash).unwrap(), 3);

	assert_eq!(bc.block_hash(0).unwrap(), genesis_hash);
	assert_eq!(bc.block_hash(1).unwrap(), b1_hash);
	assert_eq!(bc.block_hash(2).unwrap(), b2_hash);
	assert_eq!(bc.block_hash(3).unwrap(), b3a_hash);

	// test trie route
	// forward routes down the canon chain: index 0 (nothing retracted)
	let r0_1 = bc.tree_route(genesis_hash.clone(), b1_hash.clone());
	assert_eq!(r0_1.ancestor, genesis_hash);
	assert_eq!(r0_1.blocks, [b1_hash.clone()]);
	assert_eq!(r0_1.index, 0);

	let r0_2 = bc.tree_route(genesis_hash.clone(), b2_hash.clone());
	assert_eq!(r0_2.ancestor, genesis_hash);
	assert_eq!(r0_2.blocks, [b1_hash.clone(), b2_hash.clone()]);
	assert_eq!(r0_2.index, 0);

	let r1_3a = bc.tree_route(b1_hash.clone(), b3a_hash.clone());
	assert_eq!(r1_3a.ancestor, b1_hash);
	assert_eq!(r1_3a.blocks, [b2_hash.clone(), b3a_hash.clone()]);
	assert_eq!(r1_3a.index, 0);

	let r1_3b = bc.tree_route(b1_hash.clone(), b3b_hash.clone());
	assert_eq!(r1_3b.ancestor, b1_hash);
	assert_eq!(r1_3b.blocks, [b2_hash.clone(), b3b_hash.clone()]);
	assert_eq!(r1_3b.index, 0);

	// route across the fork: common ancestor b2, one retraction (index 1)
	let r3a_3b = bc.tree_route(b3a_hash.clone(), b3b_hash.clone());
	assert_eq!(r3a_3b.ancestor, b2_hash);
	assert_eq!(r3a_3b.blocks, [b3a_hash.clone(), b3b_hash.clone()]);
	assert_eq!(r3a_3b.index, 1);

	// backward routes: every block is a retraction, so index == blocks.len()
	let r1_0 = bc.tree_route(b1_hash.clone(), genesis_hash.clone());
	assert_eq!(r1_0.ancestor, genesis_hash);
	assert_eq!(r1_0.blocks, [b1_hash.clone()]);
	assert_eq!(r1_0.index, 1);

	let r2_0 = bc.tree_route(b2_hash.clone(), genesis_hash.clone());
	assert_eq!(r2_0.ancestor, genesis_hash);
	assert_eq!(r2_0.blocks, [b2_hash.clone(), b1_hash.clone()]);
	assert_eq!(r2_0.index, 2);

	let r3a_1 = bc.tree_route(b3a_hash.clone(), b1_hash.clone());
	assert_eq!(r3a_1.ancestor, b1_hash);
	assert_eq!(r3a_1.blocks, [b3a_hash.clone(), b2_hash.clone()]);
	assert_eq!(r3a_1.index, 2);

	let r3b_1 = bc.tree_route(b3b_hash.clone(), b1_hash.clone());
	assert_eq!(r3b_1.ancestor, b1_hash);
	assert_eq!(r3b_1.blocks, [b3b_hash.clone(), b2_hash.clone()]);
	assert_eq!(r3b_1.index, 2);

	// same fork crossing in the other direction
	let r3b_3a = bc.tree_route(b3b_hash.clone(), b3a_hash.clone());
	assert_eq!(r3b_3a.ancestor, b2_hash);
	assert_eq!(r3b_3a.blocks, [b3b_hash.clone(), b3a_hash.clone()]);
	assert_eq!(r3b_3a.index, 1);
}
|
2015-12-21 16:38:31 +01:00
|
|
|
|
|
|
|
#[test]
fn test_reopen_blockchain_db() {
	// A chain reopened over an existing database must restore the
	// previously committed best block instead of falling back to genesis.
	let mut generator = ChainGenerator::default();
	let mut finalizer = BlockFinalizer::default();
	let genesis = generator.generate(&mut finalizer).unwrap();
	let block1 = generator.generate(&mut finalizer).unwrap();
	let hash_genesis = BlockView::new(&genesis).header_view().sha3();
	let hash_block1 = BlockView::new(&block1).header_view().sha3();

	let path = RandomTempPath::new();
	{
		// first session: import one block on top of genesis
		let db = new_db(path.as_str());
		let chain = BlockChain::new(Config::default(), &genesis, db.clone());
		assert_eq!(chain.best_block_hash(), hash_genesis);
		let batch = db.transaction();
		chain.insert_block(&batch, &block1, vec![]);
		db.write(batch).unwrap();
		chain.commit();
		assert_eq!(chain.best_block_hash(), hash_block1);
	}

	{
		// second session: reopen the same database and verify persistence
		let db = new_db(path.as_str());
		let chain = BlockChain::new(Config::default(), &genesis, db.clone());
		assert_eq!(chain.best_block_hash(), hash_block1);
	}
}
|
2016-01-27 17:32:53 +01:00
|
|
|
|
|
|
|
#[test]
fn can_contain_arbitrary_block_sequence() {
	// A generated 50-block dummy chain must report block 49 as best.
	let generated = generate_dummy_blockchain(50);
	let chain = generated.reference();
	assert_eq!(chain.best_block_number(), 49);
}
|
2016-01-28 11:55:03 +01:00
|
|
|
|
|
|
|
#[test]
fn can_collect_garbage() {
	// Populate the header cache by walking a long chain, then verify
	// that `collect_garbage` shrinks the cache below the soft limit.
	let bc_result = generate_dummy_blockchain(3000);
	let bc = bc_result.reference();

	assert_eq!(bc.best_block_number(), 2999);
	let best_hash = bc.best_block_hash();
	// Walk the whole chain backwards so every header is pulled into the
	// cache. Idiom fix: the original spelled this loop as
	// `while !block_header.is_none()` + `.unwrap()`; `while let` avoids
	// both the double negative and the unwrap.
	let mut block_header = bc.block_header(&best_hash);
	while let Some(header) = block_header {
		block_header = bc.block_header(&header.parent_hash);
	}
	// cache should now exceed the 1 MiB threshold
	assert!(bc.cache_size().blocks > 1024 * 1024);

	// two passes, matching the collector's two-generation eviction
	for _ in 0..2 {
		bc.collect_garbage();
	}
	assert!(bc.cache_size().blocks < 1024 * 1024);
}
|
2016-01-28 15:38:42 +01:00
|
|
|
|
|
|
|
#[test]
fn can_contain_arbitrary_block_sequence_with_extra() {
	// A 25-block dummy chain with extra data still tracks its head.
	let generated = generate_dummy_blockchain_with_extra(25);
	let chain = generated.reference();
	assert_eq!(chain.best_block_number(), 24);
}
|
|
|
|
|
|
|
|
#[test]
fn can_contain_only_genesis_block() {
	// An empty chain's best block is genesis (number 0).
	let generated = generate_dummy_empty_blockchain();
	let chain = generated.reference();
	assert_eq!(chain.best_block_number(), 0);
}
|
2016-02-08 15:53:22 +01:00
|
|
|
|
|
|
|
#[test]
fn find_transaction_by_hash() {
	// Fixed RLP fixtures (hex): a genesis block and a block b1 that
	// carries seven transactions.
	let genesis = "f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0af81e09f8c46ca322193edfda764fa7e88e81923f802f1d325ec0b0308ac2cd0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200008083023e38808454c98c8142a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421880102030405060708c0c0".from_hex().unwrap();
	let b1 = "f904a8f901faa0ce1f26f798dd03c8782d63b3e42e79a64eaea5694ea686ac5d7ce3df5171d1aea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0a65c2364cd0f1542d761823dc0109c6b072f14c20459598c5455c274601438f4a070616ebd7ad2ed6fb7860cf7e9df00163842351c38a87cac2c1cb193895035a2a05c5b4fc43c2d45787f54e1ae7d27afdb4ad16dfc567c5692070d5c4556e0b1d7b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000183023ec683021536845685109780a029f07836e4e59229b3a065913afc27702642c683bba689910b2b2fd45db310d3888957e6d004a31802f902a7f85f800a8255f094aaaf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca0575da4e21b66fa764be5f74da9389e67693d066fb0d1312e19e17e501da00ecda06baf5a5327595f6619dfc2fcb3f2e6fb410b5810af3cb52d0e7508038e91a188f85f010a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba04fa966bf34b93abc1bcd665554b7f316b50f928477b50be0f3285ead29d18c5ba017bba0eeec1625ab433746955e125d46d80b7fdc97386c51266f842d8e02192ef85f020a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca004377418ae981cc32b1312b4a427a1d69a821b28db8584f5f2bd8c6d42458adaa053a1dba1af177fac92f3b6af0a9fa46a22adf56e686c93794b6a012bf254abf5f85f030a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ca04fe13febd28a05f4fcb2f451d7ddc2dda56486d9f8c79a62b0ba4da775122615a0651b2382dd402df9ebc27f8cb4b2e0f3cea68dda2dca0ee9603608f0b6f51668f85f040a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba078e6a0ba086a08f8450e208a399bb2f2d2a0d984acd2517c7c7df66ccfab567da013254002cd45a97fac049ae00afbc43ed0d9961d0c56a3b2382c80ce41c198ddf85f050a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba0a7174d8f43ea71c8e3ca9477691add8d80ac8e0ed89d8d8b572041eef81f4a54a0534ea2e28ec4da3b5b944b18c51ec84a5cf35f5b3343c5fb86521fd2d388f506f85f060a82520894bbbf5374fce5edbc8e2a8697c15331677e6ebf0b0a801ba034bd04065833536a10c77ee2a43a5371bc6d34837088b861dd9d4b7f44074b59a078807715786a13876d3455716a6b9cb2186b7a4887a5c31160fc877454958616c0".from_hex().unwrap();
	// known hash of the b1 fixture above
	let b1_hash = H256::from_str("f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3").unwrap();

	let temp = RandomTempPath::new();
	let db = new_db(temp.as_str());
	let bc = BlockChain::new(Config::default(), &genesis, db.clone());
	let batch = db.transaction();
	bc.insert_block(&batch, &b1, vec![]);
	db.write(batch).unwrap();
	bc.commit();

	// every one of b1's transactions must be retrievable through the
	// transaction-address index by its own hash
	let transactions = bc.transactions(&b1_hash).unwrap();
	assert_eq!(transactions.len(), 7);
	for t in transactions {
		assert_eq!(bc.transaction(&bc.transaction_address(&t.hash()).unwrap()).unwrap(), t);
	}
}
|
2016-02-16 11:41:34 +01:00
|
|
|
|
2016-07-28 23:46:24 +02:00
|
|
|
// Test helper: import one block end to end — stage it in a fresh
// transaction, flush that transaction to the database, then commit the
// chain's in-memory state. Returns the resulting import route.
fn insert_block(db: &Arc<Database>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
	let batch = db.transaction();
	let route = bc.insert_block(&batch, bytes, receipts);
	db.write(batch).unwrap();
	bc.commit();
	route
}
|
|
|
|
|
2016-02-16 11:41:34 +01:00
|
|
|
#[test]
fn test_bloom_filter_simple() {
	// TODO: From here
	// bloom carried only by fork block b1
	let bloom_b1 = H2048::from_str("00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000").unwrap();

	// bloom carried only by fork block b2
	let bloom_b2 = H2048::from_str("00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();

	// bloom shared by canon blocks b1a/b2a and fork block b3
	let bloom_ba = H2048::from_str("00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();

	// fork: b1, b2, b3; canon: b1a, b2a — each block tagged with a bloom
	let mut canon_chain = ChainGenerator::default();
	let mut finalizer = BlockFinalizer::default();
	let genesis = canon_chain.generate(&mut finalizer).unwrap();
	let mut fork = canon_chain.fork(1);
	let mut fork_finalizer = finalizer.fork();
	let b1 = fork.with_bloom(bloom_b1.clone()).generate(&mut fork_finalizer).unwrap();
	let b2 = fork.with_bloom(bloom_b2.clone()).generate(&mut fork_finalizer).unwrap();
	let b3 = fork.with_bloom(bloom_ba.clone()).generate(&mut fork_finalizer).unwrap();
	let b1a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap();
	let b2a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap();

	let temp = RandomTempPath::new();
	let db = new_db(temp.as_str());
	let bc = BlockChain::new(Config::default(), &genesis, db.clone());

	// nothing imported yet -> no bloom matches in range 0..5
	let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
	let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
	assert_eq!(blocks_b1, vec![]);
	assert_eq!(blocks_b2, vec![]);

	// b1 becomes canon at height 1, so its bloom is indexed there
	insert_block(&db, &bc, &b1, vec![]);
	let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
	let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
	assert_eq!(blocks_b1, vec![1]);
	assert_eq!(blocks_b2, vec![]);

	// b2 extends the same branch: its bloom indexed at height 2
	insert_block(&db, &bc, &b2, vec![]);
	let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
	let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
	assert_eq!(blocks_b1, vec![1]);
	assert_eq!(blocks_b2, vec![2]);

	// hasn't been forked yet
	insert_block(&db, &bc, &b1a, vec![]);
	let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
	let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
	let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
	assert_eq!(blocks_b1, vec![1]);
	assert_eq!(blocks_b2, vec![2]);
	assert_eq!(blocks_ba, vec![]);

	// fork has happened: b1a/b2a are now canon, b1/b2 blooms retracted
	insert_block(&db, &bc, &b2a, vec![]);
	let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
	let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
	let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
	assert_eq!(blocks_b1, vec![]);
	assert_eq!(blocks_b2, vec![]);
	assert_eq!(blocks_ba, vec![1, 2]);

	// fork back: b3 makes the original branch longest again
	insert_block(&db, &bc, &b3, vec![]);
	let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
	let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
	let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
	assert_eq!(blocks_b1, vec![1]);
	assert_eq!(blocks_b2, vec![2]);
	assert_eq!(blocks_ba, vec![3]);
}
|
2016-07-14 19:16:01 +02:00
|
|
|
|
|
|
|
#[test]
fn test_best_block_update() {
	// A shorter uncle imported after a longer canon chain must not
	// displace the best block — neither in memory nor after reopen.
	let mut generator = ChainGenerator::default();
	let mut finalizer = BlockFinalizer::default();
	let genesis = generator.generate(&mut finalizer).unwrap();

	let path = RandomTempPath::new();

	{
		let db = new_db(path.as_str());
		let chain = BlockChain::new(Config::default(), &genesis, db.clone());
		let uncle = generator.fork(1).generate(&mut finalizer.fork()).unwrap();

		let batch = db.transaction();
		// create a longer fork
		for _ in 0..5 {
			let canon_block = generator.generate(&mut finalizer).unwrap();
			chain.insert_block(&batch, &canon_block, vec![]);
			chain.commit();
		}

		assert_eq!(chain.best_block_number(), 5);
		chain.insert_block(&batch, &uncle, vec![]);
		db.write(batch).unwrap();
		chain.commit();
	}

	// re-loading the blockchain should load the correct best block.
	let db = new_db(path.as_str());
	let chain = BlockChain::new(Config::default(), &genesis, db.clone());
	assert_eq!(chain.best_block_number(), 5);
}
|
2016-07-17 23:03:29 +02:00
|
|
|
|
|
|
|
#[test]
fn test_rewind() {
	// Import two blocks, then unwind the head one block at a time with
	// `rewind`, checking the best block after each step.
	let mut generator = ChainGenerator::default();
	let mut finalizer = BlockFinalizer::default();
	let genesis = generator.generate(&mut finalizer).unwrap();
	let block1 = generator.generate(&mut finalizer).unwrap();
	let block2 = generator.generate(&mut finalizer).unwrap();
	let genesis_hash = BlockView::new(&genesis).header_view().sha3();
	let hash1 = BlockView::new(&block1).header_view().sha3();
	let hash2 = BlockView::new(&block2).header_view().sha3();

	let path = RandomTempPath::new();
	let db = new_db(path.as_str());
	let chain = BlockChain::new(Config::default(), &genesis, db.clone());

	let batch = db.transaction();
	chain.insert_block(&batch, &block1, vec![]);
	chain.commit();
	chain.insert_block(&batch, &block2, vec![]);
	chain.commit();
	db.write(batch).unwrap();

	// first rewind drops block2 and returns the new best hash
	assert_eq!(chain.rewind(), Some(hash1.clone()));
	assert!(!chain.is_known(&hash2));
	assert_eq!(chain.best_block_number(), 1);
	assert_eq!(chain.best_block_hash(), hash1.clone());

	// second rewind lands on genesis; rewinding past genesis yields None
	assert_eq!(chain.rewind(), Some(genesis_hash.clone()));
	assert_eq!(chain.rewind(), None);
}
|
2015-12-17 17:20:10 +01:00
|
|
|
}
|