From 86f6cea29de72ce33a874b26557fd1f6373eb2a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 28 Feb 2018 14:59:04 +0100 Subject: [PATCH] [beta] Backports (#8011) * Hardware-wallet/usb-subscribe-refactor (#7860) * Hardware-wallet fix * More fine-grained initialization of callbacks by vendorID, productID and usb class * Each device manufacturer gets a separate handle thread each * Replaced "dummy for loop" with a delay to wait for the device to boot-up properly * Haven't been very carefully with checking dependencies cycles etc * Inline comments explaining where shortcuts have been taken * Need to test this on Windows machine and with Ledger (both models) Signed-off-by: niklasad1 * Validate product_id of detected ledger devices * closed_device => unlocked_device * address comments * add target in debug * Address feedback * Remove thread joining in HardwareWalletManager * Remove thread handlers in HardwareWalletManager because this makes them unused * fixed broken logs (#7934) * fixed broken logs * bring back old lock order * removed bloom groups from blockchain * revert unrelated changes * simplify blockchain_block_blooms * Bump WS (#7952) * Calculate proper keccak256/sha3 using parity. (#7953) * Increase max download limit to 128MB (#7965) * fetch: increase max download limit to 64MB * parity: increase download size limit for updater service * Detect too large packets in snapshot sync. 
(#7977) * fix traces, removed bloomchain crate, closes #7228, closes #7167 (#7979) * Remove generator.rs * Make block generator easier to use (#7888) * Make block generator easier to use * applied review suggestions * rename BlockMetadata -> BlockOptions * removed redundant uses of blockchain generator and generator.next().unwrap() calls --- Cargo.lock | 15 +- ethcore/Cargo.toml | 1 - ethcore/src/blockchain/blockchain.rs | 812 +++++++----------- ethcore/src/blockchain/cache.rs | 4 +- ethcore/src/blockchain/extras.rs | 44 - ethcore/src/blockchain/generator.rs | 218 +++++ ethcore/src/blockchain/generator/block.rs | 72 -- ethcore/src/blockchain/generator/bloom.rs | 35 - ethcore/src/blockchain/generator/complete.rs | 52 -- ethcore/src/blockchain/generator/fork.rs | 42 - ethcore/src/blockchain/generator/generator.rs | 179 ---- ethcore/src/blockchain/generator/mod.rs | 27 - .../src/blockchain/generator/transaction.rs | 35 - ethcore/src/blockchain/update.rs | 5 +- ethcore/src/blooms/bloom_group.rs | 74 -- ethcore/src/blooms/group_position.rs | 42 - ethcore/src/client/client.rs | 13 +- ethcore/src/lib.rs | 2 - ethcore/src/snapshot/error.rs | 3 + ethcore/src/snapshot/mod.rs | 5 + ethcore/src/snapshot/service.rs | 12 +- ethcore/src/snapshot/tests/proof_of_work.rs | 37 +- ethcore/src/trace/bloom.rs | 77 -- ethcore/src/trace/config.rs | 7 - ethcore/src/trace/db.rs | 157 +--- ethcore/src/trace/mod.rs | 1 - ethcore/src/trace/types/filter.rs | 17 +- ethcore/src/verification/verification.rs | 2 +- hw/src/ledger.rs | 82 +- hw/src/lib.rs | 114 ++- hw/src/trezor.rs | 80 +- parity/run.rs | 6 +- scripts/gitlab-build.sh | 11 +- util/fetch/src/client.rs | 9 +- 34 files changed, 819 insertions(+), 1473 deletions(-) create mode 100644 ethcore/src/blockchain/generator.rs delete mode 100644 ethcore/src/blockchain/generator/block.rs delete mode 100644 ethcore/src/blockchain/generator/bloom.rs delete mode 100644 ethcore/src/blockchain/generator/complete.rs delete mode 100644 
ethcore/src/blockchain/generator/fork.rs delete mode 100644 ethcore/src/blockchain/generator/generator.rs delete mode 100644 ethcore/src/blockchain/generator/mod.rs delete mode 100644 ethcore/src/blockchain/generator/transaction.rs delete mode 100644 ethcore/src/blooms/bloom_group.rs delete mode 100644 ethcore/src/blooms/group_position.rs delete mode 100644 ethcore/src/trace/bloom.rs diff --git a/Cargo.lock b/Cargo.lock index 981ba1b31..345a7fb06 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -181,11 +181,6 @@ dependencies = [ "keccak-hash 0.1.0", ] -[[package]] -name = "bloomchain" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "bn" version = "0.4.4" @@ -476,7 +471,6 @@ version = "1.9.0" dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomable 0.1.0", - "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bn 0.4.4 (git+https://github.com/paritytech/bn)", "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", @@ -1333,7 +1327,7 @@ dependencies = [ "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ws 0.7.1 (git+https://github.com/tomusdrw/ws-rs)", + "ws 0.7.5 (git+https://github.com/tomusdrw/ws-rs)", ] [[package]] @@ -3529,8 +3523,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "ws" -version = "0.7.1" -source = "git+https://github.com/tomusdrw/ws-rs#f8306a798b7541d64624299a83a2c934f173beed" +version = "0.7.5" +source = "git+https://github.com/tomusdrw/ws-rs#368ce39e2aa8700d568ca29dbacaecdf1bf749d1" dependencies = [ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3608,7 +3602,6 @@ dependencies = 
[ "checksum bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1370e9fc2a6ae53aea8b7a5110edbd08836ed87c88736dfabccade1c2b44bff4" "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" "checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf" -"checksum bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f421095d2a76fc24cd3fb3f912b90df06be7689912b1bdb423caefae59c258d" "checksum bn 0.4.4 (git+https://github.com/paritytech/bn)" = "" "checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d" "checksum bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d828f97b58cc5de3e40c421d0cf2132d6b2da4ee0e11b8632fa838f0f9333ad6" @@ -3860,7 +3853,7 @@ dependencies = [ "checksum wasmi 0.0.0 (git+https://github.com/pepyakin/wasmi)" = "" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum ws 0.7.1 (git+https://github.com/tomusdrw/ws-rs)" = "" +"checksum ws 0.7.5 (git+https://github.com/tomusdrw/ws-rs)" = "" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum xdg 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a66b7c2281ebde13cf4391d70d4c7e5946c3c25e72a7b859ca8f677dcd0b0c61" "checksum xml-rs 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7ec6c39eaa68382c8e31e35239402c0a9489d4141a8ceb0c716099a0b515b562" diff --git a/ethcore/Cargo.toml 
b/ethcore/Cargo.toml index 35b2e9bc7..883fc0020 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -8,7 +8,6 @@ authors = ["Parity Technologies "] [dependencies] ansi_term = "0.9" -bloomchain = "0.1" bn = { git = "https://github.com/paritytech/bn" } byteorder = "1.0" common-types = { path = "types" } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index cdd8f82c3..5b0a54212 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -20,7 +20,6 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::mem; use itertools::Itertools; -use bloomchain as bc; use heapsize::HeapSizeOf; use bigint::prelude::U256; use bigint::hash::{H256, H2048}; @@ -33,9 +32,8 @@ use transaction::*; use views::*; use log_entry::{LogEntry, LocalizedLogEntry}; use receipt::Receipt; -use blooms::{Bloom, BloomGroup}; -use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}; use blockchain::best_block::{BestBlock, BestAncientBlock}; +use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}; use types::blockchain_info::BlockChainInfo; use types::tree_route::TreeRoute; use blockchain::update::ExtrasUpdate; @@ -48,9 +46,6 @@ use rayon::prelude::*; use ansi_term::Colour; use kvdb::{DBTransaction, KeyValueDB}; -const LOG_BLOOMS_LEVELS: usize = 3; -const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16; - /// Interface for querying blocks by hash and by number. pub trait BlockProvider { /// Returns true if the given block is known @@ -150,7 +145,7 @@ pub trait BlockProvider { } /// Returns numbers of blocks containing given bloom. - fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec; + fn blocks_with_blooms(&self, bloom: &[H2048], from_block: BlockNumber, to_block: BlockNumber) -> Vec; /// Returns logs matching given filter. 
fn logs(&self, blocks: Vec, matches: F, limit: Option) -> Vec @@ -168,26 +163,14 @@ enum CacheId { BlockDetails(H256), BlockHashes(BlockNumber), TransactionAddresses(H256), - BlocksBlooms(LogGroupPosition), BlockReceipts(H256), } -impl bc::group::BloomGroupDatabase for BlockChain { - fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option { - let position = LogGroupPosition::from(position.clone()); - let result = self.db.read_with_cache(db::COL_EXTRA, &self.blocks_blooms, &position).map(Into::into); - self.cache_man.lock().note_used(CacheId::BlocksBlooms(position)); - result - } -} - /// Structure providing fast access to blockchain data. /// /// **Does not do input data verification.** pub struct BlockChain { // All locks must be captured in the order declared here. - blooms_config: bc::Config, - best_block: RwLock, // Stores best block of the first uninterrupted sequence of blocks. `None` if there are no gaps. // Only updated with `insert_unordered_block`. @@ -204,7 +187,6 @@ pub struct BlockChain { block_details: RwLock>, block_hashes: RwLock>, transaction_addresses: RwLock>, - blocks_blooms: RwLock>, block_receipts: RwLock>, db: Arc, @@ -229,7 +211,7 @@ impl BlockProvider for BlockChain { } fn best_ancient_block(&self) -> Option { - self.best_ancient_block.read().as_ref().map(|b| b.hash.clone()) + self.best_ancient_block.read().as_ref().map(|b| b.hash) } fn best_ancient_number(&self) -> Option { @@ -279,13 +261,13 @@ impl BlockProvider for BlockChain { Some(b) => { let bytes: Bytes = UntrustedRlp::new(&b).decompress(RlpType::Blocks).into_vec(); let mut write = self.block_headers.write(); - write.insert(hash.clone(), bytes.clone()); + write.insert(*hash, bytes.clone()); Some(encoded::Header::new(bytes)) }, None => None }; - self.cache_man.lock().note_used(CacheId::BlockHeader(hash.clone())); + self.cache_man.lock().note_used(CacheId::BlockHeader(*hash)); result } @@ -315,13 +297,13 @@ impl BlockProvider for BlockChain { Some(b) => { let bytes: Bytes = 
UntrustedRlp::new(&b).decompress(RlpType::Blocks).into_vec(); let mut write = self.block_bodies.write(); - write.insert(hash.clone(), bytes.clone()); + write.insert(*hash, bytes.clone()); Some(encoded::Body::new(bytes)) }, None => None }; - self.cache_man.lock().note_used(CacheId::BlockBody(hash.clone())); + self.cache_man.lock().note_used(CacheId::BlockBody(*hash)); result } @@ -329,7 +311,7 @@ impl BlockProvider for BlockChain { /// Get the familial details concerning a block. fn block_details(&self, hash: &H256) -> Option { let result = self.db.read_with_cache(db::COL_EXTRA, &self.block_details, hash); - self.cache_man.lock().note_used(CacheId::BlockDetails(hash.clone())); + self.cache_man.lock().note_used(CacheId::BlockDetails(*hash)); result } @@ -343,24 +325,25 @@ impl BlockProvider for BlockChain { /// Get the address of transaction with given hash. fn transaction_address(&self, hash: &H256) -> Option { let result = self.db.read_with_cache(db::COL_EXTRA, &self.transaction_addresses, hash); - self.cache_man.lock().note_used(CacheId::TransactionAddresses(hash.clone())); + self.cache_man.lock().note_used(CacheId::TransactionAddresses(*hash)); result } /// Get receipts of block with given hash. fn block_receipts(&self, hash: &H256) -> Option { let result = self.db.read_with_cache(db::COL_EXTRA, &self.block_receipts, hash); - self.cache_man.lock().note_used(CacheId::BlockReceipts(hash.clone())); + self.cache_man.lock().note_used(CacheId::BlockReceipts(*hash)); result } - /// Returns numbers of blocks containing given bloom. 
- fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec { - let range = from_block as bc::Number..to_block as bc::Number; - let chain = bc::group::BloomGroupChain::new(self.blooms_config, self); - chain.with_bloom(&range, &Bloom::from(bloom.clone()).into()) - .into_iter() - .map(|b| b as BlockNumber) + fn blocks_with_blooms(&self, blooms: &[H2048], from_block: BlockNumber, to_block: BlockNumber) -> Vec { + // +1, cause it's inclusive range + (from_block..to_block + 1) + .into_par_iter() + .filter_map(|number| self.block_hash(number).map(|hash| (number, hash))) + .map(|(number, hash)| (number, self.block_header_data(&hash).expect("hash exists; qed"))) + .filter(|&(_, ref header)| blooms.iter().any(|bloom| header.view().log_bloom().contains(bloom))) + .map(|(number, _)| number) .collect() } @@ -376,19 +359,15 @@ impl BlockProvider for BlockChain { .filter_map(|number| self.block_hash(*number).map(|hash| (*number, hash))) .filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts))) .filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, b.transaction_hashes()))) - .flat_map(|(number, hash, mut receipts, mut hashes)| { - if receipts.len() != hashes.len() { - warn!("Block {} ({}) has different number of receipts ({}) to transactions ({}). 
Database corrupt?", number, hash, receipts.len(), hashes.len()); - assert!(false); - } - let mut log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len()); + .flat_map(|(number, hash, receipts, hashes)| { + assert_eq!(receipts.len(), hashes.len(), "Block {} ({}) has different number of receipts ({}) to transactions ({})", number, hash, receipts.len(), hashes.len()); + let mut log_index: usize = receipts.iter().map(|r| r.logs.len()).sum(); let receipts_len = receipts.len(); - hashes.reverse(); - receipts.reverse(); receipts.into_iter() .map(|receipt| receipt.logs) .zip(hashes) + .rev() .enumerate() .flat_map(move |(index, (mut logs, tx_hash))| { let current_log_index = log_index; @@ -492,10 +471,6 @@ impl BlockChain { let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400); let mut bc = BlockChain { - blooms_config: bc::Config { - levels: LOG_BLOOMS_LEVELS, - elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX, - }, first_block: None, best_block: RwLock::new(BestBlock::default()), best_ancient_block: RwLock::new(None), @@ -504,7 +479,6 @@ impl BlockChain { block_details: RwLock::new(HashMap::new()), block_hashes: RwLock::new(HashMap::new()), transaction_addresses: RwLock::new(HashMap::new()), - blocks_blooms: RwLock::new(HashMap::new()), block_receipts: RwLock::new(HashMap::new()), db: db.clone(), cache_man: Mutex::new(cache_man), @@ -622,58 +596,6 @@ impl BlockChain { self.db.read_with_cache(db::COL_EXTRA, &self.block_details, parent).map_or(false, |d| d.children.contains(hash)) } - /// Rewind to a previous block - #[cfg(test)] - fn rewind(&self) -> Option { - use db::Key; - let mut batch =self.db.transaction(); - // track back to the best block we have in the blocks database - if let Some(best_block_hash) = self.db.get(db::COL_EXTRA, b"best").unwrap() { - let best_block_hash = H256::from_slice(&best_block_hash); - if best_block_hash == self.genesis_hash() { - return None; - } - if let Some(extras) = 
self.db.read(db::COL_EXTRA, &best_block_hash) as Option { - type DetailsKey = Key; - batch.delete(db::COL_EXTRA, &(DetailsKey::key(&best_block_hash))); - let hash = extras.parent; - let range = extras.number as bc::Number .. extras.number as bc::Number; - let chain = bc::group::BloomGroupChain::new(self.blooms_config, self); - let changes = chain.replace(&range, vec![]); - for (k, v) in changes { - batch.write(db::COL_EXTRA, &LogGroupPosition::from(k), &BloomGroup::from(v)); - } - batch.put(db::COL_EXTRA, b"best", &hash); - - let best_block_total_difficulty = self.block_details(&hash).unwrap().total_difficulty; - let best_block_rlp = self.block(&hash).unwrap().into_inner(); - - let mut best_block = self.best_block.write(); - *best_block = BestBlock { - number: extras.number - 1, - total_difficulty: best_block_total_difficulty, - hash: hash, - timestamp: BlockView::new(&best_block_rlp).header().timestamp(), - block: best_block_rlp, - }; - // update parent extras - if let Some(mut details) = self.db.read(db::COL_EXTRA, &hash) as Option { - details.children.clear(); - batch.write(db::COL_EXTRA, &hash, &details); - } - self.db.write(batch).expect("Writing to db failed"); - self.block_details.write().clear(); - self.block_hashes.write().clear(); - self.block_headers.write().clear(); - self.block_bodies.write().clear(); - self.block_receipts.write().clear(); - return Some(hash); - } - } - - None - } - /// Returns a tree route between `from` and `to`, which is a tuple of: /// /// - a vector of hashes of all blocks, ordered from `from` to `to`. @@ -771,6 +693,7 @@ impl BlockChain { /// This is used by snapshot restoration and when downloading missing blocks for the chain gap. /// `is_best` forces the best block to be updated to this block. /// `is_ancient` forces the best block of the first block sequence to be updated to this block. + /// `parent_td` is a parent total diffuculty /// Supply a dummy parent total difficulty when the parent block may not be in the chain. 
/// Returns true if the block is disconnected. pub fn insert_unordered_block(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec, parent_td: Option, is_best: bool, is_ancient: bool) -> bool { @@ -797,7 +720,7 @@ impl BlockChain { if let Some(parent_details) = maybe_parent { // parent known to be in chain. let info = BlockInfo { - hash: hash.clone(), + hash: hash, number: header.number(), total_difficulty: parent_details.total_difficulty + header.difficulty(), location: BlockLocation::CanonChain, @@ -807,7 +730,6 @@ impl BlockChain { block_hashes: self.prepare_block_hashes_update(bytes, &info), block_details: self.prepare_block_details_update(bytes, &info), block_receipts: self.prepare_block_receipts_update(receipts, &info), - blocks_blooms: self.prepare_block_blooms_update(bytes, &info), transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), info: info, timestamp: header.timestamp(), @@ -856,7 +778,6 @@ impl BlockChain { block_hashes: self.prepare_block_hashes_update(bytes, &info), block_details: update, block_receipts: self.prepare_block_receipts_update(receipts, &info), - blocks_blooms: self.prepare_block_blooms_update(bytes, &info), transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), info: info, timestamp: header.timestamp(), @@ -1004,7 +925,6 @@ impl BlockChain { block_hashes: self.prepare_block_hashes_update(bytes, &info), block_details: self.prepare_block_details_update(bytes, &info), block_receipts: self.prepare_block_receipts_update(receipts, &info), - blocks_blooms: self.prepare_block_blooms_update(bytes, &info), transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), info: info.clone(), timestamp: header.timestamp(), @@ -1062,16 +982,10 @@ impl BlockChain { batch.extend_with_cache(db::COL_EXTRA, &mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove); } - { - let mut write_blocks_blooms = self.blocks_blooms.write(); - 
batch.extend_with_cache(db::COL_EXTRA, &mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove); - } - // These cached values must be updated last with all four locks taken to avoid // cache decoherence { let mut best_block = self.pending_best_block.write(); - // update best block match update.info.location { BlockLocation::Branch => (), _ => if is_best { @@ -1083,8 +997,9 @@ impl BlockChain { timestamp: update.timestamp, block: update.block.to_vec(), }); - }, + } } + let mut write_hashes = self.pending_block_hashes.write(); let mut write_details = self.pending_block_details.write(); let mut write_txs = self.pending_transaction_addresses.write(); @@ -1198,7 +1113,7 @@ impl BlockChain { match info.location { BlockLocation::Branch => (), BlockLocation::CanonChain => { - block_hashes.insert(number, info.hash.clone()); + block_hashes.insert(number, info.hash); }, BlockLocation::BranchBecomingCanonChain(ref data) => { let ancestor_number = self.block_number(&data.ancestor).expect("Block number of ancestor is always in DB"); @@ -1208,7 +1123,7 @@ impl BlockChain { block_hashes.insert(start_number + index as BlockNumber, hash); } - block_hashes.insert(number, info.hash.clone()); + block_hashes.insert(number, info.hash); } } @@ -1224,27 +1139,27 @@ impl BlockChain { // update parent let mut parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash)); - parent_details.children.push(info.hash.clone()); + parent_details.children.push(info.hash); // create current block details. 
let details = BlockDetails { number: header.number(), total_difficulty: info.total_difficulty, - parent: parent_hash.clone(), + parent: parent_hash, children: vec![], }; // write to batch let mut block_details = HashMap::new(); block_details.insert(parent_hash, parent_details); - block_details.insert(info.hash.clone(), details); + block_details.insert(info.hash, details); block_details } /// This function returns modified block receipts. fn prepare_block_receipts_update(&self, receipts: Vec, info: &BlockInfo) -> HashMap { let mut block_receipts = HashMap::new(); - block_receipts.insert(info.hash.clone(), BlockReceipts::new(receipts)); + block_receipts.insert(info.hash, BlockReceipts::new(receipts)); block_receipts } @@ -1259,7 +1174,7 @@ impl BlockChain { .enumerate() .map(|(i ,tx_hash)| { (tx_hash, Some(TransactionAddress { - block_hash: info.hash.clone(), + block_hash: info.hash, index: i })) }) @@ -1273,7 +1188,7 @@ impl BlockChain { hashes.into_iter() .enumerate() .map(|(i, tx_hash)| (tx_hash, Some(TransactionAddress { - block_hash: hash.clone(), + block_hash: *hash, index: i, }))) .collect::>>() @@ -1283,7 +1198,7 @@ impl BlockChain { .enumerate() .map(|(i ,tx_hash)| { (tx_hash, Some(TransactionAddress { - block_hash: info.hash.clone(), + block_hash: info.hash, index: i })) }); @@ -1301,64 +1216,9 @@ impl BlockChain { } } - /// This functions returns modified blocks blooms. - /// - /// To accelerate blooms lookups, blomms are stored in multiple - /// layers (BLOOM_LEVELS, currently 3). - /// ChainFilter is responsible for building and rebuilding these layers. - /// It returns them in HashMap, where values are Blooms and - /// keys are BloomIndexes. BloomIndex represents bloom location on one - /// of these layers. - /// - /// To reduce number of queries to databse, block blooms are stored - /// in BlocksBlooms structure which contains info about several - /// (BLOOM_INDEX_SIZE, currently 16) consecutive blocks blooms. 
- /// - /// Later, BloomIndexer is used to map bloom location on filter layer (BloomIndex) - /// to bloom location in database (BlocksBloomLocation). - /// - fn prepare_block_blooms_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap { - let block = BlockView::new(block_bytes); - let header = block.header_view(); - - let log_blooms = match info.location { - BlockLocation::Branch => HashMap::new(), - BlockLocation::CanonChain => { - let log_bloom = header.log_bloom(); - if log_bloom.is_zero() { - HashMap::new() - } else { - let chain = bc::group::BloomGroupChain::new(self.blooms_config, self); - chain.insert(info.number as bc::Number, Bloom::from(log_bloom).into()) - } - }, - BlockLocation::BranchBecomingCanonChain(ref data) => { - let ancestor_number = self.block_number(&data.ancestor).unwrap(); - let start_number = ancestor_number + 1; - let range = start_number as bc::Number..self.best_block_number() as bc::Number; - - let mut blooms: Vec = data.enacted.iter() - .map(|hash| self.block_header_data(hash).unwrap()) - .map(|h| h.log_bloom()) - .map(Bloom::from) - .map(Into::into) - .collect(); - - blooms.push(Bloom::from(header.log_bloom()).into()); - - let chain = bc::group::BloomGroupChain::new(self.blooms_config, self); - chain.replace(&range, blooms) - } - }; - - log_blooms.into_iter() - .map(|p| (From::from(p.0), From::from(p.1))) - .collect() - } - /// Get best block hash. pub fn best_block_hash(&self) -> H256 { - self.best_block.read().hash.clone() + self.best_block.read().hash } /// Get best block number. 
@@ -1389,7 +1249,6 @@ impl BlockChain { blocks: self.block_headers.read().heap_size_of_children() + self.block_bodies.read().heap_size_of_children(), block_details: self.block_details.read().heap_size_of_children(), transaction_addresses: self.transaction_addresses.read().heap_size_of_children(), - blocks_blooms: self.blocks_blooms.read().heap_size_of_children(), block_receipts: self.block_receipts.read().heap_size_of_children(), } } @@ -1403,7 +1262,6 @@ impl BlockChain { let mut block_details = self.block_details.write(); let mut block_hashes = self.block_hashes.write(); let mut transaction_addresses = self.transaction_addresses.write(); - let mut blocks_blooms = self.blocks_blooms.write(); let mut block_receipts = self.block_receipts.write(); let mut cache_man = self.cache_man.lock(); @@ -1415,7 +1273,6 @@ impl BlockChain { CacheId::BlockDetails(ref h) => { block_details.remove(h); } CacheId::BlockHashes(ref h) => { block_hashes.remove(h); } CacheId::TransactionAddresses(ref h) => { transaction_addresses.remove(h); } - CacheId::BlocksBlooms(ref h) => { blocks_blooms.remove(h); } CacheId::BlockReceipts(ref h) => { block_receipts.remove(h); } } } @@ -1425,7 +1282,6 @@ impl BlockChain { block_details.shrink_to_fit(); block_hashes.shrink_to_fit(); transaction_addresses.shrink_to_fit(); - blocks_blooms.shrink_to_fit(); block_receipts.shrink_to_fit(); block_headers.heap_size_of_children() + @@ -1433,7 +1289,6 @@ impl BlockChain { block_details.heap_size_of_children() + block_hashes.heap_size_of_children() + transaction_addresses.heap_size_of_children() + - blocks_blooms.heap_size_of_children() + block_receipts.heap_size_of_children() }); } @@ -1456,12 +1311,12 @@ impl BlockChain { total_difficulty: best_block.total_difficulty.clone(), pending_total_difficulty: best_block.total_difficulty.clone(), genesis_hash: self.genesis_hash(), - best_block_hash: best_block.hash.clone(), + best_block_hash: best_block.hash, best_block_number: best_block.number, best_block_timestamp: 
best_block.timestamp, first_block_hash: self.first_block(), first_block_number: From::from(self.first_block_number()), - ancient_block_hash: best_ancient_block.as_ref().map(|b| b.hash.clone()), + ancient_block_hash: best_ancient_block.as_ref().map(|b| b.hash), ancient_block_number: best_ancient_block.as_ref().map(|b| b.number), } } @@ -1474,6 +1329,7 @@ impl BlockChain { #[cfg(test)] mod tests { + use std::iter; use std::sync::Arc; use rustc_hex::FromHex; use hash::keccak; @@ -1483,13 +1339,11 @@ mod tests { use receipt::{Receipt, TransactionOutcome}; use blockchain::{BlockProvider, BlockChain, Config, ImportRoute}; use tests::helpers::*; - use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer}; + use blockchain::generator::{BlockGenerator, BlockBuilder, BlockOptions}; use blockchain::extras::TransactionAddress; - use views::BlockView; use transaction::{Transaction, Action}; use log_entry::{LogEntry, LocalizedLogEntry}; use ethkey::Secret; - use header::BlockNumber; fn new_db() -> Arc { Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0))) @@ -1502,18 +1356,16 @@ mod tests { #[test] fn should_cache_best_block() { // given - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let first = canon_chain.generate(&mut finalizer).unwrap(); + let genesis = BlockBuilder::genesis(); + let first = genesis.add_block(); let db = new_db(); - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.last().encoded(), db.clone()); assert_eq!(bc.best_block_number(), 0); // when let mut batch = db.transaction(); - bc.insert_block(&mut batch, &first, vec![]); + bc.insert_block(&mut batch, &first.last().encoded(), vec![]); assert_eq!(bc.best_block_number(), 0); bc.commit(); // NOTE no db.write here (we want to check if best block is cached) @@ -1525,101 +1377,95 @@ mod tests { #[test] fn basic_blockchain_insert() { - let mut 
canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let first = canon_chain.generate(&mut finalizer).unwrap(); - let genesis_hash = BlockView::new(&genesis).header_view().hash(); - let first_hash = BlockView::new(&first).header_view().hash(); + let genesis = BlockBuilder::genesis(); + let first = genesis.add_block(); + + let genesis = genesis.last(); + let first = first.last(); + let genesis_hash = genesis.hash(); + let first_hash = first.hash(); let db = new_db(); - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.encoded(), db.clone()); - assert_eq!(bc.genesis_hash(), genesis_hash.clone()); - assert_eq!(bc.best_block_hash(), genesis_hash.clone()); - assert_eq!(bc.block_hash(0), Some(genesis_hash.clone())); + assert_eq!(bc.genesis_hash(), genesis_hash); + assert_eq!(bc.best_block_hash(), genesis_hash); + assert_eq!(bc.block_hash(0), Some(genesis_hash)); assert_eq!(bc.block_hash(1), None); assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]); let mut batch = db.transaction(); - bc.insert_block(&mut batch, &first, vec![]); + bc.insert_block(&mut batch, &first.encoded(), vec![]); db.write(batch).unwrap(); bc.commit(); - assert_eq!(bc.block_hash(0), Some(genesis_hash.clone())); + assert_eq!(bc.block_hash(0), Some(genesis_hash)); assert_eq!(bc.best_block_number(), 1); - assert_eq!(bc.best_block_hash(), first_hash.clone()); - assert_eq!(bc.block_hash(1), Some(first_hash.clone())); - assert_eq!(bc.block_details(&first_hash).unwrap().parent, genesis_hash.clone()); - assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![first_hash.clone()]); + assert_eq!(bc.best_block_hash(), first_hash); + assert_eq!(bc.block_hash(1), Some(first_hash)); + assert_eq!(bc.block_details(&first_hash).unwrap().parent, genesis_hash); + assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![first_hash]); 
assert_eq!(bc.block_hash(2), None); } #[test] fn check_ancestry_iter() { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let genesis_hash = BlockView::new(&genesis).header_view().hash(); + let genesis = BlockBuilder::genesis(); + let first_10 = genesis.add_blocks(10); + let generator = BlockGenerator::new(vec![first_10]); let db = new_db(); - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.last().encoded(), db.clone()); - let mut block_hashes = vec![genesis_hash.clone()]; + let mut block_hashes = vec![genesis.last().hash()]; let mut batch = db.transaction(); - for _ in 0..10 { - let block = canon_chain.generate(&mut finalizer).unwrap(); - block_hashes.push(BlockView::new(&block).header_view().hash()); - bc.insert_block(&mut batch, &block, vec![]); + for block in generator { + block_hashes.push(block.hash()); + bc.insert_block(&mut batch, &block.encoded(), vec![]); bc.commit(); } db.write(batch).unwrap(); block_hashes.reverse(); - assert_eq!(bc.ancestry_iter(block_hashes[0].clone()).unwrap().collect::>(), block_hashes) + assert_eq!(bc.ancestry_iter(block_hashes[0].clone()).unwrap().collect::>(), block_hashes); + assert_eq!(block_hashes.len(), 11); } #[test] fn test_find_uncles() { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let b1b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); - let b1a = canon_chain.generate(&mut finalizer).unwrap(); - let b2b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); - let b2a = canon_chain.generate(&mut finalizer).unwrap(); - let b3b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); - let b3a = canon_chain.generate(&mut finalizer).unwrap(); - let b4b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); - let b4a = 
canon_chain.generate(&mut finalizer).unwrap(); - let b5b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); - let b5a = canon_chain.generate(&mut finalizer).unwrap(); + let genesis = BlockBuilder::genesis(); + let b1a = genesis.add_block(); + let b2a = b1a.add_block(); + let b3a = b2a.add_block(); + let b4a = b3a.add_block(); + let b5a = b4a.add_block(); - let db = new_db(); - let bc = new_chain(&genesis, db.clone()); + let b1b = genesis.add_block_with_difficulty(9); + let b2b = b1a.add_block_with_difficulty(9); + let b3b = b2a.add_block_with_difficulty(9); + let b4b = b3a.add_block_with_difficulty(9); + let b5b = b4a.add_block_with_difficulty(9); - let mut batch =db.transaction(); - for b in &[&b1a, &b1b, &b2a, &b2b, &b3a, &b3b, &b4a, &b4b, &b5a, &b5b] { - bc.insert_block(&mut batch, b, vec![]); - bc.commit(); - } - bc.insert_block(&mut batch, &b1b, vec![]); - bc.insert_block(&mut batch, &b2a, vec![]); - bc.insert_block(&mut batch, &b2b, vec![]); - bc.insert_block(&mut batch, &b3a, vec![]); - bc.insert_block(&mut batch, &b3b, vec![]); - bc.insert_block(&mut batch, &b4a, vec![]); - bc.insert_block(&mut batch, &b4b, vec![]); - bc.insert_block(&mut batch, &b5a, vec![]); - bc.insert_block(&mut batch, &b5b, vec![]); - db.write(batch).unwrap(); + let uncle_headers = vec![b4b.last().header(), b3b.last().header(), b2b.last().header()]; + let b4a_hash = b4a.last().hash(); - assert_eq!( - [&b4b, &b3b, &b2b].iter().map(|b| BlockView::new(b).header()).collect::>(), - bc.find_uncle_headers(&BlockView::new(&b4a).header_view().hash(), 3).unwrap() + let generator = BlockGenerator::new( + vec![b1a, b1b, b2a, b2b, b3a, b3b, b4a, b4b, b5a, b5b] ); + let db = new_db(); + let bc = new_chain(&genesis.last().encoded(), db.clone()); + + let mut batch = db.transaction(); + for b in generator { + bc.insert_block(&mut batch, &b.encoded(), vec![]); + bc.commit(); + } + + db.write(batch).unwrap(); + + assert_eq!(uncle_headers, bc.find_uncle_headers(&b4a_hash, 3).unwrap()); // 
TODO: insert block that already includes one of them as an uncle to check it's not allowed. } @@ -1629,12 +1475,6 @@ mod tests { #[test] fn test_fork_transaction_addresses() { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let mut fork_chain = canon_chain.fork(1); - let mut fork_finalizer = finalizer.fork(); - let t1 = Transaction { nonce: 0.into(), gas_price: 0.into(), @@ -1644,42 +1484,35 @@ mod tests { data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), }.sign(&secret(), None); - - let b1a = canon_chain - .with_transaction(t1.clone()) - .generate(&mut finalizer).unwrap(); - - // Empty block - let b1b = fork_chain - .generate(&mut fork_finalizer).unwrap(); - - let b2 = fork_chain - .generate(&mut fork_finalizer).unwrap(); - - let b1a_hash = BlockView::new(&b1a).header_view().hash(); - let b2_hash = BlockView::new(&b2).header_view().hash(); - let t1_hash = t1.hash(); + let genesis = BlockBuilder::genesis(); + let b1a = genesis.add_block_with_transactions(iter::once(t1)); + let b1b = genesis.add_block_with_difficulty(9); + let b2 = b1b.add_block(); + + let b1a_hash = b1a.last().hash(); + let b2_hash = b2.last().hash(); + let db = new_db(); - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.last().encoded(), db.clone()); let mut batch = db.transaction(); - let _ = bc.insert_block(&mut batch, &b1a, vec![]); + let _ = bc.insert_block(&mut batch, &b1a.last().encoded(), vec![]); bc.commit(); - let _ = bc.insert_block(&mut batch, &b1b, vec![]); + let _ = bc.insert_block(&mut batch, &b1b.last().encoded(), vec![]); bc.commit(); db.write(batch).unwrap(); assert_eq!(bc.best_block_hash(), b1a_hash); assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress { - block_hash: b1a_hash.clone(), + block_hash: b1a_hash, index: 0, })); // now let's make forked chain the canon chain let mut batch = 
db.transaction(); - let _ = bc.insert_block(&mut batch, &b2, vec![]); + let _ = bc.insert_block(&mut batch, &b2.last().encoded(), vec![]); bc.commit(); db.write(batch).unwrap(); @@ -1688,16 +1521,8 @@ mod tests { assert_eq!(bc.transaction_address(&t1_hash), None); } - - #[test] fn test_overwriting_transaction_addresses() { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let mut fork_chain = canon_chain.fork(1); - let mut fork_finalizer = finalizer.fork(); - let t1 = Transaction { nonce: 0.into(), gas_price: 0.into(), @@ -1725,103 +1550,97 @@ mod tests { data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), }.sign(&secret(), None); - let b1a = canon_chain - .with_transaction(t1.clone()) - .with_transaction(t2.clone()) - .generate(&mut finalizer).unwrap(); + let genesis = BlockBuilder::genesis(); + let b1a = genesis.add_block_with_transactions(vec![t1.clone(), t2.clone()]); + // insert transactions in different order, + // the block has lower difficulty, so the hash is also different + let b1b = genesis.add_block_with(|| BlockOptions { + difficulty: 9.into(), + transactions: vec![t2.clone(), t1.clone()], + ..Default::default() + }); + let b2 = b1b.add_block_with_transactions(iter::once(t3.clone())); - // insert transactions in different order - let b1b = fork_chain - .with_transaction(t2.clone()) - .with_transaction(t1.clone()) - .generate(&mut fork_finalizer).unwrap(); - - let b2 = fork_chain - .with_transaction(t3.clone()) - .generate(&mut fork_finalizer).unwrap(); - - let b1a_hash = BlockView::new(&b1a).header_view().hash(); - let b1b_hash = BlockView::new(&b1b).header_view().hash(); - let b2_hash = BlockView::new(&b2).header_view().hash(); + let b1a_hash = b1a.last().hash(); + let b1b_hash = b1b.last().hash(); + let b2_hash = b2.last().hash(); let t1_hash = t1.hash(); let t2_hash = t2.hash(); let t3_hash = 
t3.hash(); let db = new_db(); - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.last().encoded(), db.clone()); let mut batch = db.transaction(); - let _ = bc.insert_block(&mut batch, &b1a, vec![]); + let _ = bc.insert_block(&mut batch, &b1a.last().encoded(), vec![]); bc.commit(); - let _ = bc.insert_block(&mut batch, &b1b, vec![]); + let _ = bc.insert_block(&mut batch, &b1b.last().encoded(), vec![]); bc.commit(); db.write(batch).unwrap(); assert_eq!(bc.best_block_hash(), b1a_hash); assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress { - block_hash: b1a_hash.clone(), + block_hash: b1a_hash, index: 0, })); assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress { - block_hash: b1a_hash.clone(), + block_hash: b1a_hash, index: 1, })); // now let's make forked chain the canon chain let mut batch = db.transaction(); - let _ = bc.insert_block(&mut batch, &b2, vec![]); + let _ = bc.insert_block(&mut batch, &b2.last().encoded(), vec![]); bc.commit(); db.write(batch).unwrap(); assert_eq!(bc.best_block_hash(), b2_hash); assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress { - block_hash: b1b_hash.clone(), + block_hash: b1b_hash, index: 1, })); assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress { - block_hash: b1b_hash.clone(), + block_hash: b1b_hash, index: 0, })); assert_eq!(bc.transaction_address(&t3_hash), Some(TransactionAddress { - block_hash: b2_hash.clone(), + block_hash: b2_hash, index: 0, })); } #[test] fn test_small_fork() { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let b1 = canon_chain.generate(&mut finalizer).unwrap(); - let b2 = canon_chain.generate(&mut finalizer).unwrap(); - let b3b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); - let b3a = canon_chain.generate(&mut finalizer).unwrap(); + let genesis = BlockBuilder::genesis(); + let b1 = 
genesis.add_block(); + let b2 = b1.add_block(); + let b3a = b2.add_block(); + let b3b = b2.add_block_with_difficulty(9); - let genesis_hash = BlockView::new(&genesis).header_view().hash(); - let b1_hash= BlockView::new(&b1).header_view().hash(); - let b2_hash= BlockView::new(&b2).header_view().hash(); - let b3a_hash= BlockView::new(&b3a).header_view().hash(); - let b3b_hash= BlockView::new(&b3b).header_view().hash(); + let genesis_hash = genesis.last().hash(); + let b1_hash = b1.last().hash(); + let b2_hash = b2.last().hash(); + let b3a_hash = b3a.last().hash(); + let b3b_hash = b3b.last().hash(); // b3a is a part of canon chain, whereas b3b is part of sidechain - let best_block_hash = b3a_hash.clone(); + let best_block_hash = b3a_hash; let db = new_db(); - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.last().encoded(), db.clone()); let mut batch = db.transaction(); - let ir1 = bc.insert_block(&mut batch, &b1, vec![]); + let ir1 = bc.insert_block(&mut batch, &b1.last().encoded(), vec![]); bc.commit(); - let ir2 = bc.insert_block(&mut batch, &b2, vec![]); + let ir2 = bc.insert_block(&mut batch, &b2.last().encoded(), vec![]); bc.commit(); - let ir3b = bc.insert_block(&mut batch, &b3b, vec![]); + let ir3b = bc.insert_block(&mut batch, &b3b.last().encoded(), vec![]); bc.commit(); db.write(batch).unwrap(); assert_eq!(bc.block_hash(3).unwrap(), b3b_hash); - let mut batch =db.transaction(); - let ir3a = bc.insert_block(&mut batch, &b3a, vec![]); + let mut batch = db.transaction(); + let ir3a = bc.insert_block(&mut batch, &b3a.last().encoded(), vec![]); bc.commit(); db.write(batch).unwrap(); @@ -1862,79 +1681,78 @@ mod tests { assert_eq!(bc.block_hash(3).unwrap(), b3a_hash); // test trie route - let r0_1 = bc.tree_route(genesis_hash.clone(), b1_hash.clone()).unwrap(); + let r0_1 = bc.tree_route(genesis_hash, b1_hash).unwrap(); assert_eq!(r0_1.ancestor, genesis_hash); - assert_eq!(r0_1.blocks, [b1_hash.clone()]); + assert_eq!(r0_1.blocks, 
[b1_hash]); assert_eq!(r0_1.index, 0); - let r0_2 = bc.tree_route(genesis_hash.clone(), b2_hash.clone()).unwrap(); + let r0_2 = bc.tree_route(genesis_hash, b2_hash).unwrap(); assert_eq!(r0_2.ancestor, genesis_hash); - assert_eq!(r0_2.blocks, [b1_hash.clone(), b2_hash.clone()]); + assert_eq!(r0_2.blocks, [b1_hash, b2_hash]); assert_eq!(r0_2.index, 0); - let r1_3a = bc.tree_route(b1_hash.clone(), b3a_hash.clone()).unwrap(); + let r1_3a = bc.tree_route(b1_hash, b3a_hash).unwrap(); assert_eq!(r1_3a.ancestor, b1_hash); - assert_eq!(r1_3a.blocks, [b2_hash.clone(), b3a_hash.clone()]); + assert_eq!(r1_3a.blocks, [b2_hash, b3a_hash]); assert_eq!(r1_3a.index, 0); - let r1_3b = bc.tree_route(b1_hash.clone(), b3b_hash.clone()).unwrap(); + let r1_3b = bc.tree_route(b1_hash, b3b_hash).unwrap(); assert_eq!(r1_3b.ancestor, b1_hash); - assert_eq!(r1_3b.blocks, [b2_hash.clone(), b3b_hash.clone()]); + assert_eq!(r1_3b.blocks, [b2_hash, b3b_hash]); assert_eq!(r1_3b.index, 0); - let r3a_3b = bc.tree_route(b3a_hash.clone(), b3b_hash.clone()).unwrap(); + let r3a_3b = bc.tree_route(b3a_hash, b3b_hash).unwrap(); assert_eq!(r3a_3b.ancestor, b2_hash); - assert_eq!(r3a_3b.blocks, [b3a_hash.clone(), b3b_hash.clone()]); + assert_eq!(r3a_3b.blocks, [b3a_hash, b3b_hash]); assert_eq!(r3a_3b.index, 1); - let r1_0 = bc.tree_route(b1_hash.clone(), genesis_hash.clone()).unwrap(); + let r1_0 = bc.tree_route(b1_hash, genesis_hash).unwrap(); assert_eq!(r1_0.ancestor, genesis_hash); - assert_eq!(r1_0.blocks, [b1_hash.clone()]); + assert_eq!(r1_0.blocks, [b1_hash]); assert_eq!(r1_0.index, 1); - let r2_0 = bc.tree_route(b2_hash.clone(), genesis_hash.clone()).unwrap(); + let r2_0 = bc.tree_route(b2_hash, genesis_hash).unwrap(); assert_eq!(r2_0.ancestor, genesis_hash); - assert_eq!(r2_0.blocks, [b2_hash.clone(), b1_hash.clone()]); + assert_eq!(r2_0.blocks, [b2_hash, b1_hash]); assert_eq!(r2_0.index, 2); - let r3a_1 = bc.tree_route(b3a_hash.clone(), b1_hash.clone()).unwrap(); + let r3a_1 = 
bc.tree_route(b3a_hash, b1_hash).unwrap(); assert_eq!(r3a_1.ancestor, b1_hash); - assert_eq!(r3a_1.blocks, [b3a_hash.clone(), b2_hash.clone()]); + assert_eq!(r3a_1.blocks, [b3a_hash, b2_hash]); assert_eq!(r3a_1.index, 2); - let r3b_1 = bc.tree_route(b3b_hash.clone(), b1_hash.clone()).unwrap(); + let r3b_1 = bc.tree_route(b3b_hash, b1_hash).unwrap(); assert_eq!(r3b_1.ancestor, b1_hash); - assert_eq!(r3b_1.blocks, [b3b_hash.clone(), b2_hash.clone()]); + assert_eq!(r3b_1.blocks, [b3b_hash, b2_hash]); assert_eq!(r3b_1.index, 2); - let r3b_3a = bc.tree_route(b3b_hash.clone(), b3a_hash.clone()).unwrap(); + let r3b_3a = bc.tree_route(b3b_hash, b3a_hash).unwrap(); assert_eq!(r3b_3a.ancestor, b2_hash); - assert_eq!(r3b_3a.blocks, [b3b_hash.clone(), b3a_hash.clone()]); + assert_eq!(r3b_3a.blocks, [b3b_hash, b3a_hash]); assert_eq!(r3b_3a.index, 1); } #[test] fn test_reopen_blockchain_db() { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let first = canon_chain.generate(&mut finalizer).unwrap(); - let genesis_hash = BlockView::new(&genesis).header_view().hash(); - let first_hash = BlockView::new(&first).header_view().hash(); + let genesis = BlockBuilder::genesis(); + let first = genesis.add_block(); + let genesis_hash = genesis.last().hash(); + let first_hash = first.last().hash(); + let db = new_db(); { - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.last().encoded(), db.clone()); assert_eq!(bc.best_block_hash(), genesis_hash); - let mut batch =db.transaction(); - bc.insert_block(&mut batch, &first, vec![]); + let mut batch = db.transaction(); + bc.insert_block(&mut batch, &first.last().encoded(), vec![]); db.write(batch).unwrap(); bc.commit(); assert_eq!(bc.best_block_hash(), first_hash); } { - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.last().encoded(), db.clone()); assert_eq!(bc.best_block_hash(), 
first_hash); } @@ -2007,11 +1825,6 @@ mod tests { #[test] fn test_logs() { - // given - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - // just insert dummy transaction so that #transactions=#receipts let t1 = Transaction { nonce: 0.into(), gas_price: 0.into(), @@ -2039,12 +1852,18 @@ mod tests { let tx_hash1 = t1.hash(); let tx_hash2 = t2.hash(); let tx_hash3 = t3.hash(); - let b1 = canon_chain.with_transaction(t1).with_transaction(t2).generate(&mut finalizer).unwrap(); - let b2 = canon_chain.with_transaction(t3).generate(&mut finalizer).unwrap(); + + let genesis = BlockBuilder::genesis(); + let b1 = genesis.add_block_with_transactions(vec![t1, t2]); + let b2 = b1.add_block_with_transactions(iter::once(t3)); + let b1_hash = b1.last().hash(); + let b1_number = b1.last().number(); + let b2_hash = b2.last().hash(); + let b2_number = b2.last().number(); let db = new_db(); - let bc = new_chain(&genesis, db.clone()); - insert_block(&db, &bc, &b1, vec![Receipt { + let bc = new_chain(&genesis.last().encoded(), db.clone()); + insert_block(&db, &bc, &b1.last().encoded(), vec![Receipt { outcome: TransactionOutcome::StateRoot(H256::default()), gas_used: 10_000.into(), log_bloom: Default::default(), @@ -2061,7 +1880,7 @@ mod tests { LogEntry { address: Default::default(), topics: vec![], data: vec![3], }, ], }]); - insert_block(&db, &bc, &b2, vec![ + insert_block(&db, &bc, &b2.last().encoded(), vec![ Receipt { outcome: TransactionOutcome::StateRoot(H256::default()), gas_used: 10_000.into(), @@ -2073,8 +1892,6 @@ mod tests { ]); // when - let block1 = BlockView::new(&b1); - let block2 = BlockView::new(&b2); let logs1 = bc.logs(vec![1, 2], |_| true, None); let logs2 = bc.logs(vec![1, 2], |_| true, Some(1)); @@ -2082,36 +1899,36 @@ mod tests { assert_eq!(logs1, vec![ LocalizedLogEntry { entry: LogEntry { address: Default::default(), topics: vec![], data: vec![1] }, - 
block_hash: block1.hash(), - block_number: block1.header().number(), - transaction_hash: tx_hash1.clone(), + block_hash: b1_hash, + block_number: b1_number, + transaction_hash: tx_hash1, transaction_index: 0, transaction_log_index: 0, log_index: 0, }, LocalizedLogEntry { entry: LogEntry { address: Default::default(), topics: vec![], data: vec![2] }, - block_hash: block1.hash(), - block_number: block1.header().number(), - transaction_hash: tx_hash1.clone(), + block_hash: b1_hash, + block_number: b1_number, + transaction_hash: tx_hash1, transaction_index: 0, transaction_log_index: 1, log_index: 1, }, LocalizedLogEntry { entry: LogEntry { address: Default::default(), topics: vec![], data: vec![3] }, - block_hash: block1.hash(), - block_number: block1.header().number(), - transaction_hash: tx_hash2.clone(), + block_hash: b1_hash, + block_number: b1_number, + transaction_hash: tx_hash2, transaction_index: 1, transaction_log_index: 0, log_index: 2, }, LocalizedLogEntry { entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] }, - block_hash: block2.hash(), - block_number: block2.header().number(), - transaction_hash: tx_hash3.clone(), + block_hash: b2_hash, + block_number: b2_number, + transaction_hash: tx_hash3, transaction_index: 0, transaction_log_index: 0, log_index: 0, @@ -2120,9 +1937,9 @@ mod tests { assert_eq!(logs2, vec![ LocalizedLogEntry { entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] }, - block_hash: block2.hash(), - block_number: block2.header().number(), - transaction_hash: tx_hash3.clone(), + block_hash: b2_hash, + block_number: b2_number, + transaction_hash: tx_hash3, transaction_index: 0, transaction_log_index: 0, log_index: 0, @@ -2139,148 +1956,155 @@ mod tests { let bloom_ba: H2048 = 
"00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let mut fork = canon_chain.fork(1); - let mut fork_finalizer = finalizer.fork(); - let b1 = fork.with_bloom(bloom_b1.clone()).generate(&mut fork_finalizer).unwrap(); - let b2 = fork.with_bloom(bloom_b2.clone()).generate(&mut fork_finalizer).unwrap(); - let b3 = fork.with_bloom(bloom_ba.clone()).generate(&mut fork_finalizer).unwrap(); - let b1a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap(); - let b2a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap(); + let genesis = BlockBuilder::genesis(); + let b1 = genesis.add_block_with(|| BlockOptions { + bloom: bloom_b1.clone(), + difficulty: 9.into(), + ..Default::default() + }); + let b2 = b1.add_block_with_bloom(bloom_b2); + let b3 = b2.add_block_with_bloom(bloom_ba); + + let b1a = genesis.add_block_with_bloom(bloom_ba); + let b2a = b1a.add_block_with_bloom(bloom_ba); let db = new_db(); - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.last().encoded(), db.clone()); - let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); - let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); - assert_eq!(blocks_b1, Vec::::new()); - assert_eq!(blocks_b2, Vec::::new()); + let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5); + let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 
5); + assert!(blocks_b1.is_empty()); + assert!(blocks_b2.is_empty()); - insert_block(&db, &bc, &b1, vec![]); - let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); - let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); + insert_block(&db, &bc, &b1.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5); + let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5); assert_eq!(blocks_b1, vec![1]); - assert_eq!(blocks_b2, Vec::::new()); + assert!(blocks_b2.is_empty()); - insert_block(&db, &bc, &b2, vec![]); - let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); - let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); + insert_block(&db, &bc, &b2.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5); + let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5); assert_eq!(blocks_b1, vec![1]); assert_eq!(blocks_b2, vec![2]); // hasn't been forked yet - insert_block(&db, &bc, &b1a, vec![]); - let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); - let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); - let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5); + insert_block(&db, &bc, &b1a.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5); + let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5); + let blocks_ba = bc.blocks_with_blooms(&[bloom_ba], 0, 5); assert_eq!(blocks_b1, vec![1]); assert_eq!(blocks_b2, vec![2]); - assert_eq!(blocks_ba, Vec::::new()); + assert!(blocks_ba.is_empty()); // fork has happend - insert_block(&db, &bc, &b2a, vec![]); - let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); - let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); - let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5); - assert_eq!(blocks_b1, Vec::::new()); - assert_eq!(blocks_b2, Vec::::new()); + insert_block(&db, &bc, &b2a.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5); + let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5); + let 
blocks_ba = bc.blocks_with_blooms(&[bloom_ba], 0, 5); + assert!(blocks_b1.is_empty()); + assert!(blocks_b2.is_empty()); assert_eq!(blocks_ba, vec![1, 2]); // fork back - insert_block(&db, &bc, &b3, vec![]); - let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5); - let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5); - let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5); + insert_block(&db, &bc, &b3.last().encoded(), vec![]); + let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5); + let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5); + let blocks_ba = bc.blocks_with_blooms(&[bloom_ba], 0, 5); assert_eq!(blocks_b1, vec![1]); assert_eq!(blocks_b2, vec![2]); assert_eq!(blocks_ba, vec![3]); } + #[test] + fn test_insert_unordered() { + let bloom_b1: H2048 = "00000020000000000000000000000000000000000000000002000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000400000000000000000000002000".into(); + + let bloom_b2: H2048 = "00000000000000000000000000000000000000000000020000001000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); + + let bloom_b3: H2048 = 
"00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(); + + let genesis = BlockBuilder::genesis(); + let b1 = genesis.add_block_with_bloom(bloom_b1); + let b2 = b1.add_block_with_bloom(bloom_b2); + let b3 = b2.add_block_with_bloom(bloom_b3); + let b1_total_difficulty = *genesis.last().header.difficulty() + *b1.last().header.difficulty(); + + let db = new_db(); + let bc = new_chain(&genesis.last().encoded(), db.clone()); + let mut batch = db.transaction(); + bc.insert_unordered_block(&mut batch, &b2.last().encoded(), vec![], Some(b1_total_difficulty), false, false); + bc.commit(); + bc.insert_unordered_block(&mut batch, &b3.last().encoded(), vec![], None, true, false); + bc.commit(); + bc.insert_unordered_block(&mut batch, &b1.last().encoded(), vec![], None, false, false); + bc.commit(); + db.write(batch).unwrap(); + + assert_eq!(bc.best_block_hash(), b3.last().hash()); + assert_eq!(bc.block_hash(1).unwrap(), b1.last().hash()); + assert_eq!(bc.block_hash(2).unwrap(), b2.last().hash()); + assert_eq!(bc.block_hash(3).unwrap(), b3.last().hash()); + + let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 3); + let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 3); + let blocks_b3 = bc.blocks_with_blooms(&[bloom_b3], 0, 3); + + assert_eq!(blocks_b1, vec![1]); + assert_eq!(blocks_b2, vec![2]); + assert_eq!(blocks_b3, vec![3]); + } + #[test] fn test_best_block_update() { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut 
finalizer).unwrap(); + let genesis = BlockBuilder::genesis(); + let next_5 = genesis.add_blocks(5); + let uncle = genesis.add_block_with_difficulty(9); + let generator = BlockGenerator::new(iter::once(next_5)); let db = new_db(); { - let bc = new_chain(&genesis, db.clone()); - let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); + let bc = new_chain(&genesis.last().encoded(), db.clone()); let mut batch = db.transaction(); // create a longer fork - for _ in 0..5 { - let canon_block = canon_chain.generate(&mut finalizer).unwrap(); - bc.insert_block(&mut batch, &canon_block, vec![]); + for block in generator { + bc.insert_block(&mut batch, &block.encoded(), vec![]); bc.commit(); } assert_eq!(bc.best_block_number(), 5); - bc.insert_block(&mut batch, &uncle, vec![]); + bc.insert_block(&mut batch, &uncle.last().encoded(), vec![]); db.write(batch).unwrap(); bc.commit(); } // re-loading the blockchain should load the correct best block. - let bc = new_chain(&genesis, db); + let bc = new_chain(&genesis.last().encoded(), db); assert_eq!(bc.best_block_number(), 5); } - #[test] - fn test_rewind() { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); - let first = canon_chain.generate(&mut finalizer).unwrap(); - let second = canon_chain.generate(&mut finalizer).unwrap(); - let genesis_hash = BlockView::new(&genesis).header_view().hash(); - let first_hash = BlockView::new(&first).header_view().hash(); - let second_hash = BlockView::new(&second).header_view().hash(); - - let db = new_db(); - let bc = new_chain(&genesis, db.clone()); - - let mut batch =db.transaction(); - bc.insert_block(&mut batch, &first, vec![]); - bc.commit(); - bc.insert_block(&mut batch, &second, vec![]); - bc.commit(); - db.write(batch).unwrap(); - - assert_eq!(bc.rewind(), Some(first_hash.clone())); - assert!(!bc.is_known(&second_hash)); - assert_eq!(bc.best_block_number(), 
1); - assert_eq!(bc.best_block_hash(), first_hash.clone()); - - assert_eq!(bc.rewind(), Some(genesis_hash.clone())); - assert_eq!(bc.rewind(), None); - } - #[test] fn epoch_transitions_iter() { use ::engines::EpochTransition; - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); + let genesis = BlockBuilder::genesis(); + let next_5 = genesis.add_blocks(5); + let uncle = genesis.add_block_with_difficulty(9); + let generator = BlockGenerator::new(iter::once(next_5)); let db = new_db(); { - let bc = new_chain(&genesis, db.clone()); - let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); + let bc = new_chain(&genesis.last().encoded(), db.clone()); let mut batch = db.transaction(); // create a longer fork - for i in 0..5 { - let canon_block = canon_chain.generate(&mut finalizer).unwrap(); - let hash = BlockView::new(&canon_block).header_view().hash(); + for (i, block) in generator.into_iter().enumerate() { - bc.insert_block(&mut batch, &canon_block, vec![]); - bc.insert_epoch_transition(&mut batch, i, EpochTransition { - block_hash: hash, - block_number: i + 1, + bc.insert_block(&mut batch, &block.encoded(), vec![]); + bc.insert_epoch_transition(&mut batch, i as u64, EpochTransition { + block_hash: block.hash(), + block_number: i as u64 + 1, proof: vec![], }); bc.commit(); @@ -2288,10 +2112,9 @@ mod tests { assert_eq!(bc.best_block_number(), 5); - let hash = BlockView::new(&uncle).header_view().hash(); - bc.insert_block(&mut batch, &uncle, vec![]); + bc.insert_block(&mut batch, &uncle.last().encoded(), vec![]); bc.insert_epoch_transition(&mut batch, 999, EpochTransition { - block_hash: hash, + block_hash: uncle.last().hash(), block_number: 1, proof: vec![], }); @@ -2304,7 +2127,7 @@ mod tests { } // re-loading the blockchain should load the correct best block. 
- let bc = new_chain(&genesis, db); + let bc = new_chain(&genesis.last().encoded(), db); assert_eq!(bc.best_block_number(), 5); assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::>(), vec![0, 1, 2, 3, 4]); @@ -2314,13 +2137,18 @@ mod tests { fn epoch_transition_for() { use ::engines::EpochTransition; - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); + let genesis = BlockBuilder::genesis(); + let fork_7 = genesis.add_blocks_with(7, || BlockOptions { + difficulty: 9.into(), + ..Default::default() + }); + let next_10 = genesis.add_blocks(10); + let fork_generator = BlockGenerator::new(iter::once(fork_7)); + let next_generator = BlockGenerator::new(iter::once(next_10)); let db = new_db(); - let bc = new_chain(&genesis, db.clone()); + let bc = new_chain(&genesis.last().encoded(), db.clone()); let mut batch = db.transaction(); bc.insert_epoch_transition(&mut batch, 0, EpochTransition { @@ -2333,14 +2161,10 @@ mod tests { // set up a chain where we have a canonical chain of 10 blocks // and a non-canonical fork of 8 from genesis. 
let fork_hash = { - let mut fork_chain = canon_chain.fork(1); - let mut fork_finalizer = finalizer.fork(); - - for _ in 0..7 { + for block in fork_generator { let mut batch = db.transaction(); - let fork_block = fork_chain.generate(&mut fork_finalizer).unwrap(); - bc.insert_block(&mut batch, &fork_block, vec![]); + bc.insert_block(&mut batch, &block.encoded(), vec![]); bc.commit(); db.write(batch).unwrap(); } @@ -2349,11 +2173,9 @@ mod tests { bc.chain_info().best_block_hash }; - for _ in 0..10 { + for block in next_generator { let mut batch = db.transaction(); - let canon_block = canon_chain.generate(&mut finalizer).unwrap(); - - bc.insert_block(&mut batch, &canon_block, vec![]); + bc.insert_block(&mut batch, &block.encoded(), vec![]); bc.commit(); db.write(batch).unwrap(); diff --git a/ethcore/src/blockchain/cache.rs b/ethcore/src/blockchain/cache.rs index 999be423d..5a609f9d7 100644 --- a/ethcore/src/blockchain/cache.rs +++ b/ethcore/src/blockchain/cache.rs @@ -23,8 +23,6 @@ pub struct CacheSize { pub block_details: usize, /// Transaction addresses cache size. pub transaction_addresses: usize, - /// Blooms cache size. - pub blocks_blooms: usize, /// Block receipts size. pub block_receipts: usize, } @@ -32,6 +30,6 @@ pub struct CacheSize { impl CacheSize { /// Total amount used by the cache. 
pub fn total(&self) -> usize { - self.blocks + self.block_details + self.transaction_addresses + self.blocks_blooms + self.block_receipts + self.blocks + self.block_details + self.transaction_addresses + self.block_receipts } } diff --git a/ethcore/src/blockchain/extras.rs b/ethcore/src/blockchain/extras.rs index 6184a2f47..56a1805f8 100644 --- a/ethcore/src/blockchain/extras.rs +++ b/ethcore/src/blockchain/extras.rs @@ -18,8 +18,6 @@ use std::ops; use std::io::Write; -use bloomchain; -use blooms::{GroupPosition, BloomGroup}; use db::Key; use engines::epoch::{Transition as EpochTransition}; use header::BlockNumber; @@ -39,8 +37,6 @@ pub enum ExtrasIndex { BlockHash = 1, /// Transaction address index TransactionAddress = 2, - /// Block blooms index - BlocksBlooms = 3, /// Block receipts index BlockReceipts = 4, /// Epoch transition data index. @@ -88,46 +84,6 @@ impl Key for H256 { } } -pub struct LogGroupKey([u8; 6]); - -impl ops::Deref for LogGroupKey { - type Target = [u8]; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone)] -pub struct LogGroupPosition(GroupPosition); - -impl From for LogGroupPosition { - fn from(position: bloomchain::group::GroupPosition) -> Self { - LogGroupPosition(From::from(position)) - } -} - -impl HeapSizeOf for LogGroupPosition { - fn heap_size_of_children(&self) -> usize { - self.0.heap_size_of_children() - } -} - -impl Key for LogGroupPosition { - type Target = LogGroupKey; - - fn key(&self) -> Self::Target { - let mut result = [0u8; 6]; - result[0] = ExtrasIndex::BlocksBlooms as u8; - result[1] = self.0.level; - result[2] = (self.0.index >> 24) as u8; - result[3] = (self.0.index >> 16) as u8; - result[4] = (self.0.index >> 8) as u8; - result[5] = self.0.index as u8; - LogGroupKey(result) - } -} - impl Key for H256 { type Target = H264; diff --git a/ethcore/src/blockchain/generator.rs b/ethcore/src/blockchain/generator.rs new file mode 100644 index 000000000..6e831f103 --- /dev/null 
+++ b/ethcore/src/blockchain/generator.rs @@ -0,0 +1,218 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Blockchain generator for tests. + +use std::collections::VecDeque; +use bigint::prelude::{U256, H256, H2048 as Bloom}; + +use bytes::Bytes; +use header::Header; +use rlp::encode; +use transaction::SignedTransaction; +use views::BlockView; + +/// Helper structure, used for encoding blocks. +#[derive(Default, Clone, RlpEncodable)] +pub struct Block { + pub header: Header, + pub transactions: Vec, + pub uncles: Vec
+} + +impl Block { + #[inline] + pub fn header(&self) -> Header { + self.header.clone() + } + + #[inline] + pub fn hash(&self) -> H256 { + BlockView::new(&self.encoded()).header_view().hash() + } + + #[inline] + pub fn number(&self) -> u64 { + self.header.number() + } + + #[inline] + pub fn encoded(&self) -> Bytes { + encode(self).into_vec() + } +} + +#[derive(Debug)] +pub struct BlockOptions { + pub difficulty: U256, + pub bloom: Bloom, + pub transactions: Vec, +} + +impl Default for BlockOptions { + fn default() -> Self { + BlockOptions { + difficulty: 10.into(), + bloom: Bloom::default(), + transactions: Vec::new(), + } + } +} + +#[derive(Clone)] +pub struct BlockBuilder { + blocks: VecDeque, +} + +impl BlockBuilder { + pub fn genesis() -> Self { + let mut blocks = VecDeque::with_capacity(1); + blocks.push_back(Block::default()); + + BlockBuilder { + blocks, + } + } + + #[inline] + pub fn add_block(&self) -> Self { + self.add_block_with(|| BlockOptions::default()) + } + + #[inline] + pub fn add_blocks(&self, count: usize) -> Self { + self.add_blocks_with(count, || BlockOptions::default()) + } + + #[inline] + pub fn add_block_with(&self, get_metadata: T) -> Self where T: Fn() -> BlockOptions { + self.add_blocks_with(1, get_metadata) + } + + #[inline] + pub fn add_block_with_difficulty(&self, difficulty: T) -> Self where T: Into { + let difficulty = difficulty.into(); + self.add_blocks_with(1, move || BlockOptions { + difficulty, + ..Default::default() + }) + } + + #[inline] + pub fn add_block_with_transactions(&self, transactions: T) -> Self + where T: IntoIterator { + let transactions = transactions.into_iter().collect::>(); + self.add_blocks_with(1, || BlockOptions { + transactions: transactions.clone(), + ..Default::default() + }) + } + + #[inline] + pub fn add_block_with_bloom(&self, bloom: Bloom) -> Self { + self.add_blocks_with(1, move || BlockOptions { + bloom, + ..Default::default() + }) + } + + pub fn add_blocks_with(&self, count: usize, get_metadata: T) 
-> Self where T: Fn() -> BlockOptions { + assert!(count > 0, "There must be at least 1 block"); + let mut parent_hash = self.last().hash(); + let mut parent_number = self.last().number(); + let mut blocks = VecDeque::with_capacity(count); + for _ in 0..count { + let mut block = Block::default(); + let metadata = get_metadata(); + let block_number = parent_number + 1; + block.header.set_parent_hash(parent_hash); + block.header.set_number(block_number); + block.header.set_log_bloom(metadata.bloom); + block.header.set_difficulty(metadata.difficulty); + block.transactions = metadata.transactions; + + parent_hash = block.hash(); + parent_number = block_number; + + blocks.push_back(block); + } + + BlockBuilder { + blocks, + } + } + + #[inline] + pub fn last(&self) -> &Block { + self.blocks.back().expect("There is always at least 1 block") + } +} + +#[derive(Clone)] +pub struct BlockGenerator { + builders: VecDeque, +} + +impl BlockGenerator { + pub fn new(builders: T) -> Self where T: IntoIterator { + BlockGenerator { + builders: builders.into_iter().collect(), + } + } +} + +impl Iterator for BlockGenerator { + type Item = Block; + + fn next(&mut self) -> Option { + loop { + match self.builders.front_mut() { + Some(ref mut builder) => { + if let Some(block) = builder.blocks.pop_front() { + return Some(block); + } + }, + None => return None, + } + self.builders.pop_front(); + } + + } +} + +#[cfg(test)] +mod tests { + use super::{BlockBuilder, BlockOptions, BlockGenerator}; + + #[test] + fn test_block_builder() { + let genesis = BlockBuilder::genesis(); + let block_1 = genesis.add_block(); + let block_1001 = block_1.add_blocks(1000); + let block_1002 = block_1001.add_block_with(|| BlockOptions::default()); + let generator = BlockGenerator::new(vec![genesis, block_1, block_1001, block_1002]); + assert_eq!(generator.count(), 1003); + } + + #[test] + fn test_block_builder_fork() { + let genesis = BlockBuilder::genesis(); + let block_10a = genesis.add_blocks(10); + let 
block_11b = genesis.add_blocks(11); + assert_eq!(block_10a.last().number(), 10); + assert_eq!(block_11b.last().number(), 11); + } +} diff --git a/ethcore/src/blockchain/generator/block.rs b/ethcore/src/blockchain/generator/block.rs deleted file mode 100644 index 4aba039a3..000000000 --- a/ethcore/src/blockchain/generator/block.rs +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use rlp::*; -use bigint::hash::{H256, H2048}; -use bytes::Bytes; -use header::Header; -use transaction::SignedTransaction; - -use super::fork::Forkable; -use super::bloom::WithBloom; -use super::complete::CompleteBlock; -use super::transaction::WithTransaction; - -/// Helper structure, used for encoding blocks. -#[derive(Default)] -pub struct Block { - pub header: Header, - pub transactions: Vec, - pub uncles: Vec
-} - -impl Encodable for Block { - fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3); - s.append(&self.header); - s.append_list(&self.transactions); - s.append_list(&self.uncles); - } -} - -impl Forkable for Block { - fn fork(mut self, fork_number: usize) -> Self where Self: Sized { - let difficulty = self.header.difficulty().clone() - fork_number.into(); - self.header.set_difficulty(difficulty); - self - } -} - -impl WithBloom for Block { - fn with_bloom(mut self, bloom: H2048) -> Self where Self: Sized { - self.header.set_log_bloom(bloom); - self - } -} - -impl WithTransaction for Block { - fn with_transaction(mut self, transaction: SignedTransaction) -> Self where Self: Sized { - self.transactions.push(transaction); - self - } -} - -impl CompleteBlock for Block { - fn complete(mut self, parent_hash: H256) -> Bytes { - self.header.set_parent_hash(parent_hash); - encode(&self).into_vec() - } -} diff --git a/ethcore/src/blockchain/generator/bloom.rs b/ethcore/src/blockchain/generator/bloom.rs deleted file mode 100644 index bb83ff1af..000000000 --- a/ethcore/src/blockchain/generator/bloom.rs +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use bigint::hash::H2048; - -pub trait WithBloom { - fn with_bloom(self, bloom: H2048) -> Self where Self: Sized; -} - -pub struct Bloom<'a, I> where I: 'a { - pub iter: &'a mut I, - pub bloom: H2048, -} - -impl<'a, I> Iterator for Bloom<'a, I> where I: Iterator, ::Item: WithBloom { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|item| item.with_bloom(self.bloom.clone())) - } -} diff --git a/ethcore/src/blockchain/generator/complete.rs b/ethcore/src/blockchain/generator/complete.rs deleted file mode 100644 index 7b9bc572a..000000000 --- a/ethcore/src/blockchain/generator/complete.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use bigint::hash::H256; -use bytes::Bytes; -use views::BlockView; - -#[derive(Default, Clone)] -pub struct BlockFinalizer { - parent_hash: H256 -} - -impl BlockFinalizer { - pub fn fork(&self) -> Self { - self.clone() - } -} - -pub trait CompleteBlock { - fn complete(self, parent_hash: H256) -> Bytes; -} - -pub struct Complete<'a, I> where I: 'a { - pub iter: &'a mut I, - pub finalizer: &'a mut BlockFinalizer, -} - -impl<'a, I> Iterator for Complete<'a, I> where I: Iterator, ::Item: CompleteBlock { - type Item = Bytes; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|item| { - let rlp = item.complete(self.finalizer.parent_hash.clone()); - self.finalizer.parent_hash = BlockView::new(&rlp).header_view().hash(); - rlp - }) - } -} diff --git a/ethcore/src/blockchain/generator/fork.rs b/ethcore/src/blockchain/generator/fork.rs deleted file mode 100644 index 4f09f3c04..000000000 --- a/ethcore/src/blockchain/generator/fork.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -pub trait Forkable { - fn fork(self, fork_number: usize) -> Self where Self: Sized; -} - -pub struct Fork { - pub iter: I, - pub fork_number: usize, -} - -impl Clone for Fork where I: Iterator + Clone { - fn clone(&self) -> Self { - Fork { - iter: self.iter.clone(), - fork_number: self.fork_number - } - } -} - -impl Iterator for Fork where I: Iterator, ::Item: Forkable { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|item| item.fork(self.fork_number)) - } -} diff --git a/ethcore/src/blockchain/generator/generator.rs b/ethcore/src/blockchain/generator/generator.rs deleted file mode 100644 index 967fc37fd..000000000 --- a/ethcore/src/blockchain/generator/generator.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use bigint::prelude::U256; -use bigint::hash::H2048; -use bytes::Bytes; -use header::BlockNumber; -use transaction::SignedTransaction; -use super::fork::Fork; -use super::bloom::Bloom; -use super::complete::{BlockFinalizer, CompleteBlock, Complete}; -use super::block::Block; -use super::transaction::Transaction; - -/// Chain iterator interface. -pub trait ChainIterator: Iterator + Sized { - /// Should be called to create a fork of current iterator. - /// Blocks generated by fork will have lower difficulty than current chain. 
- fn fork(&self, fork_number: usize) -> Fork where Self: Clone; - /// Should be called to make every consecutive block have given bloom. - fn with_bloom(&mut self, bloom: H2048) -> Bloom; - /// Should be called to make every consecutive block have given transaction. - fn with_transaction(&mut self, transaction: SignedTransaction) -> Transaction; - /// Should be called to complete block. Without complete, block may have incorrect hash. - fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self>; - /// Completes and generates block. - fn generate<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Option where Self::Item: CompleteBlock; -} - -impl ChainIterator for I where I: Iterator + Sized { - fn fork(&self, fork_number: usize) -> Fork where I: Clone { - Fork { - iter: self.clone(), - fork_number: fork_number - } - } - - fn with_bloom(&mut self, bloom: H2048) -> Bloom { - Bloom { - iter: self, - bloom: bloom - } - } - - fn with_transaction(&mut self, transaction: SignedTransaction) -> Transaction { - Transaction { - iter: self, - transaction: transaction, - } - } - - fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self> { - Complete { - iter: self, - finalizer: finalizer - } - } - - fn generate<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Option where ::Item: CompleteBlock { - self.complete(finalizer).next() - } -} - -/// Blockchain generator. -#[derive(Clone)] -pub struct ChainGenerator { - /// Next block number. - number: BlockNumber, - /// Next block difficulty. 
- difficulty: U256, -} - -impl ChainGenerator { - fn prepare_block(&self) -> Block { - let mut block = Block::default(); - block.header.set_number(self.number); - block.header.set_difficulty(self.difficulty); - block - } -} - -impl Default for ChainGenerator { - fn default() -> Self { - ChainGenerator { - number: 0, - difficulty: 1000.into(), - } - } -} - -impl Iterator for ChainGenerator { - type Item = Block; - - fn next(&mut self) -> Option { - let block = self.prepare_block(); - self.number += 1; - Some(block) - } -} - -mod tests { - use bigint::hash::{H256, H2048}; - use views::BlockView; - use blockchain::generator::{ChainIterator, ChainGenerator, BlockFinalizer}; - - #[test] - fn canon_chain_generator() { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - - let genesis_rlp = canon_chain.generate(&mut finalizer).unwrap(); - let genesis = BlockView::new(&genesis_rlp); - - assert_eq!(genesis.header_view().parent_hash(), H256::default()); - assert_eq!(genesis.header_view().number(), 0); - - let b1_rlp = canon_chain.generate(&mut finalizer).unwrap(); - let b1 = BlockView::new(&b1_rlp); - - assert_eq!(b1.header_view().parent_hash(), genesis.header_view().hash()); - assert_eq!(b1.header_view().number(), 1); - - let mut fork_chain = canon_chain.fork(1); - - let b2_rlp_fork = fork_chain.generate(&mut finalizer.fork()).unwrap(); - let b2_fork = BlockView::new(&b2_rlp_fork); - - assert_eq!(b2_fork.header_view().parent_hash(), b1.header_view().hash()); - assert_eq!(b2_fork.header_view().number(), 2); - - let b2_rlp = canon_chain.generate(&mut finalizer).unwrap(); - let b2 = BlockView::new(&b2_rlp); - - assert_eq!(b2.header_view().parent_hash(), b1.header_view().hash()); - assert_eq!(b2.header_view().number(), 2); - assert!(b2.header_view().difficulty() > b2_fork.header_view().difficulty()); - } - - #[test] - fn with_bloom_generator() { - let bloom = H2048([0x1; 256]); - let mut gen = ChainGenerator::default(); - let mut 
finalizer = BlockFinalizer::default(); - - let block0_rlp = gen.with_bloom(bloom).generate(&mut finalizer).unwrap(); - let block1_rlp = gen.generate(&mut finalizer).unwrap(); - let block0 = BlockView::new(&block0_rlp); - let block1 = BlockView::new(&block1_rlp); - - assert_eq!(block0.header_view().number(), 0); - assert_eq!(block0.header_view().parent_hash(), H256::default()); - - assert_eq!(block1.header_view().number(), 1); - assert_eq!(block1.header_view().parent_hash(), block0.header_view().hash()); - - } - - #[test] - fn generate_1000_blocks() { - let generator = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let blocks: Vec<_> = generator.take(1000).complete(&mut finalizer).collect(); - assert_eq!(blocks.len(), 1000); - } -} - diff --git a/ethcore/src/blockchain/generator/mod.rs b/ethcore/src/blockchain/generator/mod.rs deleted file mode 100644 index c32c4a3d7..000000000 --- a/ethcore/src/blockchain/generator/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Blockchain generator for tests. 
- -mod bloom; -mod block; -mod complete; -mod fork; -pub mod generator; -mod transaction; - -pub use self::complete::BlockFinalizer; -pub use self::generator::{ChainIterator, ChainGenerator}; diff --git a/ethcore/src/blockchain/generator/transaction.rs b/ethcore/src/blockchain/generator/transaction.rs deleted file mode 100644 index c2c89ace7..000000000 --- a/ethcore/src/blockchain/generator/transaction.rs +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use transaction::SignedTransaction; - -pub trait WithTransaction { - fn with_transaction(self, transaction: SignedTransaction) -> Self where Self: Sized; -} - -pub struct Transaction<'a, I> where I: 'a { - pub iter: &'a mut I, - pub transaction: SignedTransaction, -} - -impl <'a, I> Iterator for Transaction<'a, I> where I: Iterator, ::Item: WithTransaction { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|item| item.with_transaction(self.transaction.clone())) - } -} diff --git a/ethcore/src/blockchain/update.rs b/ethcore/src/blockchain/update.rs index 10a6fcd06..a7d9a2cb9 100644 --- a/ethcore/src/blockchain/update.rs +++ b/ethcore/src/blockchain/update.rs @@ -2,8 +2,7 @@ use std::collections::HashMap; use bigint::hash::H256; use header::BlockNumber; use blockchain::block_info::BlockInfo; -use blooms::BloomGroup; -use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition}; +use blockchain::extras::{BlockDetails, BlockReceipts, TransactionAddress}; /// Block extras update info. pub struct ExtrasUpdate<'a> { @@ -19,8 +18,6 @@ pub struct ExtrasUpdate<'a> { pub block_details: HashMap, /// Modified block receipts. pub block_receipts: HashMap, - /// Modified blocks blooms. - pub blocks_blooms: HashMap, /// Modified transaction addresses (None signifies removed transactions). pub transactions_addresses: HashMap>, } diff --git a/ethcore/src/blooms/bloom_group.rs b/ethcore/src/blooms/bloom_group.rs deleted file mode 100644 index 1867b7ecc..000000000 --- a/ethcore/src/blooms/bloom_group.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use bloomchain::group as bc; -use rlp::*; -use heapsize::HeapSizeOf; -use super::Bloom; - -/// Represents group of X consecutive blooms. -#[derive(Debug, Clone)] -pub struct BloomGroup { - blooms: Vec, -} - -impl From for BloomGroup { - fn from(group: bc::BloomGroup) -> Self { - let blooms = group.blooms - .into_iter() - .map(From::from) - .collect(); - - BloomGroup { - blooms: blooms - } - } -} - -impl Into for BloomGroup { - fn into(self) -> bc::BloomGroup { - let blooms = self.blooms - .into_iter() - .map(Into::into) - .collect(); - - bc::BloomGroup { - blooms: blooms - } - } -} - -impl Decodable for BloomGroup { - fn decode(rlp: &UntrustedRlp) -> Result { - let blooms = rlp.as_list()?; - let group = BloomGroup { - blooms: blooms - }; - Ok(group) - } -} - -impl Encodable for BloomGroup { - fn rlp_append(&self, s: &mut RlpStream) { - s.append_list(&self.blooms); - } -} - -impl HeapSizeOf for BloomGroup { - fn heap_size_of_children(&self) -> usize { - self.blooms.heap_size_of_children() - } -} diff --git a/ethcore/src/blooms/group_position.rs b/ethcore/src/blooms/group_position.rs deleted file mode 100644 index b1ea82792..000000000 --- a/ethcore/src/blooms/group_position.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use bloomchain::group as bc; -use heapsize::HeapSizeOf; - -/// Represents `BloomGroup` position in database. -#[derive(PartialEq, Eq, Hash, Clone, Debug)] -pub struct GroupPosition { - /// Bloom level. - pub level: u8, - /// Group index. - pub index: u32, -} - -impl From for GroupPosition { - fn from(p: bc::GroupPosition) -> Self { - GroupPosition { - level: p.level as u8, - index: p.index as u32, - } - } -} - -impl HeapSizeOf for GroupPosition { - fn heap_size_of_children(&self) -> usize { - 0 - } -} diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 0cddfcc8a..f803eb7d6 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1673,17 +1673,8 @@ impl BlockChainClient for Client { }; let chain = self.chain.read(); - let blocks = filter.bloom_possibilities().iter() - .map(move |bloom| { - chain.blocks_with_bloom(bloom, from, to) - }) - .flat_map(|m| m) - // remove duplicate elements - .collect::>() - .into_iter() - .collect::>(); - - self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit) + let blocks = chain.blocks_with_blooms(&filter.bloom_possibilities(), from, to); + chain.logs(blocks, |entry| filter.matches(entry), filter.limit) } fn filter_traces(&self, filter: TraceFilter) -> Option> { diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index fdbd35e95..b4ae063b4 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -54,7 +54,6 @@ //! cargo build --release //! 
``` -extern crate bloomchain; extern crate bn; extern crate byteorder; extern crate crossbeam; @@ -155,7 +154,6 @@ pub mod verification; pub mod views; mod cache_manager; -mod blooms; mod basic_types; mod pod_account; mod state_db; diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index bd0cb227b..33faab667 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -57,6 +57,8 @@ pub enum Error { VersionNotSupported(u64), /// Max chunk size is to small to fit basic account data. ChunkTooSmall, + /// Oversized chunk + ChunkTooLarge, /// Snapshots not supported by the consensus engine. SnapshotsUnsupported, /// Bad epoch transition. @@ -85,6 +87,7 @@ impl fmt::Display for Error { Error::Trie(ref err) => err.fmt(f), Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver), Error::ChunkTooSmall => write!(f, "Chunk size is too small."), + Error::ChunkTooLarge => write!(f, "Chunk size is too large."), Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."), Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i), Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg), diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 02adb2c16..e54989405 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -77,6 +77,11 @@ mod traits; // Try to have chunks be around 4MB (before compression) const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024; +// Maximal chunk size (decompressed) +// Snappy::decompressed_len estimation may sometimes yield results greater +// than PREFERRED_CHUNK_SIZE so allow some threshold here. +const MAX_CHUNK_SIZE: usize = PREFERRED_CHUNK_SIZE / 4 * 5; + // Minimum supported state chunk version. const MIN_SUPPORTED_STATE_CHUNK_VERSION: u64 = 1; // current state chunk version. 
diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 21e7a6752..88ad5c511 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -23,7 +23,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use super::{ManifestData, StateRebuilder, Rebuilder, RestorationStatus, SnapshotService}; +use super::{ManifestData, StateRebuilder, Rebuilder, RestorationStatus, SnapshotService, MAX_CHUNK_SIZE}; use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; use blockchain::BlockChain; @@ -130,6 +130,11 @@ impl Restoration { // feeds a state chunk, aborts early if `flag` becomes false. fn feed_state(&mut self, hash: H256, chunk: &[u8], flag: &AtomicBool) -> Result<(), Error> { if self.state_chunks_left.contains(&hash) { + let expected_len = snappy::decompressed_len(chunk)?; + if expected_len > MAX_CHUNK_SIZE { + trace!(target: "snapshot", "Discarding large chunk: {} vs {}", expected_len, MAX_CHUNK_SIZE); + return Err(::snapshot::Error::ChunkTooLarge.into()); + } let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; self.state.feed(&self.snappy_buffer[..len], flag)?; @@ -147,6 +152,11 @@ impl Restoration { // feeds a block chunk fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &EthEngine, flag: &AtomicBool) -> Result<(), Error> { if self.block_chunks_left.contains(&hash) { + let expected_len = snappy::decompressed_len(chunk)?; + if expected_len > MAX_CHUNK_SIZE { + trace!(target: "snapshot", "Discarding large chunk: {} vs {}", expected_len, MAX_CHUNK_SIZE); + return Err(::snapshot::Error::ChunkTooLarge.into()); + } let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; self.secondary.feed(&self.snappy_buffer[..len], engine, flag)?; diff --git a/ethcore/src/snapshot/tests/proof_of_work.rs b/ethcore/src/snapshot/tests/proof_of_work.rs index d4df7bb10..16b39dc05 100644 --- 
a/ethcore/src/snapshot/tests/proof_of_work.rs +++ b/ethcore/src/snapshot/tests/proof_of_work.rs @@ -19,7 +19,7 @@ use devtools::RandomTempPath; use error::Error; -use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer}; +use blockchain::generator::{BlockGenerator, BlockBuilder}; use blockchain::BlockChain; use snapshot::{chunk_secondary, Error as SnapshotError, Progress, SnapshotComponents}; use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}; @@ -35,9 +35,10 @@ use std::sync::atomic::AtomicBool; const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { blocks: 30000, max_restore_blocks: 30000 }; fn chunk_and_restore(amount: u64) { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - let genesis = canon_chain.generate(&mut finalizer).unwrap(); + let genesis = BlockBuilder::genesis(); + let rest = genesis.add_blocks(amount as usize); + let generator = BlockGenerator::new(vec![rest]); + let genesis = genesis.last(); let engine = ::spec::Spec::new_test().engine; let new_path = RandomTempPath::create_dir(); @@ -45,13 +46,12 @@ fn chunk_and_restore(amount: u64) { snapshot_path.push("SNAP"); let old_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0))); - let bc = BlockChain::new(Default::default(), &genesis, old_db.clone()); + let bc = BlockChain::new(Default::default(), &genesis.encoded(), old_db.clone()); // build the blockchain. let mut batch = DBTransaction::new(); - for _ in 0..amount { - let block = canon_chain.generate(&mut finalizer).unwrap(); - bc.insert_block(&mut batch, &block, vec![]); + for block in generator { + bc.insert_block(&mut batch, &block.encoded(), vec![]); bc.commit(); } @@ -82,7 +82,7 @@ fn chunk_and_restore(amount: u64) { // restore it. 
let new_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0))); - let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone()); + let new_chain = BlockChain::new(Default::default(), &genesis.encoded(), new_db.clone()); let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap(); let reader = PackedReader::new(&snapshot_path).unwrap().unwrap(); @@ -97,15 +97,19 @@ fn chunk_and_restore(amount: u64) { drop(rebuilder); // and test it. - let new_chain = BlockChain::new(Default::default(), &genesis, new_db); + let new_chain = BlockChain::new(Default::default(), &genesis.encoded(), new_db); assert_eq!(new_chain.best_block_hash(), best_hash); } #[test] -fn chunk_and_restore_500() { chunk_and_restore(500) } +fn chunk_and_restore_500() { + chunk_and_restore(500) +} #[test] -fn chunk_and_restore_40k() { chunk_and_restore(40000) } +fn chunk_and_restore_4k() { + chunk_and_restore(4000) +} #[test] fn checks_flag() { @@ -120,17 +124,12 @@ fn checks_flag() { stream.append_empty_data().append_empty_data(); - let genesis = { - let mut canon_chain = ChainGenerator::default(); - let mut finalizer = BlockFinalizer::default(); - canon_chain.generate(&mut finalizer).unwrap() - }; - + let genesis = BlockBuilder::genesis(); let chunk = stream.out(); let db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0))); let engine = ::spec::Spec::new_test().engine; - let chain = BlockChain::new(Default::default(), &genesis, db.clone()); + let chain = BlockChain::new(Default::default(), &genesis.last().encoded(), db.clone()); let manifest = ::snapshot::ManifestData { version: 2, diff --git a/ethcore/src/trace/bloom.rs b/ethcore/src/trace/bloom.rs deleted file mode 100644 index ed34d6505..000000000 --- a/ethcore/src/trace/bloom.rs +++ /dev/null @@ -1,77 +0,0 @@ -use bloomchain::Bloom; -use bloomchain::group::{BloomGroup, GroupPosition}; -use basic_types::LogBloom; - -/// Helper structure representing bloom of the trace. 
-#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper)] -pub struct BlockTracesBloom(LogBloom); - -impl From for BlockTracesBloom { - fn from(bloom: LogBloom) -> BlockTracesBloom { - BlockTracesBloom(bloom) - } -} - -impl From for BlockTracesBloom { - fn from(bloom: Bloom) -> BlockTracesBloom { - let bytes: [u8; 256] = bloom.into(); - BlockTracesBloom(LogBloom::from(bytes)) - } -} - -impl Into for BlockTracesBloom { - fn into(self) -> Bloom { - let log = self.0; - Bloom::from(log.0) - } -} - -/// Represents group of X consecutive blooms. -#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper)] -pub struct BlockTracesBloomGroup { - blooms: Vec, -} - -impl From for BlockTracesBloomGroup { - fn from(group: BloomGroup) -> Self { - let blooms = group.blooms - .into_iter() - .map(From::from) - .collect(); - - BlockTracesBloomGroup { - blooms: blooms - } - } -} - -impl Into for BlockTracesBloomGroup { - fn into(self) -> BloomGroup { - let blooms = self.blooms - .into_iter() - .map(Into::into) - .collect(); - - BloomGroup { - blooms: blooms - } - } -} - -/// Represents `BloomGroup` position in database. -#[derive(PartialEq, Eq, Hash, Clone, Debug)] -pub struct TraceGroupPosition { - /// Bloom level. - pub level: u8, - /// Group index. - pub index: u32, -} - -impl From for TraceGroupPosition { - fn from(p: GroupPosition) -> Self { - TraceGroupPosition { - level: p.level as u8, - index: p.index as u32, - } - } -} diff --git a/ethcore/src/trace/config.rs b/ethcore/src/trace/config.rs index dbd8a97af..59ce099de 100644 --- a/ethcore/src/trace/config.rs +++ b/ethcore/src/trace/config.rs @@ -15,7 +15,6 @@ // along with Parity. If not, see . //! Traces config. -use bloomchain::Config as BloomConfig; /// Traces config. #[derive(Debug, PartialEq, Clone)] @@ -23,8 +22,6 @@ pub struct Config { /// Indicates if tracing should be enabled or not. /// If it's None, it will be automatically configured. pub enabled: bool, - /// Traces blooms configuration. 
- pub blooms: BloomConfig, /// Preferef cache-size. pub pref_cache_size: usize, /// Max cache-size. @@ -35,10 +32,6 @@ impl Default for Config { fn default() -> Self { Config { enabled: false, - blooms: BloomConfig { - levels: 3, - elements_per_index: 16, - }, pref_cache_size: 15 * 1024 * 1024, max_cache_size: 20 * 1024 * 1024, } diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index e90eedc4b..5ab5d96cc 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -15,19 +15,15 @@ // along with Parity. If not, see . //! Trace database. -use std::ops::Deref; use std::collections::{HashMap, VecDeque}; use std::sync::Arc; -use bloomchain::{Number, Config as BloomConfig}; -use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup}; use heapsize::HeapSizeOf; -use bigint::hash::{H256, H264}; +use bigint::hash::{H256, H264, H2048 as Bloom}; use kvdb::{KeyValueDB, DBTransaction}; use parking_lot::RwLock; use header::BlockNumber; use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras}; use db::{self, Key, Writable, Readable, CacheUpdatePolicy}; -use blooms; use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; use cache_manager::CacheManager; @@ -37,8 +33,8 @@ const TRACE_DB_VER: &'static [u8] = b"1.0"; enum TraceDBIndex { /// Block traces index. BlockTraces = 0, - /// Trace bloom group index. - BloomGroups = 1, + /// Blooms index. + Blooms = 2, } impl Key for H256 { @@ -52,80 +48,37 @@ impl Key for H256 { } } -/// Wrapper around `blooms::GroupPosition` so it could be -/// uniquely identified in the database. 
-#[derive(Debug, PartialEq, Eq, Hash, Clone)] -struct TraceGroupPosition(blooms::GroupPosition); +impl Key for H256 { + type Target = H264; -impl From for TraceGroupPosition { - fn from(position: GroupPosition) -> Self { - TraceGroupPosition(From::from(position)) - } -} - -impl HeapSizeOf for TraceGroupPosition { - fn heap_size_of_children(&self) -> usize { - 0 - } -} - -/// Helper data structure created cause [u8; 6] does not implement Deref to &[u8]. -pub struct TraceGroupKey([u8; 6]); - -impl Deref for TraceGroupKey { - type Target = [u8]; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl Key for TraceGroupPosition { - type Target = TraceGroupKey; - - fn key(&self) -> Self::Target { - let mut result = [0u8; 6]; - result[0] = TraceDBIndex::BloomGroups as u8; - result[1] = self.0.level; - result[2] = self.0.index as u8; - result[3] = (self.0.index >> 8) as u8; - result[4] = (self.0.index >> 16) as u8; - result[5] = (self.0.index >> 24) as u8; - TraceGroupKey(result) + fn key(&self) -> H264 { + let mut result = H264::default(); + result[0] = TraceDBIndex::Blooms as u8; + result[1..33].copy_from_slice(self); + result } } #[derive(Debug, Hash, Eq, PartialEq)] enum CacheId { Trace(H256), - Bloom(TraceGroupPosition), + Bloom(H256), } /// Trace database. pub struct TraceDB where T: DatabaseExtras { // cache traces: RwLock>, - blooms: RwLock>, + blooms: RwLock>, cache_manager: RwLock>, // db tracesdb: Arc, - // config, - bloom_config: BloomConfig, // tracing enabled enabled: bool, // extras extras: Arc, } -impl BloomGroupDatabase for TraceDB where T: DatabaseExtras { - fn blooms_at(&self, position: &GroupPosition) -> Option { - let position = TraceGroupPosition::from(position.clone()); - let result = self.tracesdb.read_with_cache(db::COL_TRACE, &self.blooms, &position).map(Into::into); - self.note_used(CacheId::Bloom(position)); - result - } -} - impl TraceDB where T: DatabaseExtras { /// Creates new instance of `TraceDB`. 
pub fn new(config: Config, tracesdb: Arc, extras: Arc) -> Self { @@ -137,13 +90,12 @@ impl TraceDB where T: DatabaseExtras { tracesdb.write(batch).expect("failed to update version"); TraceDB { - traces: RwLock::new(HashMap::new()), - blooms: RwLock::new(HashMap::new()), cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)), - tracesdb: tracesdb, - bloom_config: config.blooms, + tracesdb, enabled: config.enabled, - extras: extras, + extras, + traces: RwLock::default(), + blooms: RwLock::default(), } } @@ -188,6 +140,12 @@ impl TraceDB where T: DatabaseExtras { result } + fn bloom(&self, block_hash: &H256) -> Option { + let result = self.tracesdb.read_with_cache(db::COL_TRACE, &self.blooms, block_hash); + self.note_used(CacheId::Bloom(block_hash.clone())); + result + } + /// Returns vector of transaction traces for given block. fn transactions_traces(&self, block_hash: &H256) -> Option> { self.traces(block_hash).map(Into::into) @@ -264,49 +222,16 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { return; } - // now let's rebuild the blooms - if !request.enacted.is_empty() { - let range_start = request.block_number as Number + 1 - request.enacted.len(); - let range_end = range_start + request.retracted; - let replaced_range = range_start..range_end; - let enacted_blooms = request.enacted - .iter() - // all traces are expected to be found here. That's why `expect` has been used - // instead of `filter_map`. If some traces haven't been found, it meens that - // traces database is corrupted or incomplete. 
- .map(|block_hash| if block_hash == &request.block_hash { - request.traces.bloom() - } else { - self.traces(block_hash).expect("Traces database is incomplete.").bloom() - }) - .map(blooms::Bloom::from) - .map(Into::into) - .collect(); - - let chain = BloomGroupChain::new(self.bloom_config, self); - let trace_blooms = chain.replace(&replaced_range, enacted_blooms); - let blooms_to_insert = trace_blooms.into_iter() - .map(|p| (From::from(p.0), From::from(p.1))) - .collect::>(); - - let blooms_keys: Vec<_> = blooms_to_insert.keys().cloned().collect(); - let mut blooms = self.blooms.write(); - batch.extend_with_cache(db::COL_TRACE, &mut *blooms, blooms_to_insert, CacheUpdatePolicy::Remove); - // note_used must be called after locking blooms to avoid cache/traces deadlock on garbage collection - for key in blooms_keys { - self.note_used(CacheId::Bloom(key)); - } - } - // insert new block traces into the cache and the database - { - let mut traces = self.traces.write(); - // it's important to use overwrite here, - // cause this value might be queried by hash later - batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite); - // note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection - self.note_used(CacheId::Trace(request.block_hash.clone())); - } + let mut traces = self.traces.write(); + let mut blooms = self.blooms.write(); + // it's important to use overwrite here, + // cause this value might be queried by hash later + batch.write_with_cache(db::COL_TRACE, &mut *blooms, request.block_hash, request.traces.bloom(), CacheUpdatePolicy::Overwrite); + batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite); + // note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection + self.note_used(CacheId::Trace(request.block_hash)); + 
self.note_used(CacheId::Bloom(request.block_hash)); } fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec) -> Option { @@ -393,15 +318,17 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { } fn filter(&self, filter: &Filter) -> Vec { - let chain = BloomGroupChain::new(self.bloom_config, self); - let numbers = chain.filter(filter); - numbers.into_iter() - .flat_map(|n| { - let number = n as BlockNumber; - let hash = self.extras.block_hash(number) - .expect("Expected to find block hash. Extras db is probably corrupted"); - let traces = self.traces(&hash) - .expect("Expected to find a trace. Db is probably corrupted."); + let possibilities = filter.bloom_possibilities(); + // + 1, cause filters are inclusive + (filter.range.start..filter.range.end + 1).into_iter() + .map(|n| n as BlockNumber) + .filter_map(|n| self.extras.block_hash(n).map(|hash| (n, hash))) + .filter(|&(_,ref hash)| { + let bloom = self.bloom(hash).expect("hash exists; qed"); + possibilities.iter().any(|p| bloom.contains(p)) + }) + .flat_map(|(number, hash)| { + let traces = self.traces(&hash).expect("hash exists; qed"); self.matching_block_traces(filter, traces, hash, number) }) .collect() diff --git a/ethcore/src/trace/mod.rs b/ethcore/src/trace/mod.rs index 991e434fc..e427af7d2 100644 --- a/ethcore/src/trace/mod.rs +++ b/ethcore/src/trace/mod.rs @@ -16,7 +16,6 @@ //! Tracing -mod bloom; mod config; mod db; mod executive_tracer; diff --git a/ethcore/src/trace/types/filter.rs b/ethcore/src/trace/types/filter.rs index f7e2d2140..4df362d17 100644 --- a/ethcore/src/trace/types/filter.rs +++ b/ethcore/src/trace/types/filter.rs @@ -17,10 +17,10 @@ //! 
Trace filters type definitions use std::ops::Range; -use bloomchain::{Filter as BloomFilter, Bloom, Number}; use hash::keccak; use util::Address; use bloomable::Bloomable; +use bigint::prelude::H2048 as Bloom; use basic_types::LogBloom; use trace::flat::FlatTrace; use super::trace::{Action, Res}; @@ -87,22 +87,9 @@ pub struct Filter { pub to_address: AddressesFilter, } -impl BloomFilter for Filter { - fn bloom_possibilities(&self) -> Vec { - self.bloom_possibilities() - .into_iter() - .map(|b| Bloom::from(b.0)) - .collect() - } - - fn range(&self) -> Range { - self.range.clone() - } -} - impl Filter { /// Returns combinations of each address. - fn bloom_possibilities(&self) -> Vec { + pub fn bloom_possibilities(&self) -> Vec { self.to_address.with_blooms(self.from_address.blooms()) } diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index fb9a2b6c0..7ac943d0e 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -461,7 +461,7 @@ mod tests { unimplemented!() } - fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockNumber, _to_block: BlockNumber) -> Vec { + fn blocks_with_blooms(&self, _blooms: &[H2048], _from_block: BlockNumber, _to_block: BlockNumber) -> Vec { unimplemented!() } diff --git a/hw/src/ledger.rs b/hw/src/ledger.rs index 6b4c87a0a..f0ec44c8b 100644 --- a/hw/src/ledger.rs +++ b/hw/src/ledger.rs @@ -22,16 +22,21 @@ use super::{WalletInfo, KeyPath}; use bigint::hash::H256; use ethkey::{Address, Signature}; use hidapi; +use libusb; use parking_lot::{Mutex, RwLock}; use std::cmp::min; use std::fmt; use std::str::FromStr; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use std::time::Duration; +use std::thread; + +/// Ledger vendor ID +pub const LEDGER_VID: u16 = 0x2c97; +/// Legder product IDs: [Nano S and Blue] +pub const LEDGER_PIDS: [u16; 2] = [0x0000, 0x0001]; -const LEDGER_VID: u16 = 0x2c97; -const LEDGER_PIDS: [u16; 2] = [0x0000, 0x0001]; // Nano 
S and Blue const ETH_DERIVATION_PATH_BE: [u8; 17] = [4, 0x80, 0, 0, 44, 0x80, 0, 0, 60, 0x80, 0, 0, 0, 0, 0, 0, 0]; // 44'/60'/0'/0 const ETC_DERIVATION_PATH_BE: [u8; 21] = [5, 0x80, 0, 0, 44, 0x80, 0, 0, 60, 0x80, 0x02, 0x73, 0xd0, 0x80, 0, 0, 0, 0, 0, 0, 0]; // 44'/60'/160720'/0'/0 @@ -54,10 +59,14 @@ pub enum Error { Protocol(&'static str), /// Hidapi error. Usb(hidapi::HidError), + /// Libusb error + LibUsb(libusb::Error), /// Device with request key is not available. KeyNotFound, /// Signing has been cancelled by user. UserCancel, + /// Invalid Device + InvalidDevice, } impl fmt::Display for Error { @@ -65,8 +74,10 @@ impl fmt::Display for Error { match *self { Error::Protocol(ref s) => write!(f, "Ledger protocol error: {}", s), Error::Usb(ref e) => write!(f, "USB communication error: {}", e), + Error::LibUsb(ref e) => write!(f, "LibUSB communication error: {}", e), Error::KeyNotFound => write!(f, "Key not found"), Error::UserCancel => write!(f, "Operation has been cancelled"), + Error::InvalidDevice => write!(f, "Unsupported product was entered"), } } } @@ -77,6 +88,12 @@ impl From for Error { } } +impl From for Error { + fn from(err: libusb::Error) -> Error { + Error::LibUsb(err) + } +} + /// Ledger device manager. pub struct Manager { usb: Arc>, @@ -234,16 +251,7 @@ impl Manager { fn open_path(&self, f: F) -> Result where F: Fn() -> Result { - let mut err = Error::KeyNotFound; - // Try to open device a few times. 
- for _ in 0..10 { - match f() { - Ok(handle) => return Ok(handle), - Err(e) => err = From::from(e), - } - ::std::thread::sleep(Duration::from_millis(200)); - } - Err(err) + f().map_err(Into::into) } fn send_apdu(handle: &hidapi::HidDevice, command: u8, p1: u8, p2: u8, data: &[u8]) -> Result, Error> { @@ -333,6 +341,54 @@ impl Manager { message.truncate(new_len); Ok(message) } + + fn is_valid_ledger(device: &libusb::Device) -> Result<(), Error> { + let desc = device.device_descriptor()?; + let vendor_id = desc.vendor_id(); + let product_id = desc.product_id(); + + if vendor_id == LEDGER_VID && LEDGER_PIDS.contains(&product_id) { + Ok(()) + } else { + Err(Error::InvalidDevice) + } + } + +} + +/// Ledger event handler +/// A seperate thread is handling incoming events +pub struct EventHandler { + ledger: Weak, +} + +impl EventHandler { + /// Ledger event handler constructor + pub fn new(ledger: Weak) -> Self { + Self { ledger: ledger } + } +} + +impl libusb::Hotplug for EventHandler { + fn device_arrived(&mut self, device: libusb::Device) { + if let (Some(ledger), Ok(_)) = (self.ledger.upgrade(), Manager::is_valid_ledger(&device)) { + debug!(target: "hw", "Ledger arrived"); + // Wait for the device to boot up + thread::sleep(Duration::from_millis(1000)); + if let Err(e) = ledger.update_devices() { + debug!(target: "hw", "Ledger connect error: {:?}", e); + } + } + } + + fn device_left(&mut self, device: libusb::Device) { + if let (Some(ledger), Ok(_)) = (self.ledger.upgrade(), Manager::is_valid_ledger(&device)) { + debug!(target: "hw", "Ledger left"); + if let Err(e) = ledger.update_devices() { + debug!(target: "hw", "Ledger disconnect error: {:?}", e); + } + } + } } #[test] diff --git a/hw/src/lib.rs b/hw/src/lib.rs index ef58f2d52..a1e1fbd5d 100644 --- a/hw/src/lib.rs +++ b/hw/src/lib.rs @@ -33,13 +33,15 @@ use ethkey::{Address, Signature}; use parking_lot::Mutex; use std::fmt; -use std::sync::{Arc, Weak}; +use std::sync::Arc; use std::sync::atomic; use 
std::sync::atomic::AtomicBool; use std::thread; use std::time::Duration; use bigint::prelude::uint::U256; +const USB_DEVICE_CLASS_DEVICE: u8 = 0; + /// Hardware wallet error. #[derive(Debug)] pub enum Error { @@ -128,84 +130,78 @@ impl From for Error { /// Hardware wallet management interface. pub struct HardwareWalletManager { - update_thread: Option>, exiting: Arc, ledger: Arc, trezor: Arc, } -struct EventHandler { - ledger: Weak, - trezor: Weak, -} - -impl libusb::Hotplug for EventHandler { - fn device_arrived(&mut self, _device: libusb::Device) { - debug!("USB Device arrived"); - if let (Some(l), Some(t)) = (self.ledger.upgrade(), self.trezor.upgrade()) { - for _ in 0..10 { - let l_devices = l.update_devices().unwrap_or_else(|e| { - debug!("Error enumerating Ledger devices: {}", e); - 0 - }); - let t_devices = t.update_devices().unwrap_or_else(|e| { - debug!("Error enumerating Trezor devices: {}", e); - 0 - }); - if l_devices + t_devices > 0 { - break; - } - thread::sleep(Duration::from_millis(200)); - } - } - } - - fn device_left(&mut self, _device: libusb::Device) { - debug!("USB Device lost"); - if let (Some(l), Some(t)) = (self.ledger.upgrade(), self.trezor.upgrade()) { - l.update_devices().unwrap_or_else(|e| {debug!("Error enumerating Ledger devices: {}", e); 0}); - t.update_devices().unwrap_or_else(|e| {debug!("Error enumerating Trezor devices: {}", e); 0}); - } - } -} impl HardwareWalletManager { + /// Hardware wallet constructor pub fn new() -> Result { - let usb_context = Arc::new(libusb::Context::new()?); + let usb_context_trezor = Arc::new(libusb::Context::new()?); + let usb_context_ledger = Arc::new(libusb::Context::new()?); let hidapi = Arc::new(Mutex::new(hidapi::HidApi::new().map_err(|e| Error::Hid(e.to_string().clone()))?)); let ledger = Arc::new(ledger::Manager::new(hidapi.clone())); let trezor = Arc::new(trezor::Manager::new(hidapi.clone())); - usb_context.register_callback( - None, None, None, - Box::new(EventHandler { - ledger: 
Arc::downgrade(&ledger), - trezor: Arc::downgrade(&trezor), - }), - )?; + + // Subscribe to TREZOR V1 + // Note, this support only TREZOR V1 becasue TREZOR V2 has another vendorID for some reason + // Also, we now only support one product as the second argument specifies + usb_context_trezor.register_callback( + Some(trezor::TREZOR_VID), Some(trezor::TREZOR_PIDS[0]), Some(USB_DEVICE_CLASS_DEVICE), + Box::new(trezor::EventHandler::new(Arc::downgrade(&trezor))))?; + + // Subscribe to all Ledger Devices + // This means that we need to check that the given productID is supported + // None => LIBUSB_HOTPLUG_MATCH_ANY, in other words that all are subscribed to + // More info can be found: http://libusb.sourceforge.net/api-1.0/group__hotplug.html#gae6c5f1add6cc754005549c7259dc35ea + usb_context_ledger.register_callback( + Some(ledger::LEDGER_VID), None, Some(USB_DEVICE_CLASS_DEVICE), + Box::new(ledger::EventHandler::new(Arc::downgrade(&ledger))))?; + let exiting = Arc::new(AtomicBool::new(false)); - let thread_exiting = exiting.clone(); + let thread_exiting_ledger = exiting.clone(); + let thread_exiting_trezor = exiting.clone(); let l = ledger.clone(); let t = trezor.clone(); - let thread = thread::Builder::new() - .name("hw_wallet".to_string()) + + // Ledger event thread + thread::Builder::new() + .name("hw_wallet_ledger".to_string()) .spawn(move || { if let Err(e) = l.update_devices() { - debug!("Error updating ledger devices: {}", e); - } - if let Err(e) = t.update_devices() { - debug!("Error updating trezor devices: {}", e); + debug!(target: "hw", "Ledger couldn't connect at startup, error: {}", e); + //debug!("Ledger could not connect at startup, error: {}", e); } loop { - usb_context.handle_events(Some(Duration::from_millis(500))) - .unwrap_or_else(|e| debug!("Error processing USB events: {}", e)); - if thread_exiting.load(atomic::Ordering::Acquire) { + usb_context_ledger.handle_events(Some(Duration::from_millis(500))) + .unwrap_or_else(|e| debug!(target: "hw", 
"Ledger event handler error: {}", e)); + if thread_exiting_ledger.load(atomic::Ordering::Acquire) { break; } } }) .ok(); + + // Trezor event thread + thread::Builder::new() + .name("hw_wallet_trezor".to_string()) + .spawn(move || { + if let Err(e) = t.update_devices() { + debug!(target: "hw", "Trezor couldn't connect at startup, error: {}", e); + } + loop { + usb_context_trezor.handle_events(Some(Duration::from_millis(500))) + .unwrap_or_else(|e| debug!(target: "hw", "Trezor event handler error: {}", e)); + if thread_exiting_trezor.load(atomic::Ordering::Acquire) { + break; + } + } + }) + .ok(); + Ok(HardwareWalletManager { - update_thread: thread, exiting: exiting, ledger: ledger, trezor: trezor, @@ -259,10 +255,10 @@ impl HardwareWalletManager { impl Drop for HardwareWalletManager { fn drop(&mut self) { + // Indicate to the USB Hotplug handlers that they + // shall terminate but don't wait for them to terminate. + // If they don't terminate for some reason USB Hotplug events will be handled + // even if the HardwareWalletManger has been dropped self.exiting.store(true, atomic::Ordering::Release); - if let Some(thread) = self.update_thread.take() { - thread.thread().unpark(); - thread.join().ok(); - } } } diff --git a/hw/src/trezor.rs b/hw/src/trezor.rs index a77d7233c..c32504385 100644 --- a/hw/src/trezor.rs +++ b/hw/src/trezor.rs @@ -24,23 +24,26 @@ use super::{WalletInfo, TransactionInfo, KeyPath}; use bigint::hash::H256; use ethkey::{Address, Signature}; use hidapi; +use libusb; use parking_lot::{Mutex, RwLock}; use protobuf; use protobuf::{Message, ProtobufEnum}; use std::cmp::{min, max}; use std::fmt; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use std::time::Duration; use bigint::prelude::uint::U256; use trezor_sys::messages::{EthereumAddress, PinMatrixAck, MessageType, EthereumTxRequest, EthereumSignTx, EthereumGetAddress, EthereumTxAck, ButtonAck}; -const TREZOR_VID: u16 = 0x534c; -const TREZOR_PIDS: [u16; 1] = [0x0001]; // Trezor v1, keeping this 
as an array to leave room for Trezor v2 which is in progress +/// Trezor v1 vendor ID +pub const TREZOR_VID: u16 = 0x534c; +/// Trezor product IDs +pub const TREZOR_PIDS: [u16; 1] = [0x0001]; + const ETH_DERIVATION_PATH: [u32; 5] = [0x8000002C, 0x8000003C, 0x80000000, 0, 0]; // m/44'/60'/0'/0/0 const ETC_DERIVATION_PATH: [u32; 5] = [0x8000002C, 0x8000003D, 0x80000000, 0, 0]; // m/44'/61'/0'/0/0 - /// Hardware wallet error. #[derive(Debug)] pub enum Error { @@ -55,7 +58,7 @@ pub enum Error { /// The Message Type given in the trezor RPC call is not something we recognize BadMessageType, /// Trying to read from a closed device at the given path - ClosedDevice(String), + LockedDevice(String), } impl fmt::Display for Error { @@ -66,7 +69,7 @@ impl fmt::Display for Error { Error::KeyNotFound => write!(f, "Key not found"), Error::UserCancel => write!(f, "Operation has been cancelled"), Error::BadMessageType => write!(f, "Bad Message Type in RPC call"), - Error::ClosedDevice(ref s) => write!(f, "Device is closed, needs PIN to perform operations: {}", s), + Error::LockedDevice(ref s) => write!(f, "Device is locked, needs PIN to perform operations: {}", s), } } } @@ -83,11 +86,11 @@ impl From for Error { } } -/// Ledger device manager. 
+/// Ledger device manager pub struct Manager { usb: Arc>, devices: RwLock>, - closed_devices: RwLock>, + locked_devices: RwLock>, key_path: RwLock, } @@ -109,7 +112,7 @@ impl Manager { Manager { usb: hidapi, devices: RwLock::new(Vec::new()), - closed_devices: RwLock::new(Vec::new()), + locked_devices: RwLock::new(Vec::new()), key_path: RwLock::new(KeyPath::Ethereum), } } @@ -120,7 +123,7 @@ impl Manager { usb.refresh_devices(); let devices = usb.devices(); let mut new_devices = Vec::new(); - let mut closed_devices = Vec::new(); + let mut locked_devices = Vec::new(); let mut error = None; for usb_device in devices { let is_trezor = usb_device.vendor_id == TREZOR_VID; @@ -139,7 +142,7 @@ impl Manager { } match self.read_device_info(&usb, &usb_device) { Ok(device) => new_devices.push(device), - Err(Error::ClosedDevice(path)) => closed_devices.push(path.to_string()), + Err(Error::LockedDevice(path)) => locked_devices.push(path.to_string()), Err(e) => { warn!("Error reading device: {:?}", e); error = Some(e); @@ -147,9 +150,9 @@ impl Manager { } } let count = new_devices.len(); - trace!("Got devices: {:?}, closed: {:?}", new_devices, closed_devices); + trace!("Got devices: {:?}, closed: {:?}", new_devices, locked_devices); *self.devices.write() = new_devices; - *self.closed_devices.write() = closed_devices; + *self.locked_devices.write() = locked_devices; match error { Some(e) => Err(e), None => Ok(count), @@ -173,7 +176,7 @@ impl Manager { }, }) } - Ok(None) => Err(Error::ClosedDevice(dev_info.path.clone())), + Ok(None) => Err(Error::LockedDevice(dev_info.path.clone())), Err(e) => Err(e), } } @@ -189,7 +192,7 @@ impl Manager { } pub fn list_locked_devices(&self) -> Vec { - (*self.closed_devices.read()).clone() + (*self.locked_devices.read()).clone() } /// Get wallet info. @@ -200,16 +203,7 @@ impl Manager { fn open_path(&self, f: F) -> Result where F: Fn() -> Result { - let mut err = Error::KeyNotFound; - // Try to open device a few times. 
- for _ in 0..10 { - match f() { - Ok(handle) => return Ok(handle), - Err(e) => err = From::from(e), - } - ::std::thread::sleep(Duration::from_millis(200)); - } - Err(err) + f().map_err(Into::into) } pub fn pin_matrix_ack(&self, device_path: &str, pin: &str) -> Result { @@ -406,6 +400,42 @@ impl Manager { } } +/// Trezor event handler +/// A separate thread is handeling incoming events +pub struct EventHandler { + trezor: Weak, +} + +impl EventHandler { + // Trezor event handler constructor + pub fn new(trezor: Weak) -> Self { + Self { trezor: trezor } + } +} + +impl libusb::Hotplug for EventHandler { + fn device_arrived(&mut self, _device: libusb::Device) { + debug!(target: "hw", "Trezor V1 arrived"); + if let Some(trezor) = self.trezor.upgrade() { + // Wait for the device to boot up + ::std::thread::sleep(Duration::from_millis(1000)); + if let Err(e) = trezor.update_devices() { + debug!(target: "hw", "Trezor V1 connect error: {:?}", e); + } + + } + } + + fn device_left(&mut self, _device: libusb::Device) { + debug!(target: "hw", "Trezor V1 left"); + if let Some(trezor) = self.trezor.upgrade() { + if let Err(e) = trezor.update_devices() { + debug!(target: "hw", "Trezor V1 disconnect error: {:?}", e); + } + } + } +} + #[test] #[ignore] /// This test can't be run without an actual trezor device connected diff --git a/parity/run.rs b/parity/run.rs index 98fed7e0d..7f6b7db69 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -676,11 +676,15 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc) let event_loop = EventLoop::spawn(); // the updater service + let mut updater_fetch = fetch.clone(); + // parity binaries should be smaller than 128MB + updater_fetch.set_limit(Some(128 * 1024 * 1024)); + let updater = Updater::new( Arc::downgrade(&(service.client() as Arc)), Arc::downgrade(&sync_provider), update_policy, - fetch.clone(), + updater_fetch, event_loop.remote(), ); service.add_notify(updater.clone()); diff --git a/scripts/gitlab-build.sh 
b/scripts/gitlab-build.sh index 8de221ac3..69e91569a 100755 --- a/scripts/gitlab-build.sh +++ b/scripts/gitlab-build.sh @@ -22,13 +22,10 @@ echo "Parity version: " $VER echo "Branch: " $CI_BUILD_REF_NAME echo "--------------------" -echo "Rhash version:" # NOTE for md5 and sha256 we want to display filename as well # hence we use --* instead of -p * MD5_BIN="rhash --md5" SHA256_BIN="rhash --sha256" -# NOTE For SHA3 we need only hash (hence -p) -SHA3_BIN="rhash -p %{sha3-256}" set_env () { echo "Set ENVIROMENT" @@ -70,14 +67,12 @@ strip_binaries () { calculate_checksums () { echo "Checksum calculation:" rhash --version + rm -rf *.md5 rm -rf *.sha256 - export SHA3="$($SHA3_BIN target/$PLATFORM/release/parity$S3WIN)" - # NOTE rhash 1.3.1 doesnt support keccak, workaround - if [ "$SHA3" == "%{sha3-256}" ]; then - export SHA3="$(target/$PLATFORM/release/parity$S3WIN tools hash target/$PLATFORM/release/parity$S3WIN)" - fi + BIN="target/$PLATFORM/release/parity$S3WIN" + export SHA3="$($BIN tools hash $BIN)" echo "Parity file SHA3: $SHA3" $MD5_BIN target/$PLATFORM/release/parity$S3WIN > parity$S3WIN.md5 diff --git a/util/fetch/src/client.rs b/util/fetch/src/client.rs index a24cf991b..07c6a1624 100644 --- a/util/fetch/src/client.rs +++ b/util/fetch/src/client.rs @@ -127,6 +127,11 @@ impl Client { }) } + /// Sets a limit on the maximum download size. + pub fn set_limit(&mut self, limit: Option) { + self.limit = limit + } + fn client(&self) -> Result, Error> { { let (ref time, ref client) = *self.client.read(); @@ -150,8 +155,8 @@ impl Fetch for Client { type Result = CpuFuture; fn new() -> Result { - // Max 50MB will be downloaded. - Self::with_limit(Some(50*1024*1024)) + // Max 64MB will be downloaded. + Self::with_limit(Some(64 * 1024 * 1024)) } fn process(&self, f: F) -> BoxFuture where