From bfbcb8d5512274fe97afa4bf8aeb4c4565523578 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 19:35:36 +0300 Subject: [PATCH 01/36] btree map serializer --- ethcore/src/client/test_client.rs | 2 +- ipc/rpc/src/binary.rs | 92 ++++++++++++++++++++++++++++++- 2 files changed, 92 insertions(+), 2 deletions(-) diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index ed1f10e09..69dff6c92 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -244,7 +244,7 @@ impl MiningBlockChainClient for TestBlockChainClient { fn prepare_open_block(&self, _author: Address, _gas_range_target: (U256, U256), _extra_data: Bytes) -> OpenBlock { unimplemented!(); } - + fn vm_factory(&self) -> &EvmFactory { unimplemented!(); } diff --git a/ipc/rpc/src/binary.rs b/ipc/rpc/src/binary.rs index 62a3c43b0..84e521aa5 100644 --- a/ipc/rpc/src/binary.rs +++ b/ipc/rpc/src/binary.rs @@ -19,7 +19,7 @@ use util::bytes::Populatable; use util::numbers::{U256, U512, H256, H2048, Address}; use std::mem; -use std::collections::VecDeque; +use std::collections::{VecDeque, BTreeMap}; use std::ops::Range; #[derive(Debug)] @@ -139,6 +139,96 @@ impl BinaryConvertable for Result BinaryConvertable for BTreeMap where K : BinaryConvertable + Ord, V: BinaryConvertable { + fn size(&self) -> usize { + 0usize + match K::len_params() { + 0 => mem::size_of::() * self.len(), + _ => self.iter().fold(0usize, |acc, (k, _)| acc + k.size()) + } + + + match V::len_params() { + 0 => mem::size_of::() * self.len(), + _ => self.iter().fold(0usize, |acc, (_, v)| acc + v.size()) + } + } + + fn to_bytes(&self, buffer: &mut [u8], length_stack: &mut VecDeque) -> Result<(), BinaryConvertError> { + let mut offset = 0usize; + for (key, val) in self.iter() { + let key_size = match K::len_params() { + 0 => mem::size_of::(), + _ => { let size = key.size(); length_stack.push_back(size); size } + }; + let val_size = match K::len_params() { + 0 => mem::size_of::(), + _ => { let size = val.size(); length_stack.push_back(size); size } + }; + + if key_size > 0 { + let item_end = offset + key_size; + try!(key.to_bytes(&mut buffer[offset..item_end], length_stack)); + offset = item_end; + } + + if val_size > 0 { + let item_end = offset + key_size; + try!(val.to_bytes(&mut buffer[offset..item_end], length_stack)); + offset = item_end; + } + } + Ok(()) + } + + fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque ) -> Result { + let mut index = 0; + let mut result = Self::new(); + + if buffer.len() == 0 { return Ok(result); } + + loop { + let key_size = match K::len_params() { + 0 => mem::size_of::(), + _ => try!(length_stack.pop_front().ok_or(BinaryConvertError)), + }; + let key = if key_size == 0 { + try!(K::from_empty_bytes()) + } + else { + try!(K::from_bytes(&buffer[index..index+key_size], length_stack)) + }; + index = index + key_size; + + let val_size = match V::len_params() { + 0 => mem::size_of::(), + _ => try!(length_stack.pop_front().ok_or(BinaryConvertError)), + }; + let val = if val_size == 0 { + try!(V::from_empty_bytes()) + } + else { + try!(V::from_bytes(&buffer[index..index+val_size], length_stack)) + }; + result.insert(key, val); + index = index + val_size; + + if index == buffer.len() { break; } + if index > buffer.len() { + return Err(BinaryConvertError) + } + } + + Ok(result) + } + + fn from_empty_bytes() -> Result { + Ok(Self::new()) + } + + fn len_params() -> usize { + 1 + } +} + impl BinaryConvertable for Vec where T: BinaryConvertable { fn size(&self) -> usize { match 
T::len_params() { From 9aef8ba063da67f5c64ae04021a531e8e2d19a62 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 19:41:28 +0300 Subject: [PATCH 02/36] serde tests --- ipc/rpc/src/binary.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ipc/rpc/src/binary.rs b/ipc/rpc/src/binary.rs index 84e521aa5..ea49b3454 100644 --- a/ipc/rpc/src/binary.rs +++ b/ipc/rpc/src/binary.rs @@ -742,3 +742,18 @@ fn serialize_err_opt_vec_in_out() { assert!(vec.is_ok()); } + +#[test] +fn serialize_btree() { + use std::io::{Cursor, SeekFrom, Seek}; + + let mut buff = Cursor::new(Vec::new()); + let mut btree = BTreeMap::new(); + btree.insert(1u64, 5u64); + serialize_into(&btree, &mut buff).unwrap(); + + buff.seek(SeekFrom::Start(0)).unwrap(); + let res = deserialize_from::, _>(&mut buff).unwrap(); + + assert_eq!(res[&1u64], 5u64); +} From 3cca6c869ef97ded8269bbadd7044f11239c584f Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 19:56:47 +0300 Subject: [PATCH 03/36] state diff serialization --- ethcore/src/pod_state.rs | 2 +- ethcore/src/types/account_diff.rs | 9 +++++---- ethcore/src/types/state_diff.rs | 11 +++++++---- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/ethcore/src/pod_state.rs b/ethcore/src/pod_state.rs index 76dac214b..d99344adb 100644 --- a/ethcore/src/pod_state.rs +++ b/ethcore/src/pod_state.rs @@ -72,7 +72,7 @@ impl fmt::Display for PodState { /// Calculate and return diff between `pre` state and `post` state. pub fn diff_pod(pre: &PodState, post: &PodState) -> StateDiff { - StateDiff(pre.get().keys().merge(post.get().keys()).filter_map(|acc| pod_account::diff_pod(pre.get().get(acc), post.get().get(acc)).map(|d|(acc.clone(), d))).collect()) + StateDiff { raw: pre.get().keys().merge(post.get().keys()).filter_map(|acc| pod_account::diff_pod(pre.get().get(acc), post.get().get(acc)).map(|d|(acc.clone(), d))).collect() } } #[cfg(test)] diff --git a/ethcore/src/types/account_diff.rs b/ethcore/src/types/account_diff.rs index 49fc51110..d37d3c86a 100644 --- a/ethcore/src/types/account_diff.rs +++ b/ethcore/src/types/account_diff.rs @@ -17,10 +17,11 @@ //! Diff between two accounts. use util::*; +use ipc::binary::{BinaryConvertError, BinaryConvertable}; -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone, Binary)] /// Diff type for specifying a change (or not). -pub enum Diff where T: Eq { +pub enum Diff where T: Eq + BinaryConvertable { /// Both sides are the same. Same, /// Left (pre, source) side doesn't include value, right side (post, destination) does. @@ -31,7 +32,7 @@ pub enum Diff where T: Eq { Died(T), } -impl Diff where T: Eq { +impl Diff where T: Eq + BinaryConvertable { /// Construct new object with given `pre` and `post`. pub fn new(pre: T, post: T) -> Self { if pre == post { Diff::Same } else { Diff::Changed(pre, post) } } @@ -59,7 +60,7 @@ pub struct AccountDiff { } #[derive(Debug, PartialEq, Eq, Clone)] -/// Change in existance type. +/// Change in existance type. // TODO: include other types of change. pub enum Existance { /// Item came into existance. diff --git a/ethcore/src/types/state_diff.rs b/ethcore/src/types/state_diff.rs index 4257d5b07..9ddb92cdd 100644 --- a/ethcore/src/types/state_diff.rs +++ b/ethcore/src/types/state_diff.rs @@ -22,18 +22,21 @@ use account_diff::*; #[derive(Debug, PartialEq, Eq, Clone)] /// Expression for the delta between two system states. Encoded the /// delta of every altered account. 
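// Editorial sketch (not part of the patch): the `BTreeMap` implementation of
// `BinaryConvertable` from patch 01 is what the `serialize_btree` test above
// exercises, and it is what lets the diff types below travel through the binary
// IPC codec. A minimal round-trip, assuming the same `serialize_into` /
// `deserialize_from` helpers the test uses. Note the value-size match in
// `to_bytes` keys off `K::len_params()` and the value write reuses `key_size`
// for its slice end, where `V::len_params()` / `val_size` appear intended;
// same-size fixed pairs such as `(u64, u64)` are unaffected.

use std::collections::BTreeMap;
use std::io::{Cursor, Seek, SeekFrom};

fn btree_round_trip() -> BTreeMap<u64, u64> {
    let mut map = BTreeMap::new();
    map.insert(1u64, 5u64);
    map.insert(2u64, 10u64);

    // Serialize into an in-memory cursor, rewind, then decode the map back out.
    let mut buff = Cursor::new(Vec::new());
    serialize_into(&map, &mut buff).unwrap();
    buff.seek(SeekFrom::Start(0)).unwrap();
    deserialize_from::<BTreeMap<u64, u64>, _>(&mut buff).unwrap()
}

// The hunk below then replaces the `StateDiff` tuple struct with a named `raw`
// field so derive-based binary serialization can address the map by name.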
-pub struct StateDiff (pub BTreeMap); +pub struct StateDiff { + /// Raw diff key-value + pub raw: BTreeMap +} impl StateDiff { /// Get the actual data. pub fn get(&self) -> &BTreeMap { - &self.0 + &self.raw } } impl fmt::Display for StateDiff { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (add, acc) in &self.0 { + for (add, acc) in &self.raw { try!(write!(f, "{} {}: {}", acc.existance(), add, acc)); } Ok(()) @@ -44,6 +47,6 @@ impl Deref for StateDiff { type Target = BTreeMap; fn deref(&self) -> &Self::Target { - &self.0 + &self.raw } } From dec083a5edcdff07032a8bb5fcb72636cf86cfb1 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 20:25:05 +0300 Subject: [PATCH 04/36] basic layout --- Cargo.lock | 1 + ethcore/Cargo.toml | 1 + ethcore/build.rs | 14 + ethcore/src/client/client.rs | 823 +---------------------------- ethcore/src/client/client.rs.in | 843 ++++++++++++++++++++++++++++++ ethcore/src/lib.rs | 1 + ethcore/src/types/account_diff.rs | 4 +- ethcore/src/types/executed.rs | 2 +- ethcore/src/types/state_diff.rs | 2 +- ipc/rpc/src/interface.rs | 2 +- 10 files changed, 867 insertions(+), 826 deletions(-) create mode 100644 ethcore/src/client/client.rs.in diff --git a/Cargo.lock b/Cargo.lock index 8448b0a97..f8b29bdac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -267,6 +267,7 @@ dependencies = [ "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 2b56bf581..8852d8d8c 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -31,6 +31,7 @@ bloomchain = "0.1" "ethcore-ipc" = { path = "../ipc/rpc" } rayon = "0.3.1" ethstore = { path = "../ethstore" } +semver = "0.2" [features] jit = ["evmjit"] diff --git a/ethcore/build.rs b/ethcore/build.rs index dadcce13a..190147ce2 100644 --- a/ethcore/build.rs +++ b/ethcore/build.rs @@ -30,4 +30,18 @@ fn main() { codegen::register(&mut registry); registry.expand("", &src, &dst).unwrap(); } + + // client interface + { + let src = Path::new("src/client/client.rs.in"); + let intermediate = Path::new(&out_dir).join("client.intermediate.rs.in"); + let mut registry = syntex::Registry::new(); + codegen::register(&mut registry); + registry.expand("", &src, &intermediate).unwrap(); + + let dst = Path::new(&out_dir).join("client.ipc.rs"); + let mut registry = syntex::Registry::new(); + codegen::register(&mut registry); + registry.expand("", &intermediate, &dst).unwrap(); + } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 5d157b654..8d15272a3 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -16,824 +16,5 @@ //! Blockchain database client. 
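// Editorial note (not from the patch text itself): the long run of deletions that
// follows is patch 04 moving the entire body of client.rs into the new
// client.rs.in further down, where `impl BlockChainClient for Client` gains
// `#[derive(Ipc)]` plus the ipc/binary imports. build.rs expands client.rs.in
// twice with syntex (serialization codegen first, then the IPC interface) into
// OUT_DIR, and the checked-in client.rs shrinks to the include shown later in
// this patch:
//
//     #![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
//     include!(concat!(env!("OUT_DIR"), "/client.ipc.rs"));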
-use std::path::PathBuf; -use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; -use util::*; -use util::panics::*; -use views::BlockView; -use error::{Error, ImportError, ExecutionError, BlockError, ImportResult}; -use header::{BlockNumber, Header}; -use state::State; -use spec::Spec; -use engine::Engine; -use views::HeaderView; -use service::{NetSyncMessage, SyncMessage}; -use env_info::LastHashes; -use verification; -use verification::{PreverifiedBlock, Verifier}; -use block::*; -use transaction::{LocalizedTransaction, SignedTransaction, Action}; -use blockchain::extras::TransactionAddress; -use filter::Filter; -use log_entry::LocalizedLogEntry; -use block_queue::{BlockQueue, BlockQueueInfo}; -use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; -use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient, TraceFilter, CallAnalytics}; -use client::Error as ClientError; -use env_info::EnvInfo; -use executive::{Executive, Executed, TransactOptions, contract_address}; -use receipt::LocalizedReceipt; -pub use blockchain::CacheSize as BlockChainCacheSize; -use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; -use trace; -pub use types::blockchain_info::BlockChainInfo; -pub use types::block_status::BlockStatus; -use evm::Factory as EvmFactory; -use miner::{Miner, MinerService, TransactionImportResult, AccountDetails}; - -const MAX_TX_QUEUE_SIZE: usize = 4096; - -impl fmt::Display for BlockChainInfo { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "#{}.{}", self.best_block_number, self.best_block_hash) - } -} - -/// Report on the status of a client. -#[derive(Default, Clone, Debug, Eq, PartialEq)] -pub struct ClientReport { - /// How many blocks have been imported so far. - pub blocks_imported: usize, - /// How many transactions have been applied so far. - pub transactions_applied: usize, - /// How much gas has been processed so far. - pub gas_processed: U256, - /// Memory used by state DB - pub state_db_mem: usize, -} - -impl ClientReport { - /// Alter internal reporting to reflect the additional `block` has been processed. - pub fn accrue_block(&mut self, block: &PreverifiedBlock) { - self.blocks_imported += 1; - self.transactions_applied += block.transactions.len(); - self.gas_processed = self.gas_processed + block.header.gas_used; - } -} - -/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. -/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. -pub struct Client { - chain: Arc, - tracedb: Arc>, - engine: Arc>, - state_db: Mutex>, - block_queue: BlockQueue, - report: RwLock, - import_lock: Mutex<()>, - panic_handler: Arc, - verifier: Box, - vm_factory: Arc, - miner: Arc, - io_channel: IoChannel, - queue_transactions: AtomicUsize, -} - -const HISTORY: u64 = 1200; -// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING. -// Altering it will force a blanket DB update for *all* JournalDB-derived -// databases. -// Instead, add/upgrade the version string of the individual JournalDB-derived database -// of which you actually want force an upgrade. -const CLIENT_DB_VER_STR: &'static str = "5.3"; - -/// Get the path for the databases given the root path and information on the databases. 
-pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256) -> PathBuf { - let mut dir = path.to_path_buf(); - dir.push(H64::from(genesis_hash).hex()); - //TODO: sec/fat: pruned/full versioning - // version here is a bit useless now, since it's controlled only be the pruning algo. - dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning)); - dir -} - -/// Append a path element to the given path and return the string. -pub fn append_path(path: &Path, item: &str) -> String { - let mut p = path.to_path_buf(); - p.push(item); - p.to_str().unwrap().to_owned() -} - -impl Client { - /// Create a new client with given spec and DB path and custom verifier. - pub fn new( - config: ClientConfig, - spec: Spec, - path: &Path, - miner: Arc, - message_channel: IoChannel) - -> Result, ClientError> - { - let path = get_db_path(path, config.pruning, spec.genesis_header().hash()); - let gb = spec.genesis_block(); - let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path)); - let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone()))); - - let mut state_db_config = match config.db_cache_size { - None => DatabaseConfig::default(), - Some(cache_size) => DatabaseConfig::with_cache(cache_size), - }; - - if config.db_compaction == DatabaseCompactionProfile::HDD { - state_db_config = state_db_config.compaction(CompactionProfile::hdd()); - } - - let mut state_db = journaldb::new( - &append_path(&path, "state"), - config.pruning, - state_db_config - ); - - if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) { - state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); - } - - let engine = Arc::new(spec.engine); - - let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone()); - let panic_handler = PanicHandler::new_in_arc(); - panic_handler.forward_from(&block_queue); - - let client = Client { - chain: chain, - tracedb: tracedb, - engine: engine, - state_db: Mutex::new(state_db), - block_queue: block_queue, - report: RwLock::new(Default::default()), - import_lock: Mutex::new(()), - panic_handler: panic_handler, - verifier: verification::new(config.verifier_type), - vm_factory: Arc::new(EvmFactory::new(config.vm_type)), - miner: miner, - io_channel: message_channel, - queue_transactions: AtomicUsize::new(0), - }; - - Ok(Arc::new(client)) - } - - /// Flush the block import queue. - pub fn flush_queue(&self) { - self.block_queue.flush(); - } - - fn build_last_hashes(&self, parent_hash: H256) -> LastHashes { - let mut last_hashes = LastHashes::new(); - last_hashes.resize(256, H256::new()); - last_hashes[0] = parent_hash; - for i in 0..255 { - match self.chain.block_details(&last_hashes[i]) { - Some(details) => { - last_hashes[i + 1] = details.parent.clone(); - }, - None => break, - } - } - last_hashes - } - - fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result { - let engine = self.engine.deref().deref(); - let header = &block.header; - - // Check the block isn't so old we won't be able to enact it. 
- let best_block_number = self.chain.best_block_number(); - if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY { - warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); - return Err(()); - } - - // Verify Block Family - let verify_family_result = self.verifier.verify_block_family(&header, &block.bytes, engine, self.chain.deref()); - if let Err(e) = verify_family_result { - warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - return Err(()); - }; - - // Check if Parent is in chain - let chain_has_parent = self.chain.block_header(&header.parent_hash); - if let None = chain_has_parent { - warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); - return Err(()); - }; - - // Enact Verified Block - let parent = chain_has_parent.unwrap(); - let last_hashes = self.build_last_hashes(header.parent_hash.clone()); - let db = self.state_db.lock().unwrap().boxed_clone(); - - let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.dao_rescue_block_gas_limit(header.parent_hash.clone()), &self.vm_factory); - if let Err(e) = enact_result { - warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - return Err(()); - }; - - // Final Verification - let locked_block = enact_result.unwrap(); - if let Err(e) = self.verifier.verify_block_final(&header, locked_block.block().header()) { - warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - return Err(()); - } - - Ok(locked_block) - } - - fn calculate_enacted_retracted(&self, import_results: Vec) -> (Vec, Vec) { - fn map_to_vec(map: Vec<(H256, bool)>) -> Vec { - map.into_iter().map(|(k, _v)| k).collect() - } - - // In ImportRoute we get all the blocks that have been enacted and retracted by single insert. - // Because we are doing multiple inserts some of the blocks that were enacted in import `k` - // could be retracted in import `k+1`. 
This is why to understand if after all inserts - // the block is enacted or retracted we iterate over all routes and at the end final state - // will be in the hashmap - let map = import_results.into_iter().fold(HashMap::new(), |mut map, route| { - for hash in route.enacted { - map.insert(hash, true); - } - for hash in route.retracted { - map.insert(hash, false); - } - map - }); - - // Split to enacted retracted (using hashmap value) - let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v); - // And convert tuples to keys - (map_to_vec(enacted), map_to_vec(retracted)) - } - - /// This is triggered by a message coming from a block queue when the block is ready for insertion - pub fn import_verified_blocks(&self, io: &IoChannel) -> usize { - let max_blocks_to_import = 64; - - let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); - let mut invalid_blocks = HashSet::new(); - let mut import_results = Vec::with_capacity(max_blocks_to_import); - - let _import_lock = self.import_lock.lock(); - let _timer = PerfTimer::new("import_verified_blocks"); - let blocks = self.block_queue.drain(max_blocks_to_import); - - let original_best = self.chain_info().best_block_hash; - - for block in blocks { - let header = &block.header; - - if invalid_blocks.contains(&header.parent_hash) { - invalid_blocks.insert(header.hash()); - continue; - } - let closed_block = self.check_and_close_block(&block); - if let Err(_) = closed_block { - invalid_blocks.insert(header.hash()); - continue; - } - imported_blocks.push(header.hash()); - - // Are we committing an era? - let ancient = if header.number() >= HISTORY { - let n = header.number() - HISTORY; - Some((n, self.chain.block_hash(n).unwrap())) - } else { - None - }; - - // Commit results - let closed_block = closed_block.unwrap(); - let receipts = closed_block.block().receipts().clone(); - let traces = From::from(closed_block.block().traces().clone().unwrap_or_else(Vec::new)); - - closed_block.drain() - .commit(header.number(), &header.hash(), ancient) - .expect("State DB commit failed."); - - // And update the chain after commit to prevent race conditions - // (when something is in chain but you are not able to fetch details) - let route = self.chain.insert_block(&block.bytes, receipts); - self.tracedb.import(TraceImportRequest { - traces: traces, - block_hash: header.hash(), - block_number: header.number(), - enacted: route.enacted.clone(), - retracted: route.retracted.len() - }); - - import_results.push(route); - - self.report.write().unwrap().accrue_block(&block); - trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); - } - - let imported = imported_blocks.len(); - let invalid_blocks = invalid_blocks.into_iter().collect::>(); - - { - if !invalid_blocks.is_empty() { - self.block_queue.mark_as_bad(&invalid_blocks); - } - if !imported_blocks.is_empty() { - self.block_queue.mark_as_good(&imported_blocks); - } - } - - { - if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() { - let (enacted, retracted) = self.calculate_enacted_retracted(import_results); - - if self.queue_info().is_empty() { - self.miner.chain_new_blocks(self, &imported_blocks, &invalid_blocks, &enacted, &retracted); - } - - io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { - imported: imported_blocks, - invalid: invalid_blocks, - enacted: enacted, - retracted: retracted, - })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); - } - } - - { - if self.chain_info().best_block_hash != original_best { - 
self.miner.update_sealing(self); - } - } - - imported - } - - /// Import transactions from the IO queue - pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize { - let _timer = PerfTimer::new("import_queued_transactions"); - self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst); - let fetch_account = |a: &Address| AccountDetails { - nonce: self.latest_nonce(a), - balance: self.latest_balance(a), - }; - let tx = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect(); - let results = self.miner.import_transactions(self, tx, fetch_account); - results.len() - } - - /// Attempt to get a copy of a specific block's state. - /// - /// This will not fail if given BlockID::Latest. - /// Otherwise, this can fail (but may not) if the DB prunes state. - pub fn state_at(&self, id: BlockID) -> Option { - // fast path for latest state. - if let BlockID::Latest = id.clone() { - return Some(self.state()) - } - - let block_number = match self.block_number(id.clone()) { - Some(num) => num, - None => return None, - }; - - self.block_header(id).and_then(|header| { - let db = self.state_db.lock().unwrap().boxed_clone(); - - // early exit for pruned blocks - if db.is_pruned() && self.chain.best_block_number() >= block_number + HISTORY { - return None; - } - - let root = HeaderView::new(&header).state_root(); - - State::from_existing(db, root, self.engine.account_start_nonce()).ok() - }) - } - - /// Get a copy of the best block's state. - pub fn state(&self) -> State { - State::from_existing(self.state_db.lock().unwrap().boxed_clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce()) - .expect("State root of best block header always valid.") - } - - /// Get info on the cache. - pub fn blockchain_cache_info(&self) -> BlockChainCacheSize { - self.chain.cache_size() - } - - /// Get the report. - pub fn report(&self) -> ClientReport { - let mut report = self.report.read().unwrap().clone(); - report.state_db_mem = self.state_db.lock().unwrap().mem_used(); - report - } - - /// Tick the client. - pub fn tick(&self) { - self.chain.collect_garbage(); - self.block_queue.collect_garbage(); - } - - /// Set up the cache behaviour. - pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) { - self.chain.configure_cache(pref_cache_size, max_cache_size); - } - - /// Look up the block number for the given block ID. 
- pub fn block_number(&self, id: BlockID) -> Option { - match id { - BlockID::Number(number) => Some(number), - BlockID::Hash(ref hash) => self.chain.block_number(hash), - BlockID::Earliest => Some(0), - BlockID::Latest => Some(self.chain.best_block_number()) - } - } - - fn block_hash(chain: &BlockChain, id: BlockID) -> Option { - match id { - BlockID::Hash(hash) => Some(hash), - BlockID::Number(number) => chain.block_hash(number), - BlockID::Earliest => chain.block_hash(0), - BlockID::Latest => Some(chain.best_block_hash()) - } - } - - fn transaction_address(&self, id: TransactionID) -> Option { - match id { - TransactionID::Hash(ref hash) => self.chain.transaction_address(hash), - TransactionID::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress { - block_hash: hash, - index: index, - }) - } - } -} - -impl BlockChainClient for Client { - fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { - let header = self.block_header(BlockID::Latest).unwrap(); - let view = HeaderView::new(&header); - let last_hashes = self.build_last_hashes(view.hash()); - let env_info = EnvInfo { - number: view.number(), - author: view.author(), - timestamp: view.timestamp(), - difficulty: view.difficulty(), - last_hashes: last_hashes, - gas_used: U256::zero(), - gas_limit: U256::max_value(), - dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit(view.parent_hash()), - }; - // that's just a copy of the state. - let mut state = self.state(); - let sender = try!(t.sender().map_err(|e| { - let message = format!("Transaction malformed: {:?}", e); - ExecutionError::TransactionMalformed(message) - })); - let balance = state.balance(&sender); - let needed_balance = t.value + t.gas * t.gas_price; - if balance < needed_balance { - // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance)); - } - let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; - let mut ret = Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, options); - - // TODO gav move this into Executive. 
- if analytics.state_diffing { - if let Ok(ref mut x) = ret { - x.state_diff = Some(state.diff_from(self.state())); - } - } - ret - } - - - fn block_header(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) - } - - fn block_body(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| { - self.chain.block(&hash).map(|bytes| { - let rlp = Rlp::new(&bytes); - let mut body = RlpStream::new_list(2); - body.append_raw(rlp.at(1).as_raw(), 1); - body.append_raw(rlp.at(2).as_raw(), 1); - body.out() - }) - }) - } - - fn block(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| { - self.chain.block(&hash) - }) - } - - fn block_status(&self, id: BlockID) -> BlockStatus { - match Self::block_hash(&self.chain, id) { - Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain, - Some(hash) => self.block_queue.block_status(&hash), - None => BlockStatus::Unknown - } - } - - fn block_total_difficulty(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty) - } - - fn nonce(&self, address: &Address, id: BlockID) -> Option { - self.state_at(id).map(|s| s.nonce(address)) - } - - fn block_hash(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id) - } - - fn code(&self, address: &Address) -> Option { - self.state().code(address) - } - - fn balance(&self, address: &Address, id: BlockID) -> Option { - self.state_at(id).map(|s| s.balance(address)) - } - - fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option { - self.state_at(id).map(|s| s.storage_at(address, position)) - } - - fn transaction(&self, id: TransactionID) -> Option { - self.transaction_address(id).and_then(|address| self.chain.transaction(&address)) - } - - fn uncle(&self, id: UncleID) -> Option
{ - let index = id.1; - self.block(id.0).and_then(|block| BlockView::new(&block).uncle_at(index)) - } - - fn transaction_receipt(&self, id: TransactionID) -> Option { - self.transaction_address(id).and_then(|address| { - let t = self.chain.block(&address.block_hash) - .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index)); - - match (t, self.chain.transaction_receipt(&address)) { - (Some(tx), Some(receipt)) => { - let block_hash = tx.block_hash.clone(); - let block_number = tx.block_number.clone(); - let transaction_hash = tx.hash(); - let transaction_index = tx.transaction_index; - let prior_gas_used = match tx.transaction_index { - 0 => U256::zero(), - i => { - let prior_address = TransactionAddress { block_hash: address.block_hash, index: i - 1 }; - let prior_receipt = self.chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed"); - prior_receipt.gas_used - } - }; - Some(LocalizedReceipt { - transaction_hash: tx.hash(), - transaction_index: tx.transaction_index, - block_hash: tx.block_hash, - block_number: tx.block_number, - cumulative_gas_used: receipt.gas_used, - gas_used: receipt.gas_used - prior_gas_used, - contract_address: match tx.action { - Action::Call(_) => None, - Action::Create => Some(contract_address(&tx.sender().unwrap(), &tx.nonce)) - }, - logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry { - entry: log, - block_hash: block_hash.clone(), - block_number: block_number, - transaction_hash: transaction_hash.clone(), - transaction_index: transaction_index, - log_index: i - }).collect() - }) - }, - _ => None - } - }) - } - - fn tree_route(&self, from: &H256, to: &H256) -> Option { - match self.chain.is_known(from) && self.chain.is_known(to) { - true => Some(self.chain.tree_route(from.clone(), to.clone())), - false => None - } - } - - fn find_uncles(&self, hash: &H256) -> Option> { - self.chain.find_uncle_hashes(hash, self.engine.maximum_uncle_age()) - } - - fn state_data(&self, hash: &H256) -> Option { - self.state_db.lock().unwrap().state(hash) - } - - fn block_receipts(&self, hash: &H256) -> Option { - self.chain.block_receipts(hash).map(|receipts| rlp::encode(&receipts).to_vec()) - } - - fn import_block(&self, bytes: Bytes) -> ImportResult { - { - let header = BlockView::new(&bytes).header_view(); - if self.chain.is_known(&header.sha3()) { - return Err(ImportError::AlreadyInChain.into()); - } - if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown { - return Err(BlockError::UnknownParent(header.parent_hash()).into()); - } - } - self.block_queue.import_block(bytes) - } - - fn queue_info(&self) -> BlockQueueInfo { - self.block_queue.queue_info() - } - - fn clear_queue(&self) { - self.block_queue.clear(); - } - - fn chain_info(&self) -> BlockChainInfo { - BlockChainInfo { - total_difficulty: self.chain.best_block_total_difficulty(), - pending_total_difficulty: self.chain.best_block_total_difficulty(), - genesis_hash: self.chain.genesis_hash(), - best_block_hash: self.chain.best_block_hash(), - best_block_number: From::from(self.chain.best_block_number()) - } - } - - fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option> { - match (self.block_number(from_block), self.block_number(to_block)) { - (Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)), - _ => None - } - } - - fn logs(&self, filter: Filter) -> Vec { - // TODO: lock blockchain only once - - 
let mut blocks = filter.bloom_possibilities().iter() - .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) - .flat_map(|m| m) - // remove duplicate elements - .collect::>() - .into_iter() - .collect::>(); - - blocks.sort(); - - blocks.into_iter() - .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) - .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) - .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) - .flat_map(|(number, hash, receipts, hashes)| { - let mut log_index = 0; - receipts.into_iter() - .enumerate() - .flat_map(|(index, receipt)| { - log_index += receipt.logs.len(); - receipt.logs.into_iter() - .enumerate() - .filter(|tuple| filter.matches(&tuple.1)) - .map(|(i, log)| LocalizedLogEntry { - entry: log, - block_hash: hash.clone(), - block_number: number, - transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new), - transaction_index: index, - log_index: log_index + i - }) - .collect::>() - }) - .collect::>() - - }) - .collect() - } - - fn filter_traces(&self, filter: TraceFilter) -> Option> { - let start = self.block_number(filter.range.start); - let end = self.block_number(filter.range.end); - - if start.is_some() && end.is_some() { - let filter = trace::Filter { - range: start.unwrap() as usize..end.unwrap() as usize, - from_address: From::from(filter.from_address), - to_address: From::from(filter.to_address), - }; - - let traces = self.tracedb.filter(&filter); - Some(traces) - } else { - None - } - } - - fn trace(&self, trace: TraceId) -> Option { - let trace_address = trace.address; - self.transaction_address(trace.transaction) - .and_then(|tx_address| { - self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address)) - }) - } - - fn transaction_traces(&self, transaction: TransactionID) -> Option> { - self.transaction_address(transaction) - .and_then(|tx_address| { - self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index)) - }) - } - - fn block_traces(&self, block: BlockID) -> Option> { - self.block_number(block) - .and_then(|number| self.tracedb.block_traces(number)) - } - - fn last_hashes(&self) -> LastHashes { - self.build_last_hashes(self.chain.best_block_hash()) - } - - fn import_transactions(&self, transactions: Vec) -> Vec> { - let fetch_account = |a: &Address| AccountDetails { - nonce: self.latest_nonce(a), - balance: self.latest_balance(a), - }; - self.miner.import_transactions(self, transactions, fetch_account) - } - - fn queue_transactions(&self, transactions: Vec) { - if self.queue_transactions.load(AtomicOrdering::Relaxed) > MAX_TX_QUEUE_SIZE { - debug!("Ignoring {} transactions: queue is full", transactions.len()); - } else { - let len = transactions.len(); - match self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewTransactions(transactions))) { - Ok(_) => { - self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst); - } - Err(e) => { - debug!("Ignoring {} transactions: error queueing: {}", len, e); - } - } - } - } - - fn pending_transactions(&self) -> Vec { - self.miner.pending_transactions() - } -} - -impl MiningBlockChainClient for Client { - fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { - let engine 
= self.engine.deref().deref(); - let h = self.chain.best_block_hash(); - - let mut open_block = OpenBlock::new( - engine, - &self.vm_factory, - false, // TODO: this will need to be parameterised once we want to do immediate mining insertion. - self.state_db.lock().unwrap().boxed_clone(), - &self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"), - self.build_last_hashes(h.clone()), - self.dao_rescue_block_gas_limit(h.clone()), - author, - gas_range_target, - extra_data, - ).expect("OpenBlock::new only fails if parent state root invalid; state root of best block's header is never invalid; qed"); - - // Add uncles - self.chain - .find_uncle_headers(&h, engine.maximum_uncle_age()) - .unwrap() - .into_iter() - .take(engine.maximum_uncle_count()) - .foreach(|h| { - open_block.push_uncle(h).unwrap(); - }); - - open_block - } - - fn vm_factory(&self) -> &EvmFactory { - &self.vm_factory - } -} - -impl MayPanic for Client { - fn on_panic(&self, closure: F) where F: OnPanicListener { - self.panic_handler.on_panic(closure); - } -} +#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues +include!(concat!(env!("OUT_DIR"), "/client.ipc.rs")); diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in new file mode 100644 index 000000000..2a26bbd59 --- /dev/null +++ b/ethcore/src/client/client.rs.in @@ -0,0 +1,843 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::path::PathBuf; +use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; +use util::*; +use util::panics::*; +use views::BlockView; +use error::{Error, ImportError, ExecutionError, BlockError, ImportResult}; +use header::{BlockNumber, Header}; +use state::State; +use spec::Spec; +use engine::Engine; +use views::HeaderView; +use service::{NetSyncMessage, SyncMessage}; +use env_info::LastHashes; +use verification; +use verification::{PreverifiedBlock, Verifier}; +use block::*; +use transaction::{LocalizedTransaction, SignedTransaction, Action}; +use blockchain::extras::TransactionAddress; +use filter::Filter; +use log_entry::LocalizedLogEntry; +use block_queue::{BlockQueue, BlockQueueInfo}; +use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; +use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient, TraceFilter, CallAnalytics}; +use client::Error as ClientError; +use env_info::EnvInfo; +use executive::{Executive, Executed, TransactOptions, contract_address}; +use receipt::LocalizedReceipt; +pub use blockchain::CacheSize as BlockChainCacheSize; +use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; +use trace; +pub use types::blockchain_info::BlockChainInfo; +pub use types::block_status::BlockStatus; +use evm::Factory as EvmFactory; +use miner::{Miner, MinerService, TransactionImportResult, AccountDetails}; + +use ipc::IpcConfig; +use ipc::binary::{BinaryConvertable, BinaryConvertError}; + +const MAX_TX_QUEUE_SIZE: usize = 4096; + +impl fmt::Display for BlockChainInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "#{}.{}", self.best_block_number, self.best_block_hash) + } +} + +/// Report on the status of a client. +#[derive(Default, Clone, Debug, Eq, PartialEq)] +pub struct ClientReport { + /// How many blocks have been imported so far. + pub blocks_imported: usize, + /// How many transactions have been applied so far. + pub transactions_applied: usize, + /// How much gas has been processed so far. + pub gas_processed: U256, + /// Memory used by state DB + pub state_db_mem: usize, +} + +impl ClientReport { + /// Alter internal reporting to reflect the additional `block` has been processed. + pub fn accrue_block(&mut self, block: &PreverifiedBlock) { + self.blocks_imported += 1; + self.transactions_applied += block.transactions.len(); + self.gas_processed = self.gas_processed + block.header.gas_used; + } +} + +/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. +/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. +pub struct Client { + chain: Arc, + tracedb: Arc>, + engine: Arc>, + state_db: Mutex>, + block_queue: BlockQueue, + report: RwLock, + import_lock: Mutex<()>, + panic_handler: Arc, + verifier: Box, + vm_factory: Arc, + miner: Arc, + io_channel: IoChannel, + queue_transactions: AtomicUsize, +} + +const HISTORY: u64 = 1200; +// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING. +// Altering it will force a blanket DB update for *all* JournalDB-derived +// databases. +// Instead, add/upgrade the version string of the individual JournalDB-derived database +// of which you actually want force an upgrade. +const CLIENT_DB_VER_STR: &'static str = "5.3"; + +/// Get the path for the databases given the root path and information on the databases. 
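// A hedged usage sketch for the two path helpers defined just below (editorial,
// not part of the patch; the root path, pruning algorithm and genesis hash are
// placeholders, and the exact `Display` form of `journaldb::Algorithm` is an
// assumption rather than something this patch states):

fn state_db_dir(root: &Path, pruning: journaldb::Algorithm, genesis_hash: H256) -> String {
    // get_db_path yields "<root>/<hex of the first 8 bytes of the genesis hash>/v5.3-sec-<pruning>"
    let dir = get_db_path(root, pruning, genesis_hash);
    // append_path adds the "state" element, as used for the journal DB location.
    append_path(&dir, "state")
}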
+pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256) -> PathBuf { + let mut dir = path.to_path_buf(); + dir.push(H64::from(genesis_hash).hex()); + //TODO: sec/fat: pruned/full versioning + // version here is a bit useless now, since it's controlled only be the pruning algo. + dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning)); + dir +} + +/// Append a path element to the given path and return the string. +pub fn append_path(path: &Path, item: &str) -> String { + let mut p = path.to_path_buf(); + p.push(item); + p.to_str().unwrap().to_owned() +} + +impl Client { + /// Create a new client with given spec and DB path and custom verifier. + pub fn new( + config: ClientConfig, + spec: Spec, + path: &Path, + miner: Arc, + message_channel: IoChannel) + -> Result, ClientError> + { + let path = get_db_path(path, config.pruning, spec.genesis_header().hash()); + let gb = spec.genesis_block(); + let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path)); + let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone()))); + + let mut state_db_config = match config.db_cache_size { + None => DatabaseConfig::default(), + Some(cache_size) => DatabaseConfig::with_cache(cache_size), + }; + + if config.db_compaction == DatabaseCompactionProfile::HDD { + state_db_config = state_db_config.compaction(CompactionProfile::hdd()); + } + + let mut state_db = journaldb::new( + &append_path(&path, "state"), + config.pruning, + state_db_config + ); + + if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) { + state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); + } + + let engine = Arc::new(spec.engine); + + let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone()); + let panic_handler = PanicHandler::new_in_arc(); + panic_handler.forward_from(&block_queue); + + let client = Client { + chain: chain, + tracedb: tracedb, + engine: engine, + state_db: Mutex::new(state_db), + block_queue: block_queue, + report: RwLock::new(Default::default()), + import_lock: Mutex::new(()), + panic_handler: panic_handler, + verifier: verification::new(config.verifier_type), + vm_factory: Arc::new(EvmFactory::new(config.vm_type)), + miner: miner, + io_channel: message_channel, + queue_transactions: AtomicUsize::new(0), + }; + + Ok(Arc::new(client)) + } + + /// Flush the block import queue. + pub fn flush_queue(&self) { + self.block_queue.flush(); + } + + fn build_last_hashes(&self, parent_hash: H256) -> LastHashes { + let mut last_hashes = LastHashes::new(); + last_hashes.resize(256, H256::new()); + last_hashes[0] = parent_hash; + for i in 0..255 { + match self.chain.block_details(&last_hashes[i]) { + Some(details) => { + last_hashes[i + 1] = details.parent.clone(); + }, + None => break, + } + } + last_hashes + } + + fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result { + let engine = self.engine.deref().deref(); + let header = &block.header; + + // Check the block isn't so old we won't be able to enact it. 
+ let best_block_number = self.chain.best_block_number(); + if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY { + warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); + return Err(()); + } + + // Verify Block Family + let verify_family_result = self.verifier.verify_block_family(&header, &block.bytes, engine, self.chain.deref()); + if let Err(e) = verify_family_result { + warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + return Err(()); + }; + + // Check if Parent is in chain + let chain_has_parent = self.chain.block_header(&header.parent_hash); + if let None = chain_has_parent { + warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); + return Err(()); + }; + + // Enact Verified Block + let parent = chain_has_parent.unwrap(); + let last_hashes = self.build_last_hashes(header.parent_hash.clone()); + let db = self.state_db.lock().unwrap().boxed_clone(); + + let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.dao_rescue_block_gas_limit(header.parent_hash.clone()), &self.vm_factory); + if let Err(e) = enact_result { + warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + return Err(()); + }; + + // Final Verification + let locked_block = enact_result.unwrap(); + if let Err(e) = self.verifier.verify_block_final(&header, locked_block.block().header()) { + warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + return Err(()); + } + + Ok(locked_block) + } + + fn calculate_enacted_retracted(&self, import_results: Vec) -> (Vec, Vec) { + fn map_to_vec(map: Vec<(H256, bool)>) -> Vec { + map.into_iter().map(|(k, _v)| k).collect() + } + + // In ImportRoute we get all the blocks that have been enacted and retracted by single insert. + // Because we are doing multiple inserts some of the blocks that were enacted in import `k` + // could be retracted in import `k+1`. 
This is why to understand if after all inserts + // the block is enacted or retracted we iterate over all routes and at the end final state + // will be in the hashmap + let map = import_results.into_iter().fold(HashMap::new(), |mut map, route| { + for hash in route.enacted { + map.insert(hash, true); + } + for hash in route.retracted { + map.insert(hash, false); + } + map + }); + + // Split to enacted retracted (using hashmap value) + let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v); + // And convert tuples to keys + (map_to_vec(enacted), map_to_vec(retracted)) + } + + /// This is triggered by a message coming from a block queue when the block is ready for insertion + pub fn import_verified_blocks(&self, io: &IoChannel) -> usize { + let max_blocks_to_import = 64; + + let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); + let mut invalid_blocks = HashSet::new(); + let mut import_results = Vec::with_capacity(max_blocks_to_import); + + let _import_lock = self.import_lock.lock(); + let _timer = PerfTimer::new("import_verified_blocks"); + let blocks = self.block_queue.drain(max_blocks_to_import); + + let original_best = self.chain_info().best_block_hash; + + for block in blocks { + let header = &block.header; + + if invalid_blocks.contains(&header.parent_hash) { + invalid_blocks.insert(header.hash()); + continue; + } + let closed_block = self.check_and_close_block(&block); + if let Err(_) = closed_block { + invalid_blocks.insert(header.hash()); + continue; + } + imported_blocks.push(header.hash()); + + // Are we committing an era? + let ancient = if header.number() >= HISTORY { + let n = header.number() - HISTORY; + Some((n, self.chain.block_hash(n).unwrap())) + } else { + None + }; + + // Commit results + let closed_block = closed_block.unwrap(); + let receipts = closed_block.block().receipts().clone(); + let traces = From::from(closed_block.block().traces().clone().unwrap_or_else(Vec::new)); + + closed_block.drain() + .commit(header.number(), &header.hash(), ancient) + .expect("State DB commit failed."); + + // And update the chain after commit to prevent race conditions + // (when something is in chain but you are not able to fetch details) + let route = self.chain.insert_block(&block.bytes, receipts); + self.tracedb.import(TraceImportRequest { + traces: traces, + block_hash: header.hash(), + block_number: header.number(), + enacted: route.enacted.clone(), + retracted: route.retracted.len() + }); + + import_results.push(route); + + self.report.write().unwrap().accrue_block(&block); + trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); + } + + let imported = imported_blocks.len(); + let invalid_blocks = invalid_blocks.into_iter().collect::>(); + + { + if !invalid_blocks.is_empty() { + self.block_queue.mark_as_bad(&invalid_blocks); + } + if !imported_blocks.is_empty() { + self.block_queue.mark_as_good(&imported_blocks); + } + } + + { + if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() { + let (enacted, retracted) = self.calculate_enacted_retracted(import_results); + + if self.queue_info().is_empty() { + self.miner.chain_new_blocks(self, &imported_blocks, &invalid_blocks, &enacted, &retracted); + } + + io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { + imported: imported_blocks, + invalid: invalid_blocks, + enacted: enacted, + retracted: retracted, + })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); + } + } + + { + if self.chain_info().best_block_hash != original_best { + 
self.miner.update_sealing(self); + } + } + + imported + } + + /// Import transactions from the IO queue + pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize { + let _timer = PerfTimer::new("import_queued_transactions"); + self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst); + let fetch_account = |a: &Address| AccountDetails { + nonce: self.latest_nonce(a), + balance: self.latest_balance(a), + }; + let tx = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect(); + let results = self.miner.import_transactions(self, tx, fetch_account); + results.len() + } + + /// Attempt to get a copy of a specific block's state. + /// + /// This will not fail if given BlockID::Latest. + /// Otherwise, this can fail (but may not) if the DB prunes state. + pub fn state_at(&self, id: BlockID) -> Option { + // fast path for latest state. + if let BlockID::Latest = id.clone() { + return Some(self.state()) + } + + let block_number = match self.block_number(id.clone()) { + Some(num) => num, + None => return None, + }; + + self.block_header(id).and_then(|header| { + let db = self.state_db.lock().unwrap().boxed_clone(); + + // early exit for pruned blocks + if db.is_pruned() && self.chain.best_block_number() >= block_number + HISTORY { + return None; + } + + let root = HeaderView::new(&header).state_root(); + + State::from_existing(db, root, self.engine.account_start_nonce()).ok() + }) + } + + /// Get a copy of the best block's state. + pub fn state(&self) -> State { + State::from_existing(self.state_db.lock().unwrap().boxed_clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce()) + .expect("State root of best block header always valid.") + } + + /// Get info on the cache. + pub fn blockchain_cache_info(&self) -> BlockChainCacheSize { + self.chain.cache_size() + } + + /// Get the report. + pub fn report(&self) -> ClientReport { + let mut report = self.report.read().unwrap().clone(); + report.state_db_mem = self.state_db.lock().unwrap().mem_used(); + report + } + + /// Tick the client. + pub fn tick(&self) { + self.chain.collect_garbage(); + self.block_queue.collect_garbage(); + } + + /// Set up the cache behaviour. + pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) { + self.chain.configure_cache(pref_cache_size, max_cache_size); + } + + /// Look up the block number for the given block ID. 
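// Editorial sketch (not part of the patch) of the BlockID-based lookups in this
// area: `state_at` always succeeds for `BlockID::Latest`, but can return `None`
// for older blocks once the journal DB has pruned their state, so historical
// read-only queries stay optional.

fn balance_at(client: &Client, who: &Address, id: BlockID) -> Option<U256> {
    client.state_at(id).map(|s| s.balance(who))
}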
+ pub fn block_number(&self, id: BlockID) -> Option { + match id { + BlockID::Number(number) => Some(number), + BlockID::Hash(ref hash) => self.chain.block_number(hash), + BlockID::Earliest => Some(0), + BlockID::Latest => Some(self.chain.best_block_number()) + } + } + + fn block_hash(chain: &BlockChain, id: BlockID) -> Option { + match id { + BlockID::Hash(hash) => Some(hash), + BlockID::Number(number) => chain.block_hash(number), + BlockID::Earliest => chain.block_hash(0), + BlockID::Latest => Some(chain.best_block_hash()) + } + } + + fn transaction_address(&self, id: TransactionID) -> Option { + match id { + TransactionID::Hash(ref hash) => self.chain.transaction_address(hash), + TransactionID::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress { + block_hash: hash, + index: index, + }) + } + } +} + +#[derive(Ipc)] +impl BlockChainClient for Client { + fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { + let header = self.block_header(BlockID::Latest).unwrap(); + let view = HeaderView::new(&header); + let last_hashes = self.build_last_hashes(view.hash()); + let env_info = EnvInfo { + number: view.number(), + author: view.author(), + timestamp: view.timestamp(), + difficulty: view.difficulty(), + last_hashes: last_hashes, + gas_used: U256::zero(), + gas_limit: U256::max_value(), + dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit(view.parent_hash()), + }; + // that's just a copy of the state. + let mut state = self.state(); + let sender = try!(t.sender().map_err(|e| { + let message = format!("Transaction malformed: {:?}", e); + ExecutionError::TransactionMalformed(message) + })); + let balance = state.balance(&sender); + let needed_balance = t.value + t.gas * t.gas_price; + if balance < needed_balance { + // give the sender a sufficient balance + state.add_balance(&sender, &(needed_balance - balance)); + } + let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; + let mut ret = Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, options); + + // TODO gav move this into Executive. 
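// Editorial note at this point in `call` (not part of the patch): when the caller
// asks for `state_diffing`, the locally mutated state is diffed against a fresh
// copy of the latest state and attached to `Executed::state_diff` -- the consumer
// of the `StateDiff` / `BTreeMap` serialization work in patches 01-03. A hedged
// caller-side sketch, assuming `CallAnalytics` carries exactly these three flags:
//
//     let analytics = CallAnalytics {
//         transaction_tracing: false,
//         vm_tracing: false,
//         state_diffing: true,
//     };
//     if let Ok(executed) = client.call(&signed_transaction, analytics) {
//         let state_diff = executed.state_diff; // Option<StateDiff>
//     }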
+ if analytics.state_diffing { + if let Ok(ref mut x) = ret { + x.state_diff = Some(state.diff_from(self.state())); + } + } + ret + } + + + fn block_header(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) + } + + fn block_body(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id).and_then(|hash| { + self.chain.block(&hash).map(|bytes| { + let rlp = Rlp::new(&bytes); + let mut body = RlpStream::new_list(2); + body.append_raw(rlp.at(1).as_raw(), 1); + body.append_raw(rlp.at(2).as_raw(), 1); + body.out() + }) + }) + } + + fn block(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id).and_then(|hash| { + self.chain.block(&hash) + }) + } + + fn block_status(&self, id: BlockID) -> BlockStatus { + match Self::block_hash(&self.chain, id) { + Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain, + Some(hash) => self.block_queue.block_status(&hash), + None => BlockStatus::Unknown + } + } + + fn block_total_difficulty(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty) + } + + fn nonce(&self, address: &Address, id: BlockID) -> Option { + self.state_at(id).map(|s| s.nonce(address)) + } + + fn block_hash(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id) + } + + fn code(&self, address: &Address) -> Option { + self.state().code(address) + } + + fn balance(&self, address: &Address, id: BlockID) -> Option { + self.state_at(id).map(|s| s.balance(address)) + } + + fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option { + self.state_at(id).map(|s| s.storage_at(address, position)) + } + + fn transaction(&self, id: TransactionID) -> Option { + self.transaction_address(id).and_then(|address| self.chain.transaction(&address)) + } + + fn uncle(&self, id: UncleID) -> Option
{ + let index = id.1; + self.block(id.0).and_then(|block| BlockView::new(&block).uncle_at(index)) + } + + fn transaction_receipt(&self, id: TransactionID) -> Option { + self.transaction_address(id).and_then(|address| { + let t = self.chain.block(&address.block_hash) + .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index)); + + match (t, self.chain.transaction_receipt(&address)) { + (Some(tx), Some(receipt)) => { + let block_hash = tx.block_hash.clone(); + let block_number = tx.block_number.clone(); + let transaction_hash = tx.hash(); + let transaction_index = tx.transaction_index; + let prior_gas_used = match tx.transaction_index { + 0 => U256::zero(), + i => { + let prior_address = TransactionAddress { block_hash: address.block_hash, index: i - 1 }; + let prior_receipt = self.chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed"); + prior_receipt.gas_used + } + }; + Some(LocalizedReceipt { + transaction_hash: tx.hash(), + transaction_index: tx.transaction_index, + block_hash: tx.block_hash, + block_number: tx.block_number, + cumulative_gas_used: receipt.gas_used, + gas_used: receipt.gas_used - prior_gas_used, + contract_address: match tx.action { + Action::Call(_) => None, + Action::Create => Some(contract_address(&tx.sender().unwrap(), &tx.nonce)) + }, + logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry { + entry: log, + block_hash: block_hash.clone(), + block_number: block_number, + transaction_hash: transaction_hash.clone(), + transaction_index: transaction_index, + log_index: i + }).collect() + }) + }, + _ => None + } + }) + } + + fn tree_route(&self, from: &H256, to: &H256) -> Option { + match self.chain.is_known(from) && self.chain.is_known(to) { + true => Some(self.chain.tree_route(from.clone(), to.clone())), + false => None + } + } + + fn find_uncles(&self, hash: &H256) -> Option> { + self.chain.find_uncle_hashes(hash, self.engine.maximum_uncle_age()) + } + + fn state_data(&self, hash: &H256) -> Option { + self.state_db.lock().unwrap().state(hash) + } + + fn block_receipts(&self, hash: &H256) -> Option { + self.chain.block_receipts(hash).map(|receipts| rlp::encode(&receipts).to_vec()) + } + + fn import_block(&self, bytes: Bytes) -> ImportResult { + { + let header = BlockView::new(&bytes).header_view(); + if self.chain.is_known(&header.sha3()) { + return Err(ImportError::AlreadyInChain.into()); + } + if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown { + return Err(BlockError::UnknownParent(header.parent_hash()).into()); + } + } + self.block_queue.import_block(bytes) + } + + fn queue_info(&self) -> BlockQueueInfo { + self.block_queue.queue_info() + } + + fn clear_queue(&self) { + self.block_queue.clear(); + } + + fn chain_info(&self) -> BlockChainInfo { + BlockChainInfo { + total_difficulty: self.chain.best_block_total_difficulty(), + pending_total_difficulty: self.chain.best_block_total_difficulty(), + genesis_hash: self.chain.genesis_hash(), + best_block_hash: self.chain.best_block_hash(), + best_block_number: From::from(self.chain.best_block_number()) + } + } + + fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option> { + match (self.block_number(from_block), self.block_number(to_block)) { + (Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)), + _ => None + } + } + + fn logs(&self, filter: Filter) -> Vec { + // TODO: lock blockchain only once + + 
let mut blocks = filter.bloom_possibilities().iter() + .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) + .flat_map(|m| m) + // remove duplicate elements + .collect::>() + .into_iter() + .collect::>(); + + blocks.sort(); + + blocks.into_iter() + .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) + .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) + .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) + .flat_map(|(number, hash, receipts, hashes)| { + let mut log_index = 0; + receipts.into_iter() + .enumerate() + .flat_map(|(index, receipt)| { + log_index += receipt.logs.len(); + receipt.logs.into_iter() + .enumerate() + .filter(|tuple| filter.matches(&tuple.1)) + .map(|(i, log)| LocalizedLogEntry { + entry: log, + block_hash: hash.clone(), + block_number: number, + transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new), + transaction_index: index, + log_index: log_index + i + }) + .collect::>() + }) + .collect::>() + + }) + .collect() + } + + fn filter_traces(&self, filter: TraceFilter) -> Option> { + let start = self.block_number(filter.range.start); + let end = self.block_number(filter.range.end); + + if start.is_some() && end.is_some() { + let filter = trace::Filter { + range: start.unwrap() as usize..end.unwrap() as usize, + from_address: From::from(filter.from_address), + to_address: From::from(filter.to_address), + }; + + let traces = self.tracedb.filter(&filter); + Some(traces) + } else { + None + } + } + + fn trace(&self, trace: TraceId) -> Option { + let trace_address = trace.address; + self.transaction_address(trace.transaction) + .and_then(|tx_address| { + self.block_number(BlockID::Hash(tx_address.block_hash)) + .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address)) + }) + } + + fn transaction_traces(&self, transaction: TransactionID) -> Option> { + self.transaction_address(transaction) + .and_then(|tx_address| { + self.block_number(BlockID::Hash(tx_address.block_hash)) + .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index)) + }) + } + + fn block_traces(&self, block: BlockID) -> Option> { + self.block_number(block) + .and_then(|number| self.tracedb.block_traces(number)) + } + + fn last_hashes(&self) -> LastHashes { + self.build_last_hashes(self.chain.best_block_hash()) + } + + fn import_transactions(&self, transactions: Vec) -> Vec> { + let fetch_account = |a: &Address| AccountDetails { + nonce: self.latest_nonce(a), + balance: self.latest_balance(a), + }; + self.miner.import_transactions(self, transactions, fetch_account) + } + + fn queue_transactions(&self, transactions: Vec) { + if self.queue_transactions.load(AtomicOrdering::Relaxed) > MAX_TX_QUEUE_SIZE { + debug!("Ignoring {} transactions: queue is full", transactions.len()); + } else { + let len = transactions.len(); + match self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewTransactions(transactions))) { + Ok(_) => { + self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst); + } + Err(e) => { + debug!("Ignoring {} transactions: error queueing: {}", len, e); + } + } + } + } + + fn pending_transactions(&self) -> Vec { + self.miner.pending_transactions() + } +} + +impl MiningBlockChainClient for Client { + fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { + let engine 
= self.engine.deref().deref(); + let h = self.chain.best_block_hash(); + + let mut open_block = OpenBlock::new( + engine, + &self.vm_factory, + false, // TODO: this will need to be parameterised once we want to do immediate mining insertion. + self.state_db.lock().unwrap().boxed_clone(), + &self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"), + self.build_last_hashes(h.clone()), + self.dao_rescue_block_gas_limit(h.clone()), + author, + gas_range_target, + extra_data, + ).expect("OpenBlock::new only fails if parent state root invalid; state root of best block's header is never invalid; qed"); + + // Add uncles + self.chain + .find_uncle_headers(&h, engine.maximum_uncle_age()) + .unwrap() + .into_iter() + .take(engine.maximum_uncle_count()) + .foreach(|h| { + open_block.push_uncle(h).unwrap(); + }); + + open_block + } + + fn vm_factory(&self) -> &EvmFactory { + &self.vm_factory + } +} + +impl MayPanic for Client { + fn on_panic(&self, closure: F) where F: OnPanicListener { + self.panic_handler.on_panic(closure); + } +} + +impl IpcConfig for Client { } diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 9919ec62a..2379e6805 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -92,6 +92,7 @@ extern crate bloomchain; #[macro_use] extern crate ethcore_ipc as ipc; extern crate rayon; pub extern crate ethstore; +extern crate semver; #[cfg(test)] extern crate ethcore_devtools as devtools; #[cfg(feature = "jit" )] extern crate evmjit; diff --git a/ethcore/src/types/account_diff.rs b/ethcore/src/types/account_diff.rs index d37d3c86a..abcac9a8b 100644 --- a/ethcore/src/types/account_diff.rs +++ b/ethcore/src/types/account_diff.rs @@ -46,7 +46,7 @@ impl Diff where T: Eq + BinaryConvertable { pub fn is_same(&self) -> bool { match *self { Diff::Same => true, _ => false }} } -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone, Binary)] /// Account diff. pub struct AccountDiff { /// Change in balance, allowed to be `Diff::Same`. @@ -59,7 +59,7 @@ pub struct AccountDiff { pub storage: BTreeMap>, } -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone, Binary)] /// Change in existance type. // TODO: include other types of change. pub enum Existance { diff --git a/ethcore/src/types/executed.rs b/ethcore/src/types/executed.rs index 4d31b9fe5..293a427f7 100644 --- a/ethcore/src/types/executed.rs +++ b/ethcore/src/types/executed.rs @@ -27,7 +27,7 @@ use std::mem; use std::collections::VecDeque; /// Transaction execution receipt. -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Binary)] pub struct Executed { /// Gas paid up front for execution of transaction. pub gas: U256, diff --git a/ethcore/src/types/state_diff.rs b/ethcore/src/types/state_diff.rs index 9ddb92cdd..f61a55b01 100644 --- a/ethcore/src/types/state_diff.rs +++ b/ethcore/src/types/state_diff.rs @@ -19,7 +19,7 @@ use util::*; use account_diff::*; -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone, Binary)] /// Expression for the delta between two system states. Encoded the /// delta of every altered account. 
pub struct StateDiff { diff --git a/ipc/rpc/src/interface.rs b/ipc/rpc/src/interface.rs index 820994a2b..d4514467f 100644 --- a/ipc/rpc/src/interface.rs +++ b/ipc/rpc/src/interface.rs @@ -91,7 +91,7 @@ pub fn invoke(method_num: u16, params: &Option>, w: &mut W) where W: } /// IpcSocket, read/write generalization -pub trait IpcSocket: Read + Write + Sync { +pub trait IpcSocket: Read + Write + Sync + Send { } /// Basically something that needs only socket to be spawned From 562e591ed39e977c51cd0fa0517e0c2e7b6aeb9d Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 20:59:13 +0300 Subject: [PATCH 05/36] more missing serializaers --- ethcore/src/client/client.rs.in | 6 +++--- ethcore/src/types/account_diff.rs | 11 ++++++++++- ethcore/src/types/ids.rs | 11 ++++++----- ethcore/src/types/state_diff.rs | 7 ++++++- ethcore/src/types/tree_route.rs | 5 ++++- 5 files changed, 29 insertions(+), 11 deletions(-) diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index 2a26bbd59..ddd401835 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -477,7 +477,7 @@ impl Client { } } -#[derive(Ipc)] +//#[derive(Ipc)] impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { let header = self.block_header(BlockID::Latest).unwrap(); @@ -577,8 +577,8 @@ impl BlockChainClient for Client { } fn uncle(&self, id: UncleID) -> Option
{ - let index = id.1; - self.block(id.0).and_then(|block| BlockView::new(&block).uncle_at(index)) + let index = id.position; + self.block(id.block).and_then(|block| BlockView::new(&block).uncle_at(index)) } fn transaction_receipt(&self, id: TransactionID) -> Option { diff --git a/ethcore/src/types/account_diff.rs b/ethcore/src/types/account_diff.rs index abcac9a8b..5071c2f7e 100644 --- a/ethcore/src/types/account_diff.rs +++ b/ethcore/src/types/account_diff.rs @@ -16,8 +16,13 @@ //! Diff between two accounts. -use util::*; +use util::numbers::*; +use std::cmp::*; +use std::fmt; use ipc::binary::{BinaryConvertError, BinaryConvertable}; +use util::Bytes; +use std::collections::{VecDeque, BTreeMap}; +use std::mem; #[derive(Debug, PartialEq, Eq, Clone, Binary)] /// Diff type for specifying a change (or not). @@ -95,6 +100,8 @@ impl AccountDiff { // TODO: refactor into something nicer. fn interpreted_hash(u: &H256) -> String { + use util::bytes::*; + if u <= &H256::from(0xffffffff) { format!("{} = 0x{:x}", U256::from(u.as_slice()).low_u32(), U256::from(u.as_slice()).low_u32()) } else if u <= &H256::from(u64::max_value()) { @@ -108,6 +115,8 @@ fn interpreted_hash(u: &H256) -> String { impl fmt::Display for AccountDiff { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use util::bytes::*; + match self.nonce { Diff::Born(ref x) => try!(write!(f, " non {}", x)), Diff::Changed(ref pre, ref post) => try!(write!(f, "#{} ({} {} {})", post, pre, if pre > post {"-"} else {"+"}, *max(pre, post) - * min(pre, post))), diff --git a/ethcore/src/types/ids.rs b/ethcore/src/types/ids.rs index 0a492735c..c08ab4116 100644 --- a/ethcore/src/types/ids.rs +++ b/ethcore/src/types/ids.rs @@ -47,6 +47,7 @@ pub enum TransactionID { } /// Uniquely identifies Trace. +#[derive(Binary)] pub struct TraceId { /// Transaction pub transaction: TransactionID, @@ -55,10 +56,10 @@ pub struct TraceId { } /// Uniquely identifies Uncle. -#[derive(Debug)] -pub struct UncleID ( +#[derive(Debug, Binary)] +pub struct UncleID { /// Block id. - pub BlockID, + pub block: BlockID, /// Position in block. - pub usize -); + pub position: usize +} diff --git a/ethcore/src/types/state_diff.rs b/ethcore/src/types/state_diff.rs index f61a55b01..e341b8436 100644 --- a/ethcore/src/types/state_diff.rs +++ b/ethcore/src/types/state_diff.rs @@ -16,8 +16,13 @@ //! State diff module. -use util::*; +use util::numbers::*; use account_diff::*; +use ipc::binary::BinaryConvertError; +use std::mem; +use std::fmt; +use std::ops::*; +use std::collections::{VecDeque, BTreeMap}; #[derive(Debug, PartialEq, Eq, Clone, Binary)] /// Expression for the delta between two system states. Encoded the diff --git a/ethcore/src/types/tree_route.rs b/ethcore/src/types/tree_route.rs index 2ad0aa240..37413be57 100644 --- a/ethcore/src/types/tree_route.rs +++ b/ethcore/src/types/tree_route.rs @@ -17,9 +17,12 @@ //! Tree route info type definition use util::numbers::H256; +use ipc::BinaryConvertError; +use std::collections::VecDeque; +use std::mem; /// Represents a tree route between `from` block and `to` block: -#[derive(Debug)] +#[derive(Debug, Binary)] pub struct TreeRoute { /// A vector of hashes of all blocks, ordered from `from` to `to`. 
pub blocks: Vec, From 5337de8d4212a118d4cb7a3ac5fac49e3904d109 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 21:08:34 +0300 Subject: [PATCH 06/36] uncle returns rlp --- ethcore/src/client/client.rs.in | 5 +++-- ethcore/src/client/mod.rs | 2 +- ethcore/src/client/test_client.rs | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index ddd401835..0c77574a9 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -48,6 +48,7 @@ pub use types::blockchain_info::BlockChainInfo; pub use types::block_status::BlockStatus; use evm::Factory as EvmFactory; use miner::{Miner, MinerService, TransactionImportResult, AccountDetails}; +use basic_types::*; use ipc::IpcConfig; use ipc::binary::{BinaryConvertable, BinaryConvertError}; @@ -576,9 +577,9 @@ impl BlockChainClient for Client { self.transaction_address(id).and_then(|address| self.chain.transaction(&address)) } - fn uncle(&self, id: UncleID) -> Option
{ + fn uncle(&self, id: UncleID) -> Option { let index = id.position; - self.block(id.block).and_then(|block| BlockView::new(&block).uncle_at(index)) + self.block(id.block).and_then(|block| BlockView::new(&block).uncle_at(index).and_then(|u| Some(u.rlp(Seal::With)))) } fn transaction_receipt(&self, id: TransactionID) -> Option { diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index bef814b4e..e4e454d03 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -126,7 +126,7 @@ pub trait BlockChainClient : Sync + Send { fn transaction(&self, id: TransactionID) -> Option; /// Get uncle with given id. - fn uncle(&self, id: UncleID) -> Option
; + fn uncle(&self, id: UncleID) -> Option; /// Get transaction receipt with given hash. fn transaction_receipt(&self, id: TransactionID) -> Option; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 69dff6c92..b54c7f867 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -294,7 +294,7 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } - fn uncle(&self, _id: UncleID) -> Option { + fn uncle(&self, _id: UncleID) -> Option { unimplemented!(); } From f31ddec3a88a9f7464cb36348d4eab9bceffef83 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 21:28:21 +0300 Subject: [PATCH 07/36] block queue info --- ethcore/src/block_queue.rs | 18 ++---------- ethcore/src/blockchain/block_info.rs | 40 -------------------------- ethcore/src/client/client.rs.in | 30 ++++++++++++++++--- ethcore/src/miner/transaction_queue.rs | 1 + ethcore/src/types/block_queue_info.rs | 32 +++++++++++++++++++++ ethcore/src/types/mod.rs.in | 1 + 6 files changed, 62 insertions(+), 60 deletions(-) create mode 100644 ethcore/src/types/block_queue_info.rs diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index ce99dcccd..2288c1509 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -28,6 +28,8 @@ use service::*; use client::BlockStatus; use util::panics::*; +pub use types::block_queue_info::BlockQueueInfo; + known_heap_size!(0, UnverifiedBlock, VerifyingBlock, PreverifiedBlock); const MIN_MEM_LIMIT: usize = 16384; @@ -53,22 +55,6 @@ impl Default for BlockQueueConfig { } } -/// Block queue status -#[derive(Debug)] -pub struct BlockQueueInfo { - /// Number of queued blocks pending verification - pub unverified_queue_size: usize, - /// Number of verified queued blocks pending import - pub verified_queue_size: usize, - /// Number of blocks being verified - pub verifying_queue_size: usize, - /// Configured maximum number of blocks in the queue - pub max_queue_size: usize, - /// Configured maximum number of bytes to use - pub max_mem_use: usize, - /// Heap memory used in bytes - pub mem_used: usize, -} impl BlockQueueInfo { /// The total size of the queues. diff --git a/ethcore/src/blockchain/block_info.rs b/ethcore/src/blockchain/block_info.rs index eb3677c25..52fd32291 100644 --- a/ethcore/src/blockchain/block_info.rs +++ b/ethcore/src/blockchain/block_info.rs @@ -54,43 +54,3 @@ pub struct BranchBecomingCanonChainData { /// Hashes of the blocks which were invalidated. 
pub retracted: Vec, } - -impl FromRawBytesVariable for BranchBecomingCanonChainData { - fn from_bytes_variable(bytes: &[u8]) -> Result { - type Tuple = (Vec, Vec, H256); - let (enacted, retracted, ancestor) = try!(Tuple::from_bytes_variable(bytes)); - Ok(BranchBecomingCanonChainData { ancestor: ancestor, enacted: enacted, retracted: retracted }) - } -} - -impl FromRawBytesVariable for BlockLocation { - fn from_bytes_variable(bytes: &[u8]) -> Result { - match bytes[0] { - 0 => Ok(BlockLocation::CanonChain), - 1 => Ok(BlockLocation::Branch), - 2 => Ok(BlockLocation::BranchBecomingCanonChain( - try!(BranchBecomingCanonChainData::from_bytes_variable(&bytes[1..bytes.len()])))), - _ => Err(FromBytesError::UnknownMarker) - } - } -} - -impl ToBytesWithMap for BranchBecomingCanonChainData { - fn to_bytes_map(&self) -> Vec { - (&self.enacted, &self.retracted, &self.ancestor).to_bytes_map() - } -} - -impl ToBytesWithMap for BlockLocation { - fn to_bytes_map(&self) -> Vec { - match *self { - BlockLocation::CanonChain => vec![0u8], - BlockLocation::Branch => vec![1u8], - BlockLocation::BranchBecomingCanonChain(ref data) => { - let mut bytes = (&data.enacted, &data.retracted, &data.ancestor).to_bytes_map(); - bytes.insert(0, 2u8); - bytes - } - } - } -} diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index 0c77574a9..1dc6512ab 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -14,10 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::marker::PhantomData; +use std::path::PathBuf; +use std::collections::{HashSet, HashMap}; +use std::ops::Deref; +use std::mem; +use std::collections::VecDeque; +use std::sync::*; +use std::path::Path; +use std::fmt; +use util::Itertools; + +// util +use util::numbers::*; +use util::panics::*; +use util::network::*; +use util::io::*; +use util::rlp; +use util::sha3::*; +use util::{UtilError, CryptoError, Bytes, Signature, Secret, ec}; +use util::rlp::{encode, decode, RlpStream, Rlp}; +use util::journaldb; +use util::journaldb::JournalDB; use std::path::PathBuf; use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; -use util::*; -use util::panics::*; + +// other use views::BlockView; use error::{Error, ImportError, ExecutionError, BlockError, ImportResult}; use header::{BlockNumber, Header}; @@ -478,7 +500,7 @@ impl Client { } } -//#[derive(Ipc)] +#[derive(Ipc)] impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { let header = self.block_header(BlockID::Latest).unwrap(); @@ -770,7 +792,7 @@ impl BlockChainClient for Client { self.build_last_hashes(self.chain.best_block_hash()) } - fn import_transactions(&self, transactions: Vec) -> Vec> { + fn import_transactions(&self, transactions: Vec) -> Vec> { let fetch_account = |a: &Address| AccountDetails { nonce: self.latest_nonce(a), balance: self.latest_balance(a), diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index 7f5b59c38..b6c8d4edf 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -197,6 +197,7 @@ struct VerifiedTransaction { /// transaction origin origin: TransactionOrigin, } + impl VerifiedTransaction { fn new(transaction: SignedTransaction, origin: TransactionOrigin) -> Result { try!(transaction.sender()); diff --git a/ethcore/src/types/block_queue_info.rs b/ethcore/src/types/block_queue_info.rs new file mode 100644 index 
000000000..d6fd8a9cc --- /dev/null +++ b/ethcore/src/types/block_queue_info.rs @@ -0,0 +1,32 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +/// Block queue status +#[derive(Debug, Binary)] +pub struct BlockQueueInfo { + /// Number of queued blocks pending verification + pub unverified_queue_size: usize, + /// Number of verified queued blocks pending import + pub verified_queue_size: usize, + /// Number of blocks being verified + pub verifying_queue_size: usize, + /// Configured maximum number of blocks in the queue + pub max_queue_size: usize, + /// Configured maximum number of bytes to use + pub max_mem_use: usize, + /// Heap memory used in bytes + pub mem_used: usize, +} diff --git a/ethcore/src/types/mod.rs.in b/ethcore/src/types/mod.rs.in index b51e9e57b..a60ee0f08 100644 --- a/ethcore/src/types/mod.rs.in +++ b/ethcore/src/types/mod.rs.in @@ -25,3 +25,4 @@ pub mod executed; pub mod block_status; pub mod account_diff; pub mod state_diff; +pub mod block_queue_info; From a14f2391ffae88f665fc375f78758fadeb4c37e1 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 21:35:22 +0300 Subject: [PATCH 08/36] sorting with transaction result --- ethcore/src/client/client.rs.in | 4 ++- ethcore/src/miner/transaction_queue.rs | 10 +------ ethcore/src/types/block_queue_info.rs | 4 +++ ethcore/src/types/mod.rs.in | 1 + .../src/types/transaction_import_result.rs | 28 +++++++++++++++++++ 5 files changed, 37 insertions(+), 10 deletions(-) create mode 100644 ethcore/src/types/transaction_import_result.rs diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index 1dc6512ab..cb0494696 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -797,7 +797,9 @@ impl BlockChainClient for Client { nonce: self.latest_nonce(a), balance: self.latest_balance(a), }; - self.miner.import_transactions(self, transactions, fetch_account) + self.miner.import_transactions(self, transactions, fetch_account).iter() + .map(|res| res.map_err(|e| format!("{:?}", e))) + .collect::>>() } fn queue_transactions(&self, transactions: Vec) { diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index b6c8d4edf..130d46875 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -90,6 +90,7 @@ use util::hash::{Address, H256}; use util::table::*; use transaction::*; use error::{Error, TransactionError}; +pub use types::transaction_import_result::TransactionImportResult; /// Transaction origin #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -310,15 +311,6 @@ pub struct TransactionQueueStatus { pub future: usize, } -#[derive(Debug, PartialEq)] -/// Represents the result of importing transaction. -pub enum TransactionImportResult { - /// Transaction was imported to current queue. - Current, - /// Transaction was imported to future queue. 
- Future -} - /// Details of account pub struct AccountDetails { /// Most recent account nonce diff --git a/ethcore/src/types/block_queue_info.rs b/ethcore/src/types/block_queue_info.rs index d6fd8a9cc..00e9b059f 100644 --- a/ethcore/src/types/block_queue_info.rs +++ b/ethcore/src/types/block_queue_info.rs @@ -14,6 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::mem; +use ipc::binary::BinaryConvertError; +use std::collections::VecDeque; + /// Block queue status #[derive(Debug, Binary)] pub struct BlockQueueInfo { diff --git a/ethcore/src/types/mod.rs.in b/ethcore/src/types/mod.rs.in index a60ee0f08..7f00550ff 100644 --- a/ethcore/src/types/mod.rs.in +++ b/ethcore/src/types/mod.rs.in @@ -26,3 +26,4 @@ pub mod block_status; pub mod account_diff; pub mod state_diff; pub mod block_queue_info; +pub mod transaction_import_result; diff --git a/ethcore/src/types/transaction_import_result.rs b/ethcore/src/types/transaction_import_result.rs new file mode 100644 index 000000000..632cf34c6 --- /dev/null +++ b/ethcore/src/types/transaction_import_result.rs @@ -0,0 +1,28 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::mem; +use ipc::binary::BinaryConvertError; +use std::collections::VecDeque; + +#[derive(Debug, PartialEq)] +/// Represents the result of importing transaction. +pub enum TransactionImportResult { + /// Transaction was imported to current queue. + Current, + /// Transaction was imported to future queue. 
+ Future +} From f6ec1eae4ac38f93c286cc8c97a2eb9f4c73931b Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 21:37:23 +0300 Subject: [PATCH 09/36] sorting out util imports --- ethcore/src/client/client.rs.in | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index cb0494696..5ff4cc4aa 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -23,7 +23,7 @@ use std::collections::VecDeque; use std::sync::*; use std::path::Path; use std::fmt; -use util::Itertools; +use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; // util use util::numbers::*; @@ -33,11 +33,12 @@ use util::io::*; use util::rlp; use util::sha3::*; use util::{UtilError, CryptoError, Bytes, Signature, Secret, ec}; -use util::rlp::{encode, decode, RlpStream, Rlp}; +use util::rlp::{encode, decode, RlpStream, Rlp, UntrustedRlp}; use util::journaldb; use util::journaldb::JournalDB; -use std::path::PathBuf; -use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; +use util::kvdb::*; +use util::Itertools; +use util::PerfTimer; // other use views::BlockView; From d5be0fae540c5e493aca544a6c15df3e7e113d73 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 29 Jun 2016 21:48:22 +0300 Subject: [PATCH 10/36] transaction import result sorting also --- ethcore/src/client/client.rs.in | 9 ++++++--- ethcore/src/client/mod.rs | 2 +- ethcore/src/client/test_client.rs | 7 +++++-- ethcore/src/types/transaction_import_result.rs | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index 5ff4cc4aa..6aae81650 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -39,6 +39,8 @@ use util::journaldb::JournalDB; use util::kvdb::*; use util::Itertools; use util::PerfTimer; +use util::View; +use util::Stream; // other use views::BlockView; @@ -501,7 +503,7 @@ impl Client { } } -#[derive(Ipc)] +//#[derive(Ipc)] impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { let header = self.block_header(BlockID::Latest).unwrap(); @@ -798,8 +800,9 @@ impl BlockChainClient for Client { nonce: self.latest_nonce(a), balance: self.latest_balance(a), }; - self.miner.import_transactions(self, transactions, fetch_account).iter() - .map(|res| res.map_err(|e| format!("{:?}", e))) + self.miner.import_transactions(self, transactions, fetch_account) + .iter() + .map(|res| match res { &Ok(ref t) => Ok(t.clone()), &Err(ref e) => Err(format!("{:?}", e)) }) .collect::>>() } diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index e4e454d03..838e4ecee 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -188,7 +188,7 @@ pub trait BlockChainClient : Sync + Send { fn last_hashes(&self) -> LastHashes; /// import transactions from network/other 3rd party - fn import_transactions(&self, transactions: Vec) -> Vec>; + fn import_transactions(&self, transactions: Vec) -> Vec>; /// Queue transactions for importing. 
fn queue_transactions(&self, transactions: Vec); diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index b54c7f867..14579f905 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -483,7 +483,7 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } - fn import_transactions(&self, transactions: Vec) -> Vec> { + fn import_transactions(&self, transactions: Vec) -> Vec> { let nonces = self.nonces.read().unwrap(); let balances = self.balances.read().unwrap(); let fetch_account = |a: &Address| AccountDetails { @@ -491,7 +491,10 @@ impl BlockChainClient for TestBlockChainClient { balance: balances[a], }; - self.miner.import_transactions(self, transactions, &fetch_account) + self.miner.import_transactions(self, transactions, fetch_account) + .iter() + .map(|res| match res { &Ok(ref t) => Ok(t.clone()), &Err(ref e) => Err(format!("{:?}", e)) }) + .collect::>>() } fn queue_transactions(&self, transactions: Vec) { diff --git a/ethcore/src/types/transaction_import_result.rs b/ethcore/src/types/transaction_import_result.rs index 632cf34c6..81d4c3d32 100644 --- a/ethcore/src/types/transaction_import_result.rs +++ b/ethcore/src/types/transaction_import_result.rs @@ -18,7 +18,7 @@ use std::mem; use ipc::binary::BinaryConvertError; use std::collections::VecDeque; -#[derive(Debug, PartialEq)] +#[derive(Debug, Clone, PartialEq)] /// Represents the result of importing transaction. pub enum TransactionImportResult { /// Transaction was imported to current queue. From 2891b7b4eafc60d791881a592f0975298dd86a4e Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 30 Jun 2016 00:15:34 +0300 Subject: [PATCH 11/36] sorting filters & ranges --- ethcore/src/client/client.rs.in | 2 +- ethcore/src/client/mod.rs | 2 +- ethcore/src/client/trace.rs | 11 +------ ethcore/src/lib.rs | 1 - ethcore/src/{ => types}/filter.rs | 24 +++++++++----- ethcore/src/types/mod.rs.in | 2 ++ ethcore/src/types/trace_filter.rs | 33 +++++++++++++++++++ .../src/types/transaction_import_result.rs | 2 +- ipc/rpc/src/binary.rs | 27 +++++++++++++-- 9 files changed, 80 insertions(+), 24 deletions(-) rename ethcore/src/{ => types}/filter.rs (95%) create mode 100644 ethcore/src/types/trace_filter.rs diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index 6aae81650..4d497876a 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -57,7 +57,7 @@ use verification::{PreverifiedBlock, Verifier}; use block::*; use transaction::{LocalizedTransaction, SignedTransaction, Action}; use blockchain::extras::TransactionAddress; -use filter::Filter; +use types::filter::Filter; use log_entry::LocalizedLogEntry; use block_queue::{BlockQueue, BlockQueueInfo}; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 838e4ecee..04b2c23c7 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -27,7 +27,7 @@ pub use self::config::{ClientConfig, DatabaseCompactionProfile, BlockQueueConfig pub use self::error::Error; pub use types::ids::*; pub use self::test_client::{TestBlockChainClient, EachBlockWith}; -pub use self::trace::Filter as TraceFilter; +pub use types::trace_filter::Filter as TraceFilter; pub use executive::{Executed, Executive, TransactOptions}; pub use env_info::{LastHashes, EnvInfo}; diff --git a/ethcore/src/client/trace.rs b/ethcore/src/client/trace.rs index 15920dea9..42e5d4104 100644 --- a/ethcore/src/client/trace.rs 
+++ b/ethcore/src/client/trace.rs @@ -8,6 +8,7 @@ use trace::DatabaseExtras as TraceDatabaseExtras; use blockchain::{BlockChain, BlockProvider}; use blockchain::extras::TransactionAddress; use super::BlockID; +pub use types::trace_filter::Filter; impl TraceDatabaseExtras for BlockChain { fn block_hash(&self, block_number: BlockNumber) -> Option { @@ -26,13 +27,3 @@ impl TraceDatabaseExtras for BlockChain { .map(|tx| tx.hash()) } } - -/// Easy to use trace filter. -pub struct Filter { - /// Range of filtering. - pub range: Range, - /// From address. - pub from_address: Vec
<Address>, - /// To address. - pub to_address: Vec<Address>
, -} diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 2379e6805..766d86254 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -104,7 +104,6 @@ pub mod block_queue; pub mod client; pub mod error; pub mod ethereum; -pub mod filter; pub mod header; pub mod service; pub mod trace; diff --git a/ethcore/src/filter.rs b/ethcore/src/types/filter.rs similarity index 95% rename from ethcore/src/filter.rs rename to ethcore/src/types/filter.rs index d99f80050..7d9b25071 100644 --- a/ethcore/src/filter.rs +++ b/ethcore/src/types/filter.rs @@ -20,6 +20,9 @@ use util::hash::*; use util::sha3::*; use client::BlockID; use log_entry::LogEntry; +use ipc::binary::BinaryConvertError; +use std::mem; +use std::collections::VecDeque; /// Blockchain Filter. pub struct Filter { @@ -29,22 +32,27 @@ pub struct Filter { /// Till this block. pub to_block: BlockID, - /// Search addresses. - /// + /// Search addresses. + /// /// If None, match all. /// If specified, log must be produced by one of these addresses. pub address: Option>, /// Search topics. - /// + /// /// If None, match all. /// If specified, log must contain one of these topics. - pub topics: [Option>; 4], + pub topics: Vec>>, } impl Clone for Filter { fn clone(&self) -> Self { - let mut topics = [None, None, None, None]; + let mut topics = [ + None, + None, + None, + None, + ]; for i in 0..4 { topics[i] = self.topics[i].clone(); } @@ -53,13 +61,13 @@ impl Clone for Filter { from_block: self.from_block.clone(), to_block: self.to_block.clone(), address: self.address.clone(), - topics: topics + topics: topics[..].to_vec() } } } impl Filter { - /// Returns combinations of each address and topic. + /// Returns combinations of each address and topic. pub fn bloom_possibilities(&self) -> Vec { let blooms = match self.address { Some(ref addresses) if !addresses.is_empty() => @@ -71,7 +79,7 @@ impl Filter { _ => vec![H2048::new()] }; - self.topics.iter().fold(blooms, | bs, topic | match *topic { + self.topics.iter().fold(blooms, |bs, topic| match *topic { None => bs, Some(ref topics) => bs.into_iter().flat_map(|bloom| { topics.into_iter().map(|topic| { diff --git a/ethcore/src/types/mod.rs.in b/ethcore/src/types/mod.rs.in index 7f00550ff..183812ed9 100644 --- a/ethcore/src/types/mod.rs.in +++ b/ethcore/src/types/mod.rs.in @@ -27,3 +27,5 @@ pub mod account_diff; pub mod state_diff; pub mod block_queue_info; pub mod transaction_import_result; +pub mod filter; +pub mod trace_filter; diff --git a/ethcore/src/types/trace_filter.rs b/ethcore/src/types/trace_filter.rs new file mode 100644 index 000000000..c374f7273 --- /dev/null +++ b/ethcore/src/types/trace_filter.rs @@ -0,0 +1,33 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
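// Illustrative sketch, not part of the patch: with `topics` changed from a fixed
// `[Option<Vec<H256>>; 4]` array to a `Vec<Option<Vec<H256>>>`, a caller builds a log
// filter roughly as below. The zero hash is a placeholder topic and the helper name
// is hypothetical; it assumes the usual ethcore client imports are in scope.
fn example_logs(client: &BlockChainClient) -> Vec<LocalizedLogEntry> {
    let filter = Filter {
        from_block: BlockID::Earliest,
        to_block: BlockID::Latest,
        address: None,                                            // any address
        topics: vec![Some(vec![H256::new()]), None, None, None],  // constrain topic 0 only
    };
    client.logs(filter)
}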
+ +use std::mem; +use ipc::binary::{BinaryConvertError, BinaryConvertable}; +use std::collections::VecDeque; +use std::ops::Range; +use util::{Address, H256}; +use types::ids::BlockID; + +/// Easy to use trace filter. +#[derive(Binary)] +pub struct Filter { + /// Range of filtering. + pub range: Range, + /// From address. + pub from_address: Vec
<Address>, + /// To address. + pub to_address: Vec<Address>
, +} diff --git a/ethcore/src/types/transaction_import_result.rs b/ethcore/src/types/transaction_import_result.rs index 81d4c3d32..c2068b0bc 100644 --- a/ethcore/src/types/transaction_import_result.rs +++ b/ethcore/src/types/transaction_import_result.rs @@ -18,7 +18,7 @@ use std::mem; use ipc::binary::BinaryConvertError; use std::collections::VecDeque; -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Binary)] /// Represents the result of importing transaction. pub enum TransactionImportResult { /// Transaction was imported to current queue. diff --git a/ipc/rpc/src/binary.rs b/ipc/rpc/src/binary.rs index ea49b3454..aabe19ed1 100644 --- a/ipc/rpc/src/binary.rs +++ b/ipc/rpc/src/binary.rs @@ -319,6 +319,31 @@ impl BinaryConvertable for String { } } +impl BinaryConvertable for Range where T: BinaryConvertable { + fn size(&self) -> usize { + mem::size_of::() * 2 + } + + fn from_empty_bytes() -> Result { + Err(BinaryConvertError) + } + + fn to_bytes(&self, buffer: &mut[u8], length_stack: &mut VecDeque) -> Result<(), BinaryConvertError> { + try!(self.start.to_bytes(&mut buffer[..mem::size_of::()], length_stack)); + try!(self.end.to_bytes(&mut buffer[mem::size_of::() + 1..], length_stack)); + Ok(()) + } + + fn from_bytes(buffer: &[u8], length_stack: &mut VecDeque) -> Result { + Ok(try!(T::from_bytes(&buffer[..mem::size_of::()], length_stack))..try!(T::from_bytes(&buffer[mem::size_of::()+1..], length_stack))) + } + + fn len_params() -> usize { + assert_eq!(0, T::len_params()); + 0 + } +} + impl BinaryConvertable for ::std::cell::RefCell where T: BinaryConvertable { fn size(&self) -> usize { self.borrow().size() @@ -543,8 +568,6 @@ binary_fixed_size!(U512); binary_fixed_size!(H256); binary_fixed_size!(H2048); binary_fixed_size!(Address); -binary_fixed_size!(Range); -binary_fixed_size!(Range); #[test] fn vec_serialize() { From 3c061857c4411f7dfa2fce4b590a7cf75b90b721 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 30 Jun 2016 00:56:12 +0300 Subject: [PATCH 12/36] error sorting out --- ethcore/src/client/client.rs.in | 12 ++++---- ethcore/src/client/mod.rs | 15 ++-------- ethcore/src/client/test_client.rs | 5 ++-- ethcore/src/error.rs | 15 ++++++++++ ethcore/src/types/block_import_error.rs | 38 +++++++++++++++++++++++++ ethcore/src/types/call_analytics.rs | 30 +++++++++++++++++++ ethcore/src/types/filter.rs | 1 + ethcore/src/types/mod.rs.in | 2 ++ 8 files changed, 99 insertions(+), 19 deletions(-) create mode 100644 ethcore/src/types/block_import_error.rs create mode 100644 ethcore/src/types/call_analytics.rs diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index 4d497876a..ab6b8ec17 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -61,7 +61,9 @@ use types::filter::Filter; use log_entry::LocalizedLogEntry; use block_queue::{BlockQueue, BlockQueueInfo}; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; -use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient, TraceFilter, CallAnalytics}; +use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, + DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient, + TraceFilter, CallAnalytics, BlockImportError}; use client::Error as ClientError; use env_info::EnvInfo; use executive::{Executive, Executed, TransactOptions, contract_address}; @@ -671,17 +673,17 @@ impl BlockChainClient for Client { self.chain.block_receipts(hash).map(|receipts| 
rlp::encode(&receipts).to_vec()) } - fn import_block(&self, bytes: Bytes) -> ImportResult { + fn import_block(&self, bytes: Bytes) -> Result { { let header = BlockView::new(&bytes).header_view(); if self.chain.is_known(&header.sha3()) { - return Err(ImportError::AlreadyInChain.into()); + return Err(BlockImportError::Import(ImportError::AlreadyInChain)); } if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown { - return Err(BlockError::UnknownParent(header.parent_hash()).into()); + return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash())); } } - self.block_queue.import_block(bytes) + Ok(try!(self.block_queue.import_block(bytes))) } fn queue_info(&self) -> BlockQueueInfo { diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 04b2c23c7..e3be447a3 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -49,17 +49,8 @@ use trace::LocalizedTrace; use evm::Factory as EvmFactory; use miner::{TransactionImportResult}; use error::Error as EthError; - -/// Options concerning what analytics we run on the call. -#[derive(Eq, PartialEq, Default, Clone, Copy, Debug)] -pub struct CallAnalytics { - /// Make a transaction trace. - pub transaction_tracing: bool, - /// Make a VM trace. - pub vm_tracing: bool, - /// Make a diff. - pub state_diffing: bool, -} +pub use types::call_analytics::CallAnalytics; +pub use types::block_import_error::BlockImportError; /// Blockchain database client. Owns and manages a blockchain and a block queue. pub trait BlockChainClient : Sync + Send { @@ -145,7 +136,7 @@ pub trait BlockChainClient : Sync + Send { fn block_receipts(&self, hash: &H256) -> Option; /// Import a block into the blockchain. - fn import_block(&self, bytes: Bytes) -> ImportResult; + fn import_block(&self, bytes: Bytes) -> Result; /// Get block queue information. 
fn queue_info(&self) -> BlockQueueInfo; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 14579f905..a862501ba 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -20,7 +20,8 @@ use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder}; use util::*; use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action}; use blockchain::TreeRoute; -use client::{BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockID, TransactionID, UncleID, TraceId, TraceFilter, LastHashes, CallAnalytics}; +use client::{BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockID, + TransactionID, UncleID, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError}; use header::{Header as BlockHeader, BlockNumber}; use filter::Filter; use log_entry::LocalizedLogEntry; @@ -398,7 +399,7 @@ impl BlockChainClient for TestBlockChainClient { None } - fn import_block(&self, b: Bytes) -> ImportResult { + fn import_block(&self, b: Bytes) -> Result { let header = Rlp::new(&b).val_at::(0); let h = header.hash(); let number: usize = header.number as usize; diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 92d3cbe6b..0ee09c354 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -20,6 +20,8 @@ use util::*; use header::BlockNumber; use basic_types::LogBloom; use client::Error as ClientError; +use client::BlockImportError; +use ipc::binary::{BinaryConvertable, BinaryConvertError}; pub use types::executed::ExecutionError; @@ -194,6 +196,9 @@ pub enum ImportError { KnownBad, } +binary_fixed_size!(BlockError); +binary_fixed_size!(ImportError); + impl fmt::Display for ImportError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let msg = match *self { @@ -312,6 +317,16 @@ impl From for Error { } } +impl From for Error { + fn from(err: BlockImportError) -> Error { + match err { + BlockImportError::Block(e) => Error::Block(e), + BlockImportError::Import(e) => Error::Import(e), + BlockImportError::Other(s) => Error::Util(UtilError::SimpleString(s)), + } + } +} + // TODO: uncomment below once https://github.com/rust-lang/rust/issues/27336 sorted. /*#![feature(concat_idents)] macro_rules! assimilate { diff --git a/ethcore/src/types/block_import_error.rs b/ethcore/src/types/block_import_error.rs new file mode 100644 index 000000000..96da43c22 --- /dev/null +++ b/ethcore/src/types/block_import_error.rs @@ -0,0 +1,38 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
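// Illustrative sketch, not part of the patch: with `import_block` now returning the
// dedicated `BlockImportError` instead of the general ethcore `Error`, callers can
// match the import-specific cases directly. Hypothetical helper; assumes the relevant
// `ethcore::client` and `ethcore::error` imports (and the `log` macros) are in scope.
fn import_or_skip(client: &BlockChainClient, bytes: Bytes) {
    match client.import_block(bytes) {
        Ok(_) => {}
        Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {
            trace!("Skipping block already in chain.");
        }
        Err(e) => warn!("Cannot import block: {:?}", e),
    }
}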
+ +use std::mem; +use ipc::binary::BinaryConvertError; +use std::collections::VecDeque; +use error::{ImportError, BlockError, Error}; +use std::convert::From; + +#[derive(Binary, Debug)] +pub enum BlockImportError { + Import(ImportError), + Block(BlockError), + Other(String), +} + +impl From for BlockImportError { + fn from(e: Error) -> Self { + match e { + Error::Block(block_error) => BlockImportError::Block(block_error), + Error::Import(import_error) => BlockImportError::Import(import_error), + _ => BlockImportError::Other(format!("other block import error: {:?}", e)), + } + } +} diff --git a/ethcore/src/types/call_analytics.rs b/ethcore/src/types/call_analytics.rs new file mode 100644 index 000000000..56a6fbb74 --- /dev/null +++ b/ethcore/src/types/call_analytics.rs @@ -0,0 +1,30 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::mem; +use ipc::binary::{BinaryConvertError, BinaryConvertable}; +use std::collections::VecDeque; + +/// Options concerning what analytics we run on the call. +#[derive(Eq, PartialEq, Default, Clone, Copy, Debug, Binary)] +pub struct CallAnalytics { + /// Make a transaction trace. + pub transaction_tracing: bool, + /// Make a VM trace. + pub vm_tracing: bool, + /// Make a diff. + pub state_diffing: bool, +} diff --git a/ethcore/src/types/filter.rs b/ethcore/src/types/filter.rs index 7d9b25071..af278cd92 100644 --- a/ethcore/src/types/filter.rs +++ b/ethcore/src/types/filter.rs @@ -25,6 +25,7 @@ use std::mem; use std::collections::VecDeque; /// Blockchain Filter. +#[derive(Binary)] pub struct Filter { /// Blockchain will be searched from this block. 
pub from_block: BlockID, diff --git a/ethcore/src/types/mod.rs.in b/ethcore/src/types/mod.rs.in index 183812ed9..6330ed29b 100644 --- a/ethcore/src/types/mod.rs.in +++ b/ethcore/src/types/mod.rs.in @@ -29,3 +29,5 @@ pub mod block_queue_info; pub mod transaction_import_result; pub mod filter; pub mod trace_filter; +pub mod call_analytics; +pub mod block_import_error; From 8fc03e2dc2da2f4f7f101b70c1f1562a61f7c36a Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 30 Jun 2016 01:09:35 +0300 Subject: [PATCH 13/36] deriving ipc service compiling --- ethcore/src/client/client.rs.in | 10 +++++++--- ethcore/src/client/mod.rs | 4 ++-- ethcore/src/client/test_client.rs | 6 ++++-- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index ab6b8ec17..d47b0a266 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -505,7 +505,7 @@ impl Client { } } -//#[derive(Ipc)] +#[derive(Ipc)] impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { let header = self.block_header(BlockID::Latest).unwrap(); @@ -690,8 +690,9 @@ impl BlockChainClient for Client { self.block_queue.queue_info() } - fn clear_queue(&self) { + fn clear_queue(&self) -> bool { self.block_queue.clear(); + true } fn chain_info(&self) -> BlockChainInfo { @@ -808,17 +809,20 @@ impl BlockChainClient for Client { .collect::>>() } - fn queue_transactions(&self, transactions: Vec) { + fn queue_transactions(&self, transactions: Vec) -> bool { if self.queue_transactions.load(AtomicOrdering::Relaxed) > MAX_TX_QUEUE_SIZE { debug!("Ignoring {} transactions: queue is full", transactions.len()); + false } else { let len = transactions.len(); match self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewTransactions(transactions))) { Ok(_) => { self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst); + true } Err(e) => { debug!("Ignoring {} transactions: error queueing: {}", len, e); + false } } } diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index e3be447a3..66c747e07 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -142,7 +142,7 @@ pub trait BlockChainClient : Sync + Send { fn queue_info(&self) -> BlockQueueInfo; /// Clear block queue and abort all import activity. - fn clear_queue(&self); + fn clear_queue(&self) -> bool; /// Get blockchain information. fn chain_info(&self) -> BlockChainInfo; @@ -182,7 +182,7 @@ pub trait BlockChainClient : Sync + Send { fn import_transactions(&self, transactions: Vec) -> Vec>; /// Queue transactions for importing. 
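// Illustrative sketch, not part of the patch: `queue_transactions` (like `clear_queue`)
// now reports success as a bool, so a hypothetical caller can observe back-pressure:
//
//     if !client.queue_transactions(transactions) {
//         debug!("Transaction batch not queued (queue full or channel send failed).");
//     }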
- fn queue_transactions(&self, transactions: Vec); + fn queue_transactions(&self, transactions: Vec) -> bool; /// list all transactions fn pending_transactions(&self) -> Vec; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index a862501ba..defbf701f 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -455,7 +455,8 @@ impl BlockChainClient for TestBlockChainClient { } } - fn clear_queue(&self) { + fn clear_queue(&self) -> bool { + true } fn chain_info(&self) -> BlockChainInfo { @@ -498,10 +499,11 @@ impl BlockChainClient for TestBlockChainClient { .collect::>>() } - fn queue_transactions(&self, transactions: Vec) { + fn queue_transactions(&self, transactions: Vec) -> bool { // import right here let tx = transactions.into_iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect(); self.import_transactions(tx); + true } fn pending_transactions(&self) -> Vec { From ef8dd23254d8f1aa2031854dc78279c4283ee303 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 30 Jun 2016 19:53:57 +0300 Subject: [PATCH 14/36] rpc & sync recompile --- Cargo.lock | 1 + rpc/Cargo.toml | 1 + rpc/src/lib.rs | 1 + rpc/src/v1/impls/eth.rs | 4 ++-- rpc/src/v1/types/filter.rs | 2 +- rpc/src/v1/types/trace.rs | 4 ++-- sync/src/chain.rs | 14 +++++++------- 7 files changed, 15 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f8b29bdac..09c74902c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -342,6 +342,7 @@ dependencies = [ "ethash 1.3.0", "ethcore 1.3.0", "ethcore-devtools 1.3.0", + "ethcore-ipc 1.3.0", "ethcore-util 1.3.0", "ethjson 0.1.0", "ethsync 1.3.0", diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 21a59ad50..f5563b50b 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -25,6 +25,7 @@ transient-hashmap = "0.1" serde_macros = { version = "0.7.0", optional = true } clippy = { version = "0.0.77", optional = true} json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" } +ethcore-ipc = { path = "../ipc/rpc" } [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 43c120e40..73a769b13 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -32,6 +32,7 @@ extern crate ethcore; extern crate ethsync; extern crate transient_hashmap; extern crate json_ipc_server as ipc; +extern crate ethcore_ipc; #[cfg(test)] extern crate ethjson; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 05dc89564..651fab434 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -435,12 +435,12 @@ impl Eth for EthClient where fn uncle_by_block_hash_and_index(&self, params: Params) -> Result { from_params::<(H256, Index)>(params) - .and_then(|(hash, index)| self.uncle(UncleID(BlockID::Hash(hash), index.value()))) + .and_then(|(hash, index)| self.uncle(UncleID { block: BlockID::Hash(hash), position: index.value() })) } fn uncle_by_block_number_and_index(&self, params: Params) -> Result { from_params::<(BlockNumber, Index)>(params) - .and_then(|(number, index)| self.uncle(UncleID(number.into(), index.value()))) + .and_then(|(number, index)| self.uncle(UncleID { block: number.into(), position: index.value() })) } fn compilers(&self, params: Params) -> Result { diff --git a/rpc/src/v1/types/filter.rs b/rpc/src/v1/types/filter.rs index 77a3f0500..334ec3095 100644 --- a/rpc/src/v1/types/filter.rs +++ b/rpc/src/v1/types/filter.rs @@ -85,7 +85,7 @@ impl Into for Filter { VariadicValue::Single(t) => Some(vec![t]), 
VariadicValue::Multiple(t) => Some(t) }).filter_map(|m| m).collect()).into_iter(); - [iter.next(), iter.next(), iter.next(), iter.next()] + vec![iter.next(), iter.next(), iter.next(), iter.next()] } } } diff --git a/rpc/src/v1/types/trace.rs b/rpc/src/v1/types/trace.rs index d6226aea0..c7d0ff65c 100644 --- a/rpc/src/v1/types/trace.rs +++ b/rpc/src/v1/types/trace.rs @@ -162,7 +162,7 @@ pub enum Diff where T: Serialize { Changed(ChangedType), } -impl From> for Diff where T: Eq, U: Serialize + From { +impl From> for Diff where T: Eq + ::ethcore_ipc::BinaryConvertable, U: Serialize + From { fn from(c: account_diff::Diff) -> Self { match c { account_diff::Diff::Same => Diff::Same, @@ -205,7 +205,7 @@ impl Serialize for StateDiff { impl From for StateDiff { fn from(c: state_diff::StateDiff) -> Self { - StateDiff(c.0.into_iter().map(|(k, v)| (k, v.into())).collect()) + StateDiff(c.raw.into_iter().map(|(k, v)| (k, v.into())).collect()) } } diff --git a/sync/src/chain.rs b/sync/src/chain.rs index aa3657419..4c901394c 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -93,7 +93,7 @@ use util::*; use std::mem::{replace}; use ethcore::views::{HeaderView, BlockView}; use ethcore::header::{BlockNumber, Header as BlockHeader}; -use ethcore::client::{BlockChainClient, BlockStatus, BlockID, BlockChainInfo}; +use ethcore::client::{BlockChainClient, BlockStatus, BlockID, BlockChainInfo, BlockImportError}; use ethcore::error::*; use ethcore::block::Block; use io::SyncIo; @@ -544,10 +544,10 @@ impl ChainSync { peer.latest_number = Some(header.number()); } match io.chain().import_block(block_rlp.as_raw().to_vec()) { - Err(Error::Import(ImportError::AlreadyInChain)) => { + Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { trace!(target: "sync", "New block already in chain {:?}", h); }, - Err(Error::Import(ImportError::AlreadyQueued)) => { + Err(BlockImportError::Import(ImportError::AlreadyQueued)) => { trace!(target: "sync", "New block already queued {:?}", h); }, Ok(_) => { @@ -557,7 +557,7 @@ impl ChainSync { } trace!(target: "sync", "New block queued {:?} ({})", h, header.number); }, - Err(Error::Block(BlockError::UnknownParent(p))) => { + Err(BlockImportError::Block(BlockError::UnknownParent(p))) => { unknown = true; trace!(target: "sync", "New block with unknown parent ({:?}) {:?}", p, h); }, @@ -841,11 +841,11 @@ impl ChainSync { } match io.chain().import_block(block) { - Err(Error::Import(ImportError::AlreadyInChain)) => { + Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { trace!(target: "sync", "Block already in chain {:?}", h); self.block_imported(&h, number, &parent); }, - Err(Error::Import(ImportError::AlreadyQueued)) => { + Err(BlockImportError::Import(ImportError::AlreadyQueued)) => { trace!(target: "sync", "Block already queued {:?}", h); self.block_imported(&h, number, &parent); }, @@ -854,7 +854,7 @@ impl ChainSync { imported.insert(h.clone()); self.block_imported(&h, number, &parent); }, - Err(Error::Block(BlockError::UnknownParent(_))) if self.state == SyncState::NewBlocks => { + Err(BlockImportError::Block(BlockError::UnknownParent(_))) if self.state == SyncState::NewBlocks => { trace!(target: "sync", "Unknown new block parent, restarting sync"); break; }, From 95538ac42c7dd08a289ce63ebd2fe1c5de339573 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 30 Jun 2016 20:06:24 +0300 Subject: [PATCH 15/36] sorting rpc using uncles --- rpc/src/v1/impls/eth.rs | 60 ++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 27 deletions(-) diff --git 
a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 651fab434..4013dd5a5 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -30,6 +30,7 @@ use util::sha3::*; use util::rlp::{encode, decode, UntrustedRlp, View}; use ethcore::account_provider::AccountProvider; use ethcore::client::{MiningBlockChainClient, BlockID, TransactionID, UncleID}; +use ethcore::header::Header as BlockHeader; use ethcore::block::IsBlock; use ethcore::views::*; use ethcore::ethereum::Ethash; @@ -126,33 +127,38 @@ impl EthClient where fn uncle(&self, id: UncleID) -> Result { let client = take_weak!(self.client); - match client.uncle(id).and_then(|u| client.block_total_difficulty(BlockID::Hash(u.parent_hash().clone())).map(|diff| (diff, u))) { - Some((parent_difficulty, uncle)) => { - let block = Block { - hash: OptionalValue::Value(uncle.hash()), - parent_hash: uncle.parent_hash, - uncles_hash: uncle.uncles_hash, - author: uncle.author, - miner: uncle.author, - state_root: uncle.state_root, - transactions_root: uncle.transactions_root, - number: OptionalValue::Value(U256::from(uncle.number)), - gas_used: uncle.gas_used, - gas_limit: uncle.gas_limit, - logs_bloom: uncle.log_bloom, - timestamp: U256::from(uncle.timestamp), - difficulty: uncle.difficulty, - total_difficulty: uncle.difficulty + parent_difficulty, - receipts_root: uncle.receipts_root, - extra_data: Bytes::new(uncle.extra_data), - seal_fields: uncle.seal.into_iter().map(|f| decode(&f)).map(Bytes::new).collect(), - uncles: vec![], - transactions: BlockTransactions::Hashes(vec![]), - }; - to_value(&block) - }, - None => Ok(Value::Null) - } + + let uncle: BlockHeader = match client.uncle(id) { + Some(rlp) => decode(&rlp), + None => { return Ok(Value::Null); } + }; + let parent_difficulty = match client.block_total_difficulty(BlockID::Hash(uncle.parent_hash().clone())) { + Some(difficulty) => difficulty, + None => { return Ok(Value::Null); } + }; + + let block = Block { + hash: OptionalValue::Value(uncle.hash()), + parent_hash: uncle.parent_hash, + uncles_hash: uncle.uncles_hash, + author: uncle.author, + miner: uncle.author, + state_root: uncle.state_root, + transactions_root: uncle.transactions_root, + number: OptionalValue::Value(U256::from(uncle.number)), + gas_used: uncle.gas_used, + gas_limit: uncle.gas_limit, + logs_bloom: uncle.log_bloom, + timestamp: U256::from(uncle.timestamp), + difficulty: uncle.difficulty, + total_difficulty: uncle.difficulty + parent_difficulty, + receipts_root: uncle.receipts_root, + extra_data: Bytes::new(uncle.extra_data), + seal_fields: uncle.seal.into_iter().map(|f| decode(&f)).map(Bytes::new).collect(), + uncles: vec![], + transactions: BlockTransactions::Hashes(vec![]), + }; + to_value(&block) } fn sign_call(&self, request: CallRequest) -> Result { From 080b50809a5ba502a01c5eb1abdf962028a3e70d Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 30 Jun 2016 20:27:53 +0300 Subject: [PATCH 16/36] fix compilation --- parity/main.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 047338bc8..6e38399f5 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -82,8 +82,8 @@ use rustc_serialize::hex::FromHex; use ctrlc::CtrlC; use util::{H256, ToPretty, NetworkConfiguration, PayloadInfo, Bytes, UtilError}; use util::panics::{MayPanic, ForwardPanic, PanicHandler}; -use ethcore::client::{BlockID, BlockChainClient, ClientConfig, get_db_path}; -use ethcore::error::{Error, ImportError}; +use ethcore::client::{BlockID, BlockChainClient, ClientConfig, 
get_db_path, BlockImportError}; +use ethcore::error::{ImportError}; use ethcore::service::ClientService; use ethcore::spec::Spec; use ethsync::EthSync; @@ -459,7 +459,7 @@ fn execute_import(conf: Configuration) { while client.queue_info().is_full() { sleep(Duration::from_secs(1)); } match client.import_block(bytes) { Ok(_) => {} - Err(Error::Import(ImportError::AlreadyInChain)) => { trace!("Skipping block already in chain."); } + Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { trace!("Skipping block already in chain."); } Err(e) => die!("Cannot import block: {:?}", e) } informant.tick(client.deref(), None); From bad02d65a10963e2916ec38bd892186293c210c3 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 30 Jun 2016 20:44:24 +0300 Subject: [PATCH 17/36] fix merging bugs --- ethcore/src/client/client.rs.in | 116 +++++++++++++++++++++----------- 1 file changed, 77 insertions(+), 39 deletions(-) diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index d47b0a266..be43581e8 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -278,7 +278,7 @@ impl Client { Ok(locked_block) } - fn calculate_enacted_retracted(&self, import_results: Vec) -> (Vec, Vec) { + fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec, Vec) { fn map_to_vec(map: Vec<(H256, bool)>) -> Vec { map.into_iter().map(|(k, _v)| k).collect() } @@ -288,12 +288,12 @@ impl Client { // could be retracted in import `k+1`. This is why to understand if after all inserts // the block is enacted or retracted we iterate over all routes and at the end final state // will be in the hashmap - let map = import_results.into_iter().fold(HashMap::new(), |mut map, route| { - for hash in route.enacted { - map.insert(hash, true); + let map = import_results.iter().fold(HashMap::new(), |mut map, route| { + for hash in &route.enacted { + map.insert(hash.clone(), true); } - for hash in route.retracted { - map.insert(hash, false); + for hash in &route.retracted { + map.insert(hash.clone(), false); } map }); @@ -330,36 +330,10 @@ impl Client { invalid_blocks.insert(header.hash()); continue; } + let closed_block = closed_block.unwrap(); imported_blocks.push(header.hash()); - // Are we committing an era? 
- let ancient = if header.number() >= HISTORY { - let n = header.number() - HISTORY; - Some((n, self.chain.block_hash(n).unwrap())) - } else { - None - }; - - // Commit results - let closed_block = closed_block.unwrap(); - let receipts = closed_block.block().receipts().clone(); - let traces = From::from(closed_block.block().traces().clone().unwrap_or_else(Vec::new)); - - closed_block.drain() - .commit(header.number(), &header.hash(), ancient) - .expect("State DB commit failed."); - - // And update the chain after commit to prevent race conditions - // (when something is in chain but you are not able to fetch details) - let route = self.chain.insert_block(&block.bytes, receipts); - self.tracedb.import(TraceImportRequest { - traces: traces, - block_hash: header.hash(), - block_number: header.number(), - enacted: route.enacted.clone(), - retracted: route.retracted.len() - }); - + let route = self.commit_block(closed_block, &header.hash(), &block.bytes); import_results.push(route); self.report.write().unwrap().accrue_block(&block); @@ -380,7 +354,7 @@ impl Client { { if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() { - let (enacted, retracted) = self.calculate_enacted_retracted(import_results); + let (enacted, retracted) = self.calculate_enacted_retracted(&import_results); if self.queue_info().is_empty() { self.miner.chain_new_blocks(self, &imported_blocks, &invalid_blocks, &enacted, &retracted); @@ -391,19 +365,50 @@ impl Client { invalid: invalid_blocks, enacted: enacted, retracted: retracted, + sealed: Vec::new(), })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); } } - { - if self.chain_info().best_block_hash != original_best { - self.miner.update_sealing(self); - } + if self.chain_info().best_block_hash != original_best { + self.miner.update_sealing(self); } imported } + fn commit_block(&self, block: B, hash: &H256, block_data: &Bytes) -> ImportRoute where B: IsBlock + Drain { + let number = block.header().number(); + // Are we committing an era? + let ancient = if number >= HISTORY { + let n = number - HISTORY; + Some((n, self.chain.block_hash(n).unwrap())) + } else { + None + }; + + // Commit results + let receipts = block.receipts().clone(); + let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); + + // CHECK! I *think* this is fine, even if the state_root is equal to another + // already-imported block of the same number. + // TODO: Prove it with a test. 
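+		// `drain()` hands back the block's state journal; `commit` writes those changes
+		// under (number, hash), with `ancient` marking the era that pruning-capable DBs
+		// may now discard. Panics if the state DB commit fails.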
+ block.drain().commit(number, hash, ancient).expect("State DB commit failed."); + + // And update the chain after commit to prevent race conditions + // (when something is in chain but you are not able to fetch details) + let route = self.chain.insert_block(block_data, receipts); + self.tracedb.import(TraceImportRequest { + traces: traces, + block_hash: hash.clone(), + block_number: number, + enacted: route.enacted.clone(), + retracted: route.retracted.len() + }); + route + } + /// Import transactions from the IO queue pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize { let _timer = PerfTimer::new("import_queued_transactions"); @@ -867,6 +872,39 @@ impl MiningBlockChainClient for Client { fn vm_factory(&self) -> &EvmFactory { &self.vm_factory } + + fn import_sealed_block(&self, block: SealedBlock) -> ImportResult { + let _import_lock = self.import_lock.lock(); + let _timer = PerfTimer::new("import_sealed_block"); + + let original_best = self.chain_info().best_block_hash; + + let h = block.header().hash(); + let number = block.header().number(); + + let block_data = block.rlp_bytes(); + let route = self.commit_block(block, &h, &block_data); + trace!(target: "client", "Imported sealed block #{} ({})", number, h); + + { + let (enacted, retracted) = self.calculate_enacted_retracted(&[route]); + self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted); + + self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { + imported: vec![h.clone()], + invalid: vec![], + enacted: enacted, + retracted: retracted, + sealed: vec![h.clone()], + })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); + } + + if self.chain_info().best_block_hash != original_best { + self.miner.update_sealing(self); + } + + Ok(h) + } } impl MayPanic for Client { From cdd1c6dd9095e7bfed7273750caf03e6bac9012c Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 30 Jun 2016 20:57:07 +0300 Subject: [PATCH 18/36] fix unused imports --- ethcore/src/blockchain/block_info.rs | 2 -- ethcore/src/client/client.rs.in | 9 ++++----- ethcore/src/client/mod.rs | 3 +-- ethcore/src/client/test_client.rs | 3 +-- ethcore/src/client/trace.rs | 4 +--- ethcore/src/types/block_import_error.rs | 5 +++++ ethcore/src/types/block_queue_info.rs | 2 ++ ethcore/src/types/call_analytics.rs | 2 ++ ethcore/src/types/trace_filter.rs | 2 ++ 9 files changed, 18 insertions(+), 14 deletions(-) diff --git a/ethcore/src/blockchain/block_info.rs b/ethcore/src/blockchain/block_info.rs index 52fd32291..42f1bd439 100644 --- a/ethcore/src/blockchain/block_info.rs +++ b/ethcore/src/blockchain/block_info.rs @@ -17,8 +17,6 @@ use util::numbers::{U256,H256}; use header::BlockNumber; -use util::bytes::{FromRawBytesVariable, FromBytesError, ToBytesWithMap}; - /// Brief info about inserted block. #[derive(Clone)] pub struct BlockInfo { diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index be43581e8..deebaa857 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::marker::PhantomData; use std::path::PathBuf; use std::collections::{HashSet, HashMap}; use std::ops::Deref; @@ -32,8 +31,8 @@ use util::network::*; use util::io::*; use util::rlp; use util::sha3::*; -use util::{UtilError, CryptoError, Bytes, Signature, Secret, ec}; -use util::rlp::{encode, decode, RlpStream, Rlp, UntrustedRlp}; +use util::{Bytes}; +use util::rlp::{RlpStream, Rlp, UntrustedRlp}; use util::journaldb; use util::journaldb::JournalDB; use util::kvdb::*; @@ -44,8 +43,8 @@ use util::Stream; // other use views::BlockView; -use error::{Error, ImportError, ExecutionError, BlockError, ImportResult}; -use header::{BlockNumber, Header}; +use error::{ImportError, ExecutionError, BlockError, ImportResult}; +use header::BlockNumber; use state::State; use spec::Spec; use engine::Engine; diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 67fdbaaec..7a2562f4b 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -38,7 +38,7 @@ use util::Itertools; use blockchain::TreeRoute; use block_queue::BlockQueueInfo; use block::{OpenBlock, SealedBlock}; -use header::{BlockNumber, Header}; +use header::{BlockNumber}; use transaction::{LocalizedTransaction, SignedTransaction}; use log_entry::LocalizedLogEntry; use filter::Filter; @@ -48,7 +48,6 @@ use receipt::LocalizedReceipt; use trace::LocalizedTrace; use evm::Factory as EvmFactory; use miner::{TransactionImportResult}; -use error::Error as EthError; pub use types::call_analytics::CallAnalytics; pub use types::block_import_error::BlockImportError; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 025a39c4a..eacf323b5 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -35,11 +35,10 @@ use spec::Spec; use block_queue::BlockQueueInfo; use block::{OpenBlock, SealedBlock}; use executive::Executed; -use error::{ExecutionError}; +use error::ExecutionError; use trace::LocalizedTrace; use miner::{TransactionImportResult, AccountDetails}; -use error::Error as EthError; /// Test client. pub struct TestBlockChainClient { diff --git a/ethcore/src/client/trace.rs b/ethcore/src/client/trace.rs index 42e5d4104..3ab01757e 100644 --- a/ethcore/src/client/trace.rs +++ b/ethcore/src/client/trace.rs @@ -1,13 +1,11 @@ //! Bridge between Tracedb and Blockchain. -use std::ops::Range; -use util::{Address, H256}; +use util::{H256}; use header::BlockNumber; use trace::DatabaseExtras as TraceDatabaseExtras; use blockchain::{BlockChain, BlockProvider}; use blockchain::extras::TransactionAddress; -use super::BlockID; pub use types::trace_filter::Filter; impl TraceDatabaseExtras for BlockChain { diff --git a/ethcore/src/types/block_import_error.rs b/ethcore/src/types/block_import_error.rs index 96da43c22..711751e0c 100644 --- a/ethcore/src/types/block_import_error.rs +++ b/ethcore/src/types/block_import_error.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! 
Block import error related types + use std::mem; use ipc::binary::BinaryConvertError; use std::collections::VecDeque; @@ -22,8 +24,11 @@ use std::convert::From; #[derive(Binary, Debug)] pub enum BlockImportError { + /// Import error Import(ImportError), + /// Block error Block(BlockError), + /// Other error Other(String), } diff --git a/ethcore/src/types/block_queue_info.rs b/ethcore/src/types/block_queue_info.rs index 00e9b059f..714f84ece 100644 --- a/ethcore/src/types/block_queue_info.rs +++ b/ethcore/src/types/block_queue_info.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Block queue info types + use std::mem; use ipc::binary::BinaryConvertError; use std::collections::VecDeque; diff --git a/ethcore/src/types/call_analytics.rs b/ethcore/src/types/call_analytics.rs index 56a6fbb74..3be1299aa 100644 --- a/ethcore/src/types/call_analytics.rs +++ b/ethcore/src/types/call_analytics.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Call analytics related types + use std::mem; use ipc::binary::{BinaryConvertError, BinaryConvertable}; use std::collections::VecDeque; diff --git a/ethcore/src/types/trace_filter.rs b/ethcore/src/types/trace_filter.rs index c374f7273..05cd83bb0 100644 --- a/ethcore/src/types/trace_filter.rs +++ b/ethcore/src/types/trace_filter.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Trace filter related types + use std::mem; use ipc::binary::{BinaryConvertError, BinaryConvertable}; use std::collections::VecDeque; From ba38cc0ccc6a68bc36d4a9f140781d07e538a52c Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 30 Jun 2016 21:00:52 +0300 Subject: [PATCH 19/36] fix all warnings --- ethcore/src/client/client.rs | 2 +- ethcore/src/types/block_import_error.rs | 1 + ethcore/src/types/trace_filter.rs | 2 +- ethcore/src/types/transaction_import_result.rs | 3 ++- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 8d15272a3..3c0733ec5 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -16,5 +16,5 @@ //! Blockchain database client. -#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues +#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues include!(concat!(env!("OUT_DIR"), "/client.ipc.rs")); diff --git a/ethcore/src/types/block_import_error.rs b/ethcore/src/types/block_import_error.rs index 711751e0c..8b400613f 100644 --- a/ethcore/src/types/block_import_error.rs +++ b/ethcore/src/types/block_import_error.rs @@ -22,6 +22,7 @@ use std::collections::VecDeque; use error::{ImportError, BlockError, Error}; use std::convert::From; +/// Error dedicated to import block function #[derive(Binary, Debug)] pub enum BlockImportError { /// Import error diff --git a/ethcore/src/types/trace_filter.rs b/ethcore/src/types/trace_filter.rs index 05cd83bb0..870792d15 100644 --- a/ethcore/src/types/trace_filter.rs +++ b/ethcore/src/types/trace_filter.rs @@ -20,7 +20,7 @@ use std::mem; use ipc::binary::{BinaryConvertError, BinaryConvertable}; use std::collections::VecDeque; use std::ops::Range; -use util::{Address, H256}; +use util::{Address}; use types::ids::BlockID; /// Easy to use trace filter. 
diff --git a/ethcore/src/types/transaction_import_result.rs b/ethcore/src/types/transaction_import_result.rs index c2068b0bc..dc883c8c6 100644 --- a/ethcore/src/types/transaction_import_result.rs +++ b/ethcore/src/types/transaction_import_result.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::mem; +//! Transaction import result related types + use ipc::binary::BinaryConvertError; use std::collections::VecDeque; From cfcf061e41c90fe184954bd37478293efa388277 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Mon, 4 Jul 2016 14:51:50 +0300 Subject: [PATCH 20/36] tests stub --- ethcore/src/tests/mod.rs | 3 ++- ethcore/src/tests/rpc.rs | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 ethcore/src/tests/rpc.rs diff --git a/ethcore/src/tests/mod.rs b/ethcore/src/tests/mod.rs index 28c1b3b5b..db36a3762 100644 --- a/ethcore/src/tests/mod.rs +++ b/ethcore/src/tests/mod.rs @@ -15,4 +15,5 @@ // along with Parity. If not, see . pub mod helpers; -mod client; \ No newline at end of file +mod client; +mod rpc; diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs new file mode 100644 index 000000000..f683a1d25 --- /dev/null +++ b/ethcore/src/tests/rpc.rs @@ -0,0 +1,22 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Client RPC tests + +#[test] +fn can_be_created() { + +} From b58754cd26ea0513b3b3f9b3540e49fbe56da1b4 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 5 Jul 2016 11:33:38 +0300 Subject: [PATCH 21/36] some merge bugs --- ethcore/src/client/mod.rs | 24 ++---------------------- ethcore/src/miner/transaction_queue.rs | 4 ---- ipc/rpc/src/binary.rs | 2 +- 3 files changed, 3 insertions(+), 27 deletions(-) diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index d6f9f584a..253d4cf32 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -47,26 +47,10 @@ use error::{ImportResult, ExecutionError}; use receipt::LocalizedReceipt; use trace::LocalizedTrace; use evm::Factory as EvmFactory; -<<<<<<< HEAD -use miner::{TransactionImportResult}; pub use types::call_analytics::CallAnalytics; -pub use types::block_import_error::BlockImportError; -======= pub use block_import_error::BlockImportError; pub use transaction_import::{TransactionImportResult, TransactionImportError}; -/// Options concerning what analytics we run on the call. -#[derive(Eq, PartialEq, Default, Clone, Copy, Debug)] -pub struct CallAnalytics { - /// Make a transaction trace. - pub transaction_tracing: bool, - /// Make a VM trace. - pub vm_tracing: bool, - /// Make a diff. - pub state_diffing: bool, -} ->>>>>>> master - /// Blockchain database client. Owns and manages a blockchain and a block queue. pub trait BlockChainClient : Sync + Send { /// Get raw block header data by block id. 
@@ -157,7 +141,7 @@ pub trait BlockChainClient : Sync + Send { fn queue_info(&self) -> BlockQueueInfo; /// Clear block queue and abort all import activity. - fn clear_queue(&self) -> bool; + fn clear_queue(&self); /// Get blockchain information. fn chain_info(&self) -> BlockChainInfo; @@ -194,14 +178,10 @@ pub trait BlockChainClient : Sync + Send { fn last_hashes(&self) -> LastHashes; /// import transactions from network/other 3rd party -<<<<<<< HEAD - fn import_transactions(&self, transactions: Vec) -> Vec>; -======= fn import_transactions(&self, transactions: Vec) -> Vec>; ->>>>>>> master /// Queue transactions for importing. - fn queue_transactions(&self, transactions: Vec) -> bool; + fn queue_transactions(&self, transactions: Vec); /// list all transactions fn pending_transactions(&self) -> Vec; diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index 3c2ad10d3..982a852e6 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -90,11 +90,7 @@ use util::hash::{Address, H256}; use util::table::*; use transaction::*; use error::{Error, TransactionError}; -<<<<<<< HEAD -pub use types::transaction_import_result::TransactionImportResult; -======= use client::TransactionImportResult; ->>>>>>> master /// Transaction origin #[derive(Clone, Copy, Debug, PartialEq, Eq)] diff --git a/ipc/rpc/src/binary.rs b/ipc/rpc/src/binary.rs index 77de07be3..cfc7420f2 100644 --- a/ipc/rpc/src/binary.rs +++ b/ipc/rpc/src/binary.rs @@ -145,7 +145,7 @@ impl BinaryConvertable for BTreeMap where K : BinaryConvertable + Or 0 => mem::size_of::() * self.len(), _ => self.iter().fold(0usize, |acc, (k, _)| acc + k.size()) } + match V::len_params() { - 0 => mem::size_of::() * self.len(), 0 => mem::size_of::() * self.len(), + 0 => mem::size_of::() * self.len(), _ => self.iter().fold(0usize, |acc, (_, v)| acc + v.size()) } } From a9a671dfc0054a65603f030c61087a059f21ab7e Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 5 Jul 2016 11:45:31 +0300 Subject: [PATCH 22/36] ethcore compilation --- ethcore/src/client/client.rs.in | 15 ++++++++------- ethcore/src/client/test_client.rs | 6 ++---- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index bb6421520..a83d59d11 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -62,23 +62,24 @@ use block_queue::{BlockQueue, BlockQueueInfo}; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient, - TraceFilter, CallAnalytics, BlockImportError}; + TraceFilter, CallAnalytics, BlockImportError, TransactionImportError, TransactionImportResult}; use client::Error as ClientError; use env_info::EnvInfo; use executive::{Executive, Executed, TransactOptions, contract_address}; use receipt::LocalizedReceipt; -pub use blockchain::CacheSize as BlockChainCacheSize; use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; use trace; -pub use types::blockchain_info::BlockChainInfo; -pub use types::block_status::BlockStatus; use evm::Factory as EvmFactory; -use miner::{Miner, MinerService, TransactionImportResult, AccountDetails}; -use basic_types::*; - +use miner::{Miner, MinerService, AccountDetails}; +use util::TrieFactory; use ipc::IpcConfig; use ipc::binary::{BinaryConvertable, BinaryConvertError}; +// re-export 
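+// (previously exported from the top of this module; kept as `pub use` so existing
+// importers of these types keep working)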
+pub use types::blockchain_info::BlockChainInfo; +pub use types::block_status::BlockStatus; +pub use blockchain::CacheSize as BlockChainCacheSize; + const MAX_TX_QUEUE_SIZE: usize = 4096; impl fmt::Display for BlockChainInfo { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 1ac47bbec..fc11f6e97 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -459,8 +459,7 @@ impl BlockChainClient for TestBlockChainClient { } } - fn clear_queue(&self) -> bool { - true + fn clear_queue(&self) { } fn chain_info(&self) -> BlockChainInfo { @@ -503,11 +502,10 @@ impl BlockChainClient for TestBlockChainClient { .collect() } - fn queue_transactions(&self, transactions: Vec) -> bool { + fn queue_transactions(&self, transactions: Vec) { // import right here let tx = transactions.into_iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect(); self.import_transactions(tx); - true } fn pending_transactions(&self) -> Vec { From ce1a4c89527232b0f4bb58f46978bb50d85438df Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 5 Jul 2016 11:47:23 +0300 Subject: [PATCH 23/36] fix rpc compilation --- rpc/src/v1/impls/eth.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 7f9d5c1ed..9e1713eb1 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -42,7 +42,6 @@ use v1::traits::Eth; use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, CallRequest, OptionalValue, Index, Filter, Log, Receipt}; use v1::impls::{default_gas_price, dispatch_transaction, error_codes}; use serde; -use ethcore::header::Header as BlockHeader; /// Eth rpc implementation. pub struct EthClient where From c96686620a434e0b19595f6f8e3cf7ea4bf04cfc Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 5 Jul 2016 11:49:29 +0300 Subject: [PATCH 24/36] deriving attribute --- ethcore/src/client/client.rs.in | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index a83d59d11..622277f66 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -516,6 +516,8 @@ impl Client { } } +#[derive(Ipc)] +#[ipc(client_ident="RemoteClient")] impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { let header = self.block_header(BlockID::Latest).unwrap(); From 8052824f3f0ee8f47652ef1571c9adf0f0952514 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 5 Jul 2016 12:23:56 +0300 Subject: [PATCH 25/36] tests (and fixes) --- ethcore/src/pod_state.rs | 16 ++++++++-------- ethcore/src/tests/rpc.rs | 38 +++++++++++++++++++++++++++++++++++++ ethcore/src/types/filter.rs | 20 +++++++++++-------- 3 files changed, 58 insertions(+), 16 deletions(-) diff --git a/ethcore/src/pod_state.rs b/ethcore/src/pod_state.rs index d99344adb..c6176914b 100644 --- a/ethcore/src/pod_state.rs +++ b/ethcore/src/pod_state.rs @@ -94,14 +94,14 @@ mod test { storage: map![], } ])); - assert_eq!(super::diff_pod(&PodState::new(), &a), StateDiff(map![ + assert_eq!(super::diff_pod(&PodState::new(), &a), StateDiff{ raw: map![ 1.into() => AccountDiff{ balance: Diff::Born(69.into()), nonce: Diff::Born(0.into()), code: Diff::Born(vec![]), storage: map![], } - ])); + ]}); } #[test] @@ -111,22 +111,22 @@ mod test { 1.into() => PodAccount::new(69.into(), 0.into(), vec![], map![]), 2.into() => PodAccount::new(69.into(), 0.into(), vec![], map![]) ]); - 
assert_eq!(super::diff_pod(&a, &b), StateDiff(map![ + assert_eq!(super::diff_pod(&a, &b), StateDiff { raw: map![ 2.into() => AccountDiff{ balance: Diff::Born(69.into()), nonce: Diff::Born(0.into()), code: Diff::Born(vec![]), storage: map![], } - ])); - assert_eq!(super::diff_pod(&b, &a), StateDiff(map![ + ]}); + assert_eq!(super::diff_pod(&b, &a), StateDiff { raw: map![ 2.into() => AccountDiff{ balance: Diff::Died(69.into()), nonce: Diff::Died(0.into()), code: Diff::Died(vec![]), storage: map![], } - ])); + ]}); } #[test] @@ -139,14 +139,14 @@ mod test { 1.into() => PodAccount::new(69.into(), 1.into(), vec![], map![]), 2.into() => PodAccount::new(69.into(), 0.into(), vec![], map![]) ]); - assert_eq!(super::diff_pod(&a, &b), StateDiff(map![ + assert_eq!(super::diff_pod(&a, &b), StateDiff { raw: map![ 1.into() => AccountDiff{ balance: Diff::Same, nonce: Diff::Changed(0.into(), 1.into()), code: Diff::Same, storage: map![], } - ])); + ]}); } } diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index f683a1d25..ce1efd4bb 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -16,7 +16,45 @@ //! Client RPC tests +use nanoipc; +use std::sync::Arc; +use std::io::Write; +use std::sync::atomic::{Ordering, AtomicBool}; +use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockID, RemoteClient}; +use block::IsBlock; +use tests::helpers::*; +use common::*; +use devtools::*; +use miner::Miner; +use crossbeam; + +pub fn run_test_worker(scope: &crossbeam::Scope, stop: Arc, socket_path: &str) { + let socket_path = socket_path.to_owned(); + scope.spawn(move || { + let client = Client::new( + ClientConfig::default(), + get_test_spec(), + dir.as_path(), + Arc::new(Miner::with_spec(get_test_spec())), + IoChannel::disconnected()).unwrap(); + let mut worker = nanoipc::Worker::new(&Arc::new(client)); + worker.add_reqrep(&socket_path).unwrap(); + while !stop.load(Ordering::Relaxed) { + worker.poll(); + } + }); +} + #[test] fn can_be_created() { + crossbeam::scope(|scope| { + let stop_guard = StopGuard::new(); + let socket_path = "ipc:///tmp/parity-client-rpc-10.ipc"; + run_test_worker(scope, stop_guard.share(), socket_path); + let remote_client = nanoipc::init_client::>(socket_path).unwrap(); + let non_existant = remote_client.block_header(BlockID::Number(188)); + + assert!(non_existant.is_none()); + }) } diff --git a/ethcore/src/types/filter.rs b/ethcore/src/types/filter.rs index af278cd92..d8d5d03bd 100644 --- a/ethcore/src/types/filter.rs +++ b/ethcore/src/types/filter.rs @@ -120,7 +120,7 @@ mod tests { from_block: BlockID::Earliest, to_block: BlockID::Latest, address: None, - topics: [None, None, None, None] + topics: vec![None, None, None, None], }; let possibilities = none_filter.bloom_possibilities(); @@ -135,9 +135,11 @@ mod tests { from_block: BlockID::Earliest, to_block: BlockID::Latest, address: Some(vec![Address::from_str("b372018f3be9e171df0581136b59d2faf73a7d5d").unwrap()]), - topics: [ + topics: vec![ Some(vec![H256::from_str("ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9").unwrap()]), - None, None, None + None, + None, + None, ] }; @@ -151,10 +153,11 @@ mod tests { from_block: BlockID::Earliest, to_block: BlockID::Latest, address: Some(vec![Address::from_str("b372018f3be9e171df0581136b59d2faf73a7d5d").unwrap()]), - topics: [ + topics: vec![ Some(vec![H256::from_str("ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9").unwrap()]), 
Some(vec![H256::from_str("ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9").unwrap()]), - None, None + None, + None, ] }; @@ -171,7 +174,7 @@ mod tests { Address::from_str("b372018f3be9e171df0581136b59d2faf73a7d5d").unwrap(), Address::from_str("b372018f3be9e171df0581136b59d2faf73a7d5d").unwrap(), ]), - topics: [ + topics: vec![ Some(vec![ H256::from_str("ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9").unwrap(), H256::from_str("ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9").unwrap() @@ -197,10 +200,11 @@ mod tests { from_block: BlockID::Earliest, to_block: BlockID::Latest, address: Some(vec![Address::from_str("b372018f3be9e171df0581136b59d2faf73a7d5d").unwrap()]), - topics: [ + topics: vec![ Some(vec![H256::from_str("ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9").unwrap()]), Some(vec![H256::from_str("ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa").unwrap()]), - None, None + None, + None, ] }; From 78e7101f85a0ede8e5e408e897e6dfe1bad2bd96 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 5 Jul 2016 12:48:32 +0300 Subject: [PATCH 26/36] rpc test working --- Cargo.lock | 1 + ethcore/Cargo.toml | 1 + ethcore/src/client/client.rs.in | 2 +- ethcore/src/lib.rs | 1 + ethcore/src/pod_state.rs | 4 ++-- ethcore/src/tests/rpc.rs | 17 +++++++---------- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 10a52aabb..deac947dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -257,6 +257,7 @@ dependencies = [ "ethcore-devtools 1.3.0", "ethcore-ipc 1.3.0", "ethcore-ipc-codegen 1.3.0", + "ethcore-ipc-nano 1.3.0", "ethcore-util 1.3.0", "ethjson 0.1.0", "ethstore 0.1.0", diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 6979043e3..03edcb389 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -32,6 +32,7 @@ bloomchain = "0.1" rayon = "0.3.1" ethstore = { path = "../ethstore" } semver = "0.2" +ethcore-ipc-nano = { path = "../ipc/nano" } [dependencies.hyper] git = "https://github.com/ethcore/hyper" diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client.rs.in index 622277f66..6ab4b6cc6 100644 --- a/ethcore/src/client/client.rs.in +++ b/ethcore/src/client/client.rs.in @@ -73,7 +73,7 @@ use evm::Factory as EvmFactory; use miner::{Miner, MinerService, AccountDetails}; use util::TrieFactory; use ipc::IpcConfig; -use ipc::binary::{BinaryConvertable, BinaryConvertError}; +use ipc::binary::{BinaryConvertError}; // re-export pub use types::blockchain_info::BlockChainInfo; diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index eb8f9dbc1..218619577 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -95,6 +95,7 @@ extern crate hyper; extern crate ethash; pub extern crate ethstore; extern crate semver; +extern crate ethcore_ipc_nano as nanoipc; #[cfg(test)] extern crate ethcore_devtools as devtools; #[cfg(feature = "jit" )] extern crate evmjit; diff --git a/ethcore/src/pod_state.rs b/ethcore/src/pod_state.rs index c6176914b..9a11fb33f 100644 --- a/ethcore/src/pod_state.rs +++ b/ethcore/src/pod_state.rs @@ -86,14 +86,14 @@ mod test { #[test] fn create_delete() { let a = PodState::from(map![ 1.into() => PodAccount::new(69.into(), 0.into(), vec![], map![]) ]); - assert_eq!(super::diff_pod(&a, &PodState::new()), StateDiff(map![ + assert_eq!(super::diff_pod(&a, &PodState::new()), StateDiff { raw: map![ 1.into() => AccountDiff{ balance: Diff::Died(69.into()), nonce: Diff::Died(0.into()), code: Diff::Died(vec![]), storage: map![], } - ])); + ]}); 
assert_eq!(super::diff_pod(&PodState::new(), &a), StateDiff{ raw: map![ 1.into() => AccountDiff{ balance: Diff::Born(69.into()), diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index ce1efd4bb..a25928cf8 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -18,26 +18,25 @@ use nanoipc; use std::sync::Arc; -use std::io::Write; use std::sync::atomic::{Ordering, AtomicBool}; -use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockID, RemoteClient}; -use block::IsBlock; +use client::{Client, ClientConfig, RemoteClient}; use tests::helpers::*; -use common::*; use devtools::*; use miner::Miner; use crossbeam; +use common::IoChannel; pub fn run_test_worker(scope: &crossbeam::Scope, stop: Arc, socket_path: &str) { let socket_path = socket_path.to_owned(); scope.spawn(move || { + let temp = RandomTempPath::create_dir(); let client = Client::new( ClientConfig::default(), get_test_spec(), - dir.as_path(), + temp.as_path(), Arc::new(Miner::with_spec(get_test_spec())), IoChannel::disconnected()).unwrap(); - let mut worker = nanoipc::Worker::new(&Arc::new(client)); + let mut worker = nanoipc::Worker::new(&client); worker.add_reqrep(&socket_path).unwrap(); while !stop.load(Ordering::Relaxed) { worker.poll(); @@ -46,15 +45,13 @@ pub fn run_test_worker(scope: &crossbeam::Scope, stop: Arc, socket_p } #[test] -fn can_be_created() { +fn can_handshake() { crossbeam::scope(|scope| { let stop_guard = StopGuard::new(); let socket_path = "ipc:///tmp/parity-client-rpc-10.ipc"; run_test_worker(scope, stop_guard.share(), socket_path); let remote_client = nanoipc::init_client::>(socket_path).unwrap(); - let non_existant = remote_client.block_header(BlockID::Number(188)); - - assert!(non_existant.is_none()); + assert!(remote_client.handshake().is_ok()); }) } From fb0076d2626bf781ad031bfdc2439d5ba5ed4fdd Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 5 Jul 2016 12:50:42 +0300 Subject: [PATCH 27/36] fix warnings again --- ethcore/src/types/call_analytics.rs | 2 +- ethcore/src/types/trace_filter.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/src/types/call_analytics.rs b/ethcore/src/types/call_analytics.rs index 3be1299aa..c738c15bd 100644 --- a/ethcore/src/types/call_analytics.rs +++ b/ethcore/src/types/call_analytics.rs @@ -17,7 +17,7 @@ //! Call analytics related types use std::mem; -use ipc::binary::{BinaryConvertError, BinaryConvertable}; +use ipc::binary::{BinaryConvertError}; use std::collections::VecDeque; /// Options concerning what analytics we run on the call. diff --git a/ethcore/src/types/trace_filter.rs b/ethcore/src/types/trace_filter.rs index 870792d15..89f886af4 100644 --- a/ethcore/src/types/trace_filter.rs +++ b/ethcore/src/types/trace_filter.rs @@ -17,7 +17,7 @@ //! 
Trace filter related types use std::mem; -use ipc::binary::{BinaryConvertError, BinaryConvertable}; +use ipc::binary::{BinaryConvertError}; use std::collections::VecDeque; use std::ops::Range; use util::{Address}; From 747818a349c656ae903001066d7ce06fb16451cf Mon Sep 17 00:00:00 2001 From: NikVolf Date: Tue, 5 Jul 2016 17:12:06 +0300 Subject: [PATCH 28/36] rs.in -> rs --- ethcore/build.rs | 2 +- ethcore/src/client/{client.rs.in => client_ipc.rs} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename ethcore/src/client/{client.rs.in => client_ipc.rs} (100%) diff --git a/ethcore/build.rs b/ethcore/build.rs index 190147ce2..a89f2912a 100644 --- a/ethcore/build.rs +++ b/ethcore/build.rs @@ -33,7 +33,7 @@ fn main() { // client interface { - let src = Path::new("src/client/client.rs.in"); + let src = Path::new("src/client/client_ipc.rs"); let intermediate = Path::new(&out_dir).join("client.intermediate.rs.in"); let mut registry = syntex::Registry::new(); codegen::register(&mut registry); diff --git a/ethcore/src/client/client.rs.in b/ethcore/src/client/client_ipc.rs similarity index 100% rename from ethcore/src/client/client.rs.in rename to ethcore/src/client/client_ipc.rs From 29de69e39d8c2b57d0f73d9bb40f02c90bba2c40 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 6 Jul 2016 12:40:10 +0300 Subject: [PATCH 29/36] failing blockid test --- ethcore/src/tests/rpc.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index a25928cf8..123f70150 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -19,7 +19,7 @@ use nanoipc; use std::sync::Arc; use std::sync::atomic::{Ordering, AtomicBool}; -use client::{Client, ClientConfig, RemoteClient}; +use client::{Client, BlockChainClient, ClientConfig, RemoteClient, BlockID}; use tests::helpers::*; use devtools::*; use miner::Miner; @@ -55,3 +55,17 @@ fn can_handshake() { assert!(remote_client.handshake().is_ok()); }) } + +#[test] +fn can_query_block() { + crossbeam::scope(|scope| { + let stop_guard = StopGuard::new(); + let socket_path = "ipc:///tmp/parity-client-rpc-10.ipc"; + run_test_worker(scope, stop_guard.share(), socket_path); + let remote_client = nanoipc::init_client::>(socket_path).unwrap(); + + let non_existant_block = remote_client.block_header(BlockID::Number(999)); + + assert!(non_existant_block.is_none()); + }) +} From baababea0221dc69e133dd39ab54c9d9e50c407d Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 6 Jul 2016 14:49:27 +0300 Subject: [PATCH 30/36] missing attribute --- ethcore/src/client/client_ipc.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ethcore/src/client/client_ipc.rs b/ethcore/src/client/client_ipc.rs index 2635f3f5f..1e9c487e6 100644 --- a/ethcore/src/client/client_ipc.rs +++ b/ethcore/src/client/client_ipc.rs @@ -593,6 +593,8 @@ impl Client { } } +#[derive(Ipc)] +#[ipc(client_ident="RemoteClient")] impl BlockChainClient for Client { fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { let header = self.block_header(BlockID::Latest).unwrap(); From 1ac2987437d4740e5e71cb1c34e700f131744f1a Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 6 Jul 2016 20:07:24 +0300 Subject: [PATCH 31/36] refactored tree changes --- ethcore/build.rs | 2 +- ethcore/src/client/client.rs | 988 ++++++++++++++++++++++++++++- ethcore/src/client/client_ipc.rs | 1002 ------------------------------ ethcore/src/client/mod.rs | 10 +- 4 files changed, 994 insertions(+), 1008 deletions(-) delete mode 100644 
ethcore/src/client/client_ipc.rs diff --git a/ethcore/build.rs b/ethcore/build.rs index a89f2912a..9f61851d4 100644 --- a/ethcore/build.rs +++ b/ethcore/build.rs @@ -33,7 +33,7 @@ fn main() { // client interface { - let src = Path::new("src/client/client_ipc.rs"); + let src = Path::new("src/client/client.rs"); let intermediate = Path::new(&out_dir).join("client.intermediate.rs.in"); let mut registry = syntex::Registry::new(); codegen::register(&mut registry); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 3c0733ec5..1e9c487e6 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -14,7 +14,989 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Blockchain database client. +use std::path::PathBuf; +use std::collections::{HashSet, HashMap}; +use std::ops::Deref; +use std::mem; +use std::collections::VecDeque; +use std::sync::*; +use std::path::Path; +use std::fmt; +use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; +use std::time::Instant; -#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues -include!(concat!(env!("OUT_DIR"), "/client.ipc.rs")); +// util +use util::numbers::*; +use util::panics::*; +use util::network::*; +use util::io::*; +use util::rlp; +use util::sha3::*; +use util::{Bytes}; +use util::rlp::{RlpStream, Rlp, UntrustedRlp}; +use util::journaldb; +use util::journaldb::JournalDB; +use util::kvdb::*; +use util::Itertools; +use util::PerfTimer; +use util::View; +use util::Stream; + +// other +use views::BlockView; +use error::{ImportError, ExecutionError, BlockError, ImportResult}; +use header::BlockNumber; +use state::State; +use spec::Spec; +use engine::Engine; +use views::HeaderView; +use service::{NetSyncMessage, SyncMessage}; +use env_info::LastHashes; +use verification; +use verification::{PreverifiedBlock, Verifier}; +use block::*; +use transaction::{LocalizedTransaction, SignedTransaction, Action}; +use blockchain::extras::TransactionAddress; +use types::filter::Filter; +use log_entry::LocalizedLogEntry; +use block_queue::{BlockQueue, BlockQueueInfo}; +use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; +use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, + DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient, + TraceFilter, CallAnalytics, BlockImportError, TransactionImportError, + TransactionImportResult, Mode}; +use client::Error as ClientError; +use env_info::EnvInfo; +use executive::{Executive, Executed, TransactOptions, contract_address}; +use receipt::LocalizedReceipt; +use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; +use trace; +use evm::Factory as EvmFactory; +use miner::{Miner, MinerService, AccountDetails}; +use util::TrieFactory; +use ipc::IpcConfig; +use ipc::binary::{BinaryConvertError}; + +// re-export +pub use types::blockchain_info::BlockChainInfo; +pub use types::block_status::BlockStatus; +pub use blockchain::CacheSize as BlockChainCacheSize; + +const MAX_TX_QUEUE_SIZE: usize = 4096; +const MAX_QUEUE_SIZE_TO_SLEEP_ON: usize = 2; + +impl fmt::Display for BlockChainInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "#{}.{}", self.best_block_number, self.best_block_hash) + } +} + +/// Report on the status of a client. +#[derive(Default, Clone, Debug, Eq, PartialEq)] +pub struct ClientReport { + /// How many blocks have been imported so far. 
+ pub blocks_imported: usize, + /// How many transactions have been applied so far. + pub transactions_applied: usize, + /// How much gas has been processed so far. + pub gas_processed: U256, + /// Memory used by state DB + pub state_db_mem: usize, +} + +impl ClientReport { + /// Alter internal reporting to reflect the additional `block` has been processed. + pub fn accrue_block(&mut self, block: &PreverifiedBlock) { + self.blocks_imported += 1; + self.transactions_applied += block.transactions.len(); + self.gas_processed = self.gas_processed + block.header.gas_used; + } +} + +struct SleepState { + last_activity: Option, + last_autosleep: Option, +} + +impl SleepState { + fn new(awake: bool) -> Self { + SleepState { + last_activity: match awake { false => None, true => Some(Instant::now()) }, + last_autosleep: match awake { false => Some(Instant::now()), true => None }, + } + } +} + +/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. +/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. +pub struct Client { + mode: Mode, + chain: Arc, + tracedb: Arc>, + engine: Arc>, + state_db: Mutex>, + block_queue: BlockQueue, + report: RwLock, + import_lock: Mutex<()>, + panic_handler: Arc, + verifier: Box, + vm_factory: Arc, + trie_factory: TrieFactory, + miner: Arc, + sleep_state: Mutex, + liveness: AtomicBool, + io_channel: IoChannel, + queue_transactions: AtomicUsize, +} + +const HISTORY: u64 = 1200; +// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING. +// Altering it will force a blanket DB update for *all* JournalDB-derived +// databases. +// Instead, add/upgrade the version string of the individual JournalDB-derived database +// of which you actually want force an upgrade. +const CLIENT_DB_VER_STR: &'static str = "5.3"; + +/// Get the path for the databases given the root path and information on the databases. +pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256) -> PathBuf { + let mut dir = path.to_path_buf(); + dir.push(H64::from(genesis_hash).hex()); + //TODO: sec/fat: pruned/full versioning + // version here is a bit useless now, since it's controlled only be the pruning algo. + dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning)); + dir +} + +/// Append a path element to the given path and return the string. +pub fn append_path(path: &Path, item: &str) -> String { + let mut p = path.to_path_buf(); + p.push(item); + p.to_str().unwrap().to_owned() +} + +impl Client { + /// Create a new client with given spec and DB path and custom verifier. 
+ pub fn new( + config: ClientConfig, + spec: Spec, + path: &Path, + miner: Arc, + message_channel: IoChannel + ) -> Result, ClientError> { + let path = get_db_path(path, config.pruning, spec.genesis_header().hash()); + let gb = spec.genesis_block(); + let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path)); + let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone()))); + + let mut state_db_config = match config.db_cache_size { + None => DatabaseConfig::default(), + Some(cache_size) => DatabaseConfig::with_cache(cache_size), + }; + + if config.db_compaction == DatabaseCompactionProfile::HDD { + state_db_config = state_db_config.compaction(CompactionProfile::hdd()); + } + + let mut state_db = journaldb::new( + &append_path(&path, "state"), + config.pruning, + state_db_config + ); + + if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) { + state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); + } + + let engine = Arc::new(spec.engine); + + let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone()); + let panic_handler = PanicHandler::new_in_arc(); + panic_handler.forward_from(&block_queue); + + let awake = match config.mode { Mode::Dark(..) => false, _ => true }; + let client = Client { + sleep_state: Mutex::new(SleepState::new(awake)), + liveness: AtomicBool::new(awake), + mode: config.mode, + chain: chain, + tracedb: tracedb, + engine: engine, + state_db: Mutex::new(state_db), + block_queue: block_queue, + report: RwLock::new(Default::default()), + import_lock: Mutex::new(()), + panic_handler: panic_handler, + verifier: verification::new(config.verifier_type), + vm_factory: Arc::new(EvmFactory::new(config.vm_type)), + trie_factory: TrieFactory::new(config.trie_spec), + miner: miner, + io_channel: message_channel, + queue_transactions: AtomicUsize::new(0), + }; + Ok(Arc::new(client)) + } + + /// Flush the block import queue. + pub fn flush_queue(&self) { + self.block_queue.flush(); + } + + fn build_last_hashes(&self, parent_hash: H256) -> LastHashes { + let mut last_hashes = LastHashes::new(); + last_hashes.resize(256, H256::new()); + last_hashes[0] = parent_hash; + for i in 0..255 { + match self.chain.block_details(&last_hashes[i]) { + Some(details) => { + last_hashes[i + 1] = details.parent.clone(); + }, + None => break, + } + } + last_hashes + } + + fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result { + let engine = self.engine.deref().deref(); + let header = &block.header; + + // Check the block isn't so old we won't be able to enact it. 
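+		// State more than `HISTORY` blocks behind the best block may already have been
+		// pruned, so blocks that old are rejected instead of enacted.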
+ let best_block_number = self.chain.best_block_number(); + if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY { + warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); + return Err(()); + } + + // Verify Block Family + let verify_family_result = self.verifier.verify_block_family(&header, &block.bytes, engine, self.chain.deref()); + if let Err(e) = verify_family_result { + warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + return Err(()); + }; + + // Check if Parent is in chain + let chain_has_parent = self.chain.block_header(&header.parent_hash); + if let None = chain_has_parent { + warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); + return Err(()); + }; + + // Enact Verified Block + let parent = chain_has_parent.unwrap(); + let last_hashes = self.build_last_hashes(header.parent_hash.clone()); + let db = self.state_db.lock().unwrap().boxed_clone(); + + let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone()); + if let Err(e) = enact_result { + warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + return Err(()); + }; + + // Final Verification + let locked_block = enact_result.unwrap(); + if let Err(e) = self.verifier.verify_block_final(&header, locked_block.block().header()) { + warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + return Err(()); + } + + Ok(locked_block) + } + + fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec, Vec) { + fn map_to_vec(map: Vec<(H256, bool)>) -> Vec { + map.into_iter().map(|(k, _v)| k).collect() + } + + // In ImportRoute we get all the blocks that have been enacted and retracted by single insert. + // Because we are doing multiple inserts some of the blocks that were enacted in import `k` + // could be retracted in import `k+1`. 
This is why to understand if after all inserts + // the block is enacted or retracted we iterate over all routes and at the end final state + // will be in the hashmap + let map = import_results.iter().fold(HashMap::new(), |mut map, route| { + for hash in &route.enacted { + map.insert(hash.clone(), true); + } + for hash in &route.retracted { + map.insert(hash.clone(), false); + } + map + }); + + // Split to enacted retracted (using hashmap value) + let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v); + // And convert tuples to keys + (map_to_vec(enacted), map_to_vec(retracted)) + } + + /// This is triggered by a message coming from a block queue when the block is ready for insertion + pub fn import_verified_blocks(&self, io: &IoChannel) -> usize { + let max_blocks_to_import = 64; + + let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); + let mut invalid_blocks = HashSet::new(); + let mut import_results = Vec::with_capacity(max_blocks_to_import); + + let _import_lock = self.import_lock.lock(); + let _timer = PerfTimer::new("import_verified_blocks"); + let blocks = self.block_queue.drain(max_blocks_to_import); + + let original_best = self.chain_info().best_block_hash; + + for block in blocks { + let header = &block.header; + + if invalid_blocks.contains(&header.parent_hash) { + invalid_blocks.insert(header.hash()); + continue; + } + let closed_block = self.check_and_close_block(&block); + if let Err(_) = closed_block { + invalid_blocks.insert(header.hash()); + continue; + } + let closed_block = closed_block.unwrap(); + imported_blocks.push(header.hash()); + + let route = self.commit_block(closed_block, &header.hash(), &block.bytes); + import_results.push(route); + + self.report.write().unwrap().accrue_block(&block); + trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); + } + + let imported = imported_blocks.len(); + let invalid_blocks = invalid_blocks.into_iter().collect::>(); + + { + if !invalid_blocks.is_empty() { + self.block_queue.mark_as_bad(&invalid_blocks); + } + if !imported_blocks.is_empty() { + self.block_queue.mark_as_good(&imported_blocks); + } + } + + { + if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() { + let (enacted, retracted) = self.calculate_enacted_retracted(&import_results); + + if self.queue_info().is_empty() { + self.miner.chain_new_blocks(self, &imported_blocks, &invalid_blocks, &enacted, &retracted); + } + + io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { + imported: imported_blocks, + invalid: invalid_blocks, + enacted: enacted, + retracted: retracted, + sealed: Vec::new(), + })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); + } + } + + if self.chain_info().best_block_hash != original_best { + self.miner.update_sealing(self); + } + + imported + } + + fn commit_block(&self, block: B, hash: &H256, block_data: &[u8]) -> ImportRoute where B: IsBlock + Drain { + let number = block.header().number(); + // Are we committing an era? + let ancient = if number >= HISTORY { + let n = number - HISTORY; + Some((n, self.chain.block_hash(n).unwrap())) + } else { + None + }; + + // Commit results + let receipts = block.receipts().clone(); + let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); + + // CHECK! I *think* this is fine, even if the state_root is equal to another + // already-imported block of the same number. + // TODO: Prove it with a test. 
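+		// Note that a failed state commit panics via `expect` rather than bubbling up
+		// as an import error.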
+ block.drain().commit(number, hash, ancient).expect("State DB commit failed."); + + // And update the chain after commit to prevent race conditions + // (when something is in chain but you are not able to fetch details) + let route = self.chain.insert_block(block_data, receipts); + self.tracedb.import(TraceImportRequest { + traces: traces, + block_hash: hash.clone(), + block_number: number, + enacted: route.enacted.clone(), + retracted: route.retracted.len() + }); + route + } + + /// Import transactions from the IO queue + pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize { + let _timer = PerfTimer::new("import_queued_transactions"); + self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst); + let fetch_account = |a: &Address| AccountDetails { + nonce: self.latest_nonce(a), + balance: self.latest_balance(a), + }; + let tx = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect(); + let results = self.miner.import_transactions(self, tx, fetch_account); + results.len() + } + + /// Attempt to get a copy of a specific block's state. + /// + /// This will not fail if given BlockID::Latest. + /// Otherwise, this can fail (but may not) if the DB prunes state. + pub fn state_at(&self, id: BlockID) -> Option { + // fast path for latest state. + if let BlockID::Latest = id.clone() { + return Some(self.state()) + } + + let block_number = match self.block_number(id.clone()) { + Some(num) => num, + None => return None, + }; + + self.block_header(id).and_then(|header| { + let db = self.state_db.lock().unwrap().boxed_clone(); + + // early exit for pruned blocks + if db.is_pruned() && self.chain.best_block_number() >= block_number + HISTORY { + return None; + } + + let root = HeaderView::new(&header).state_root(); + + State::from_existing(db, root, self.engine.account_start_nonce(), self.trie_factory.clone()).ok() + }) + } + + /// Get a copy of the best block's state. + pub fn state(&self) -> State { + State::from_existing( + self.state_db.lock().unwrap().boxed_clone(), + HeaderView::new(&self.best_block_header()).state_root(), + self.engine.account_start_nonce(), + self.trie_factory.clone()) + .expect("State root of best block header always valid.") + } + + /// Get info on the cache. + pub fn blockchain_cache_info(&self) -> BlockChainCacheSize { + self.chain.cache_size() + } + + /// Get the report. + pub fn report(&self) -> ClientReport { + let mut report = self.report.read().unwrap().clone(); + report.state_db_mem = self.state_db.lock().unwrap().mem_used(); + report + } + + /// Tick the client. + // TODO: manage by real events. + pub fn tick(&self) { + self.chain.collect_garbage(); + self.block_queue.collect_garbage(); + + match self.mode { + Mode::Dark(timeout) => { + let mut ss = self.sleep_state.lock().unwrap(); + if let Some(t) = ss.last_activity { + if Instant::now() > t + timeout { + self.sleep(); + ss.last_activity = None; + } + } + } + Mode::Passive(timeout, wakeup_after) => { + let mut ss = self.sleep_state.lock().unwrap(); + let now = Instant::now(); + if let Some(t) = ss.last_activity { + if now > t + timeout { + self.sleep(); + ss.last_activity = None; + ss.last_autosleep = Some(now); + } + } + if let Some(t) = ss.last_autosleep { + if now > t + wakeup_after { + self.wake_up(); + ss.last_activity = Some(now); + ss.last_autosleep = None; + } + } + } + _ => {} + } + } + + /// Set up the cache behaviour. 
+ pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) { + self.chain.configure_cache(pref_cache_size, max_cache_size); + } + + /// Look up the block number for the given block ID. + pub fn block_number(&self, id: BlockID) -> Option { + match id { + BlockID::Number(number) => Some(number), + BlockID::Hash(ref hash) => self.chain.block_number(hash), + BlockID::Earliest => Some(0), + BlockID::Latest => Some(self.chain.best_block_number()) + } + } + + fn block_hash(chain: &BlockChain, id: BlockID) -> Option { + match id { + BlockID::Hash(hash) => Some(hash), + BlockID::Number(number) => chain.block_hash(number), + BlockID::Earliest => chain.block_hash(0), + BlockID::Latest => Some(chain.best_block_hash()) + } + } + + fn transaction_address(&self, id: TransactionID) -> Option { + match id { + TransactionID::Hash(ref hash) => self.chain.transaction_address(hash), + TransactionID::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress { + block_hash: hash, + index: index, + }) + } + } + + fn wake_up(&self) { + if !self.liveness.load(AtomicOrdering::Relaxed) { + self.liveness.store(true, AtomicOrdering::Relaxed); + self.io_channel.send(NetworkIoMessage::User(SyncMessage::StartNetwork)).unwrap(); + trace!(target: "mode", "wake_up: Waking."); + } + } + + fn sleep(&self) { + if self.liveness.load(AtomicOrdering::Relaxed) { + // only sleep if the import queue is mostly empty. + if self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON { + self.liveness.store(false, AtomicOrdering::Relaxed); + self.io_channel.send(NetworkIoMessage::User(SyncMessage::StopNetwork)).unwrap(); + trace!(target: "mode", "sleep: Sleeping."); + } else { + trace!(target: "mode", "sleep: Cannot sleep - syncing ongoing."); + // TODO: Consider uncommenting. + //*self.last_activity.lock().unwrap() = Some(Instant::now()); + } + } + } +} + +#[derive(Ipc)] +#[ipc(client_ident="RemoteClient")] +impl BlockChainClient for Client { + fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { + let header = self.block_header(BlockID::Latest).unwrap(); + let view = HeaderView::new(&header); + let last_hashes = self.build_last_hashes(view.hash()); + let env_info = EnvInfo { + number: view.number(), + author: view.author(), + timestamp: view.timestamp(), + difficulty: view.difficulty(), + last_hashes: last_hashes, + gas_used: U256::zero(), + gas_limit: U256::max_value(), + }; + // that's just a copy of the state. + let mut state = self.state(); + let sender = try!(t.sender().map_err(|e| { + let message = format!("Transaction malformed: {:?}", e); + ExecutionError::TransactionMalformed(message) + })); + let balance = state.balance(&sender); + let needed_balance = t.value + t.gas * t.gas_price; + if balance < needed_balance { + // give the sender a sufficient balance + state.add_balance(&sender, &(needed_balance - balance)); + } + let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; + let mut ret = Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, options); + + // TODO gav move this into Executive. 
+ if analytics.state_diffing { + if let Ok(ref mut x) = ret { + x.state_diff = Some(state.diff_from(self.state())); + } + } + ret + } + + fn keep_alive(&self) { + if self.mode != Mode::Active { + self.wake_up(); + (*self.sleep_state.lock().unwrap()).last_activity = Some(Instant::now()); + } + } + + fn block_header(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) + } + + fn block_body(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id).and_then(|hash| { + self.chain.block(&hash).map(|bytes| { + let rlp = Rlp::new(&bytes); + let mut body = RlpStream::new_list(2); + body.append_raw(rlp.at(1).as_raw(), 1); + body.append_raw(rlp.at(2).as_raw(), 1); + body.out() + }) + }) + } + + fn block(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id).and_then(|hash| { + self.chain.block(&hash) + }) + } + + fn block_status(&self, id: BlockID) -> BlockStatus { + match Self::block_hash(&self.chain, id) { + Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain, + Some(hash) => self.block_queue.block_status(&hash), + None => BlockStatus::Unknown + } + } + + fn block_total_difficulty(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty) + } + + fn nonce(&self, address: &Address, id: BlockID) -> Option { + self.state_at(id).map(|s| s.nonce(address)) + } + + fn block_hash(&self, id: BlockID) -> Option { + Self::block_hash(&self.chain, id) + } + + fn code(&self, address: &Address) -> Option { + self.state().code(address) + } + + fn balance(&self, address: &Address, id: BlockID) -> Option { + self.state_at(id).map(|s| s.balance(address)) + } + + fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option { + self.state_at(id).map(|s| s.storage_at(address, position)) + } + + fn transaction(&self, id: TransactionID) -> Option { + self.transaction_address(id).and_then(|address| self.chain.transaction(&address)) + } + + fn uncle(&self, id: UncleID) -> Option { + let index = id.position; + self.block(id.block).and_then(|block| BlockView::new(&block).uncle_rlp_at(index)) + } + + fn transaction_receipt(&self, id: TransactionID) -> Option { + self.transaction_address(id).and_then(|address| { + let t = self.chain.block(&address.block_hash) + .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index)); + + match (t, self.chain.transaction_receipt(&address)) { + (Some(tx), Some(receipt)) => { + let block_hash = tx.block_hash.clone(); + let block_number = tx.block_number.clone(); + let transaction_hash = tx.hash(); + let transaction_index = tx.transaction_index; + let prior_gas_used = match tx.transaction_index { + 0 => U256::zero(), + i => { + let prior_address = TransactionAddress { block_hash: address.block_hash, index: i - 1 }; + let prior_receipt = self.chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed"); + prior_receipt.gas_used + } + }; + Some(LocalizedReceipt { + transaction_hash: tx.hash(), + transaction_index: tx.transaction_index, + block_hash: tx.block_hash, + block_number: tx.block_number, + cumulative_gas_used: receipt.gas_used, + gas_used: receipt.gas_used - prior_gas_used, + contract_address: match tx.action { + Action::Call(_) => None, + Action::Create => Some(contract_address(&tx.sender().unwrap(), &tx.nonce)) + }, + 
logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry { + entry: log, + block_hash: block_hash.clone(), + block_number: block_number, + transaction_hash: transaction_hash.clone(), + transaction_index: transaction_index, + log_index: i + }).collect() + }) + }, + _ => None + } + }) + } + + fn tree_route(&self, from: &H256, to: &H256) -> Option { + match self.chain.is_known(from) && self.chain.is_known(to) { + true => Some(self.chain.tree_route(from.clone(), to.clone())), + false => None + } + } + + fn find_uncles(&self, hash: &H256) -> Option> { + self.chain.find_uncle_hashes(hash, self.engine.maximum_uncle_age()) + } + + fn state_data(&self, hash: &H256) -> Option { + self.state_db.lock().unwrap().state(hash) + } + + fn block_receipts(&self, hash: &H256) -> Option { + self.chain.block_receipts(hash).map(|receipts| rlp::encode(&receipts).to_vec()) + } + + fn import_block(&self, bytes: Bytes) -> Result { + { + let header = BlockView::new(&bytes).header_view(); + if self.chain.is_known(&header.sha3()) { + return Err(BlockImportError::Import(ImportError::AlreadyInChain)); + } + if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown { + return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash()))); + } + } + Ok(try!(self.block_queue.import_block(bytes))) + } + + fn queue_info(&self) -> BlockQueueInfo { + self.block_queue.queue_info() + } + + fn clear_queue(&self) { + self.block_queue.clear(); + } + + fn chain_info(&self) -> BlockChainInfo { + BlockChainInfo { + total_difficulty: self.chain.best_block_total_difficulty(), + pending_total_difficulty: self.chain.best_block_total_difficulty(), + genesis_hash: self.chain.genesis_hash(), + best_block_hash: self.chain.best_block_hash(), + best_block_number: From::from(self.chain.best_block_number()) + } + } + + fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option> { + match (self.block_number(from_block), self.block_number(to_block)) { + (Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)), + _ => None + } + } + + fn logs(&self, filter: Filter) -> Vec { + // TODO: lock blockchain only once + + let mut blocks = filter.bloom_possibilities().iter() + .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) + .flat_map(|m| m) + // remove duplicate elements + .collect::>() + .into_iter() + .collect::>(); + + blocks.sort(); + + blocks.into_iter() + .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) + .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) + .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) + .flat_map(|(number, hash, receipts, hashes)| { + let mut log_index = 0; + receipts.into_iter() + .enumerate() + .flat_map(|(index, receipt)| { + log_index += receipt.logs.len(); + receipt.logs.into_iter() + .enumerate() + .filter(|tuple| filter.matches(&tuple.1)) + .map(|(i, log)| LocalizedLogEntry { + entry: log, + block_hash: hash.clone(), + block_number: number, + transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new), + transaction_index: index, + log_index: log_index + i + }) + .collect::>() + }) + .collect::>() + + }) + .collect() + } + + fn filter_traces(&self, filter: TraceFilter) -> Option> { + let start = self.block_number(filter.range.start); + let end = 
self.block_number(filter.range.end); + + if start.is_some() && end.is_some() { + let filter = trace::Filter { + range: start.unwrap() as usize..end.unwrap() as usize, + from_address: From::from(filter.from_address), + to_address: From::from(filter.to_address), + }; + + let traces = self.tracedb.filter(&filter); + Some(traces) + } else { + None + } + } + + fn trace(&self, trace: TraceId) -> Option { + let trace_address = trace.address; + self.transaction_address(trace.transaction) + .and_then(|tx_address| { + self.block_number(BlockID::Hash(tx_address.block_hash)) + .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address)) + }) + } + + fn transaction_traces(&self, transaction: TransactionID) -> Option> { + self.transaction_address(transaction) + .and_then(|tx_address| { + self.block_number(BlockID::Hash(tx_address.block_hash)) + .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index)) + }) + } + + fn block_traces(&self, block: BlockID) -> Option> { + self.block_number(block) + .and_then(|number| self.tracedb.block_traces(number)) + } + + fn last_hashes(&self) -> LastHashes { + self.build_last_hashes(self.chain.best_block_hash()) + } + + fn import_transactions(&self, transactions: Vec) -> Vec> { + let fetch_account = |a: &Address| AccountDetails { + nonce: self.latest_nonce(a), + balance: self.latest_balance(a), + }; + + self.miner.import_transactions(self, transactions, &fetch_account) + .into_iter() + .map(|res| res.map_err(|e| e.into())) + .collect() + } + + fn queue_transactions(&self, transactions: Vec) { + if self.queue_transactions.load(AtomicOrdering::Relaxed) > MAX_TX_QUEUE_SIZE { + debug!("Ignoring {} transactions: queue is full", transactions.len()); + } else { + let len = transactions.len(); + match self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewTransactions(transactions))) { + Ok(_) => { + self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst); + } + Err(e) => { + debug!("Ignoring {} transactions: error queueing: {}", len, e); + } + } + } + } + + fn pending_transactions(&self) -> Vec { + self.miner.pending_transactions() + } +} + +impl MiningBlockChainClient for Client { + fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { + let engine = self.engine.deref().deref(); + let h = self.chain.best_block_hash(); + + let mut open_block = OpenBlock::new( + engine, + &self.vm_factory, + self.trie_factory.clone(), + false, // TODO: this will need to be parameterised once we want to do immediate mining insertion. 
+ self.state_db.lock().unwrap().boxed_clone(), + &self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"), + self.build_last_hashes(h.clone()), + author, + gas_range_target, + extra_data, + ).expect("OpenBlock::new only fails if parent state root invalid; state root of best block's header is never invalid; qed"); + + // Add uncles + self.chain + .find_uncle_headers(&h, engine.maximum_uncle_age()) + .unwrap() + .into_iter() + .take(engine.maximum_uncle_count()) + .foreach(|h| { + open_block.push_uncle(h).unwrap(); + }); + + open_block + } + + fn vm_factory(&self) -> &EvmFactory { + &self.vm_factory + } + + fn import_sealed_block(&self, block: SealedBlock) -> ImportResult { + let _import_lock = self.import_lock.lock(); + let _timer = PerfTimer::new("import_sealed_block"); + + let original_best = self.chain_info().best_block_hash; + + let h = block.header().hash(); + let number = block.header().number(); + + let block_data = block.rlp_bytes(); + let route = self.commit_block(block, &h, &block_data); + trace!(target: "client", "Imported sealed block #{} ({})", number, h); + + { + let (enacted, retracted) = self.calculate_enacted_retracted(&[route]); + self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted); + + self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { + imported: vec![h.clone()], + invalid: vec![], + enacted: enacted, + retracted: retracted, + sealed: vec![h.clone()], + })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); + } + + if self.chain_info().best_block_hash != original_best { + self.miner.update_sealing(self); + } + + Ok(h) + } +} + +impl MayPanic for Client { + fn on_panic(&self, closure: F) where F: OnPanicListener { + self.panic_handler.on_panic(closure); + } +} + +impl IpcConfig for Client { } diff --git a/ethcore/src/client/client_ipc.rs b/ethcore/src/client/client_ipc.rs deleted file mode 100644 index 1e9c487e6..000000000 --- a/ethcore/src/client/client_ipc.rs +++ /dev/null @@ -1,1002 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use std::path::PathBuf; -use std::collections::{HashSet, HashMap}; -use std::ops::Deref; -use std::mem; -use std::collections::VecDeque; -use std::sync::*; -use std::path::Path; -use std::fmt; -use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; -use std::time::Instant; - -// util -use util::numbers::*; -use util::panics::*; -use util::network::*; -use util::io::*; -use util::rlp; -use util::sha3::*; -use util::{Bytes}; -use util::rlp::{RlpStream, Rlp, UntrustedRlp}; -use util::journaldb; -use util::journaldb::JournalDB; -use util::kvdb::*; -use util::Itertools; -use util::PerfTimer; -use util::View; -use util::Stream; - -// other -use views::BlockView; -use error::{ImportError, ExecutionError, BlockError, ImportResult}; -use header::BlockNumber; -use state::State; -use spec::Spec; -use engine::Engine; -use views::HeaderView; -use service::{NetSyncMessage, SyncMessage}; -use env_info::LastHashes; -use verification; -use verification::{PreverifiedBlock, Verifier}; -use block::*; -use transaction::{LocalizedTransaction, SignedTransaction, Action}; -use blockchain::extras::TransactionAddress; -use types::filter::Filter; -use log_entry::LocalizedLogEntry; -use block_queue::{BlockQueue, BlockQueueInfo}; -use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; -use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, - DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient, - TraceFilter, CallAnalytics, BlockImportError, TransactionImportError, - TransactionImportResult, Mode}; -use client::Error as ClientError; -use env_info::EnvInfo; -use executive::{Executive, Executed, TransactOptions, contract_address}; -use receipt::LocalizedReceipt; -use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase}; -use trace; -use evm::Factory as EvmFactory; -use miner::{Miner, MinerService, AccountDetails}; -use util::TrieFactory; -use ipc::IpcConfig; -use ipc::binary::{BinaryConvertError}; - -// re-export -pub use types::blockchain_info::BlockChainInfo; -pub use types::block_status::BlockStatus; -pub use blockchain::CacheSize as BlockChainCacheSize; - -const MAX_TX_QUEUE_SIZE: usize = 4096; -const MAX_QUEUE_SIZE_TO_SLEEP_ON: usize = 2; - -impl fmt::Display for BlockChainInfo { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "#{}.{}", self.best_block_number, self.best_block_hash) - } -} - -/// Report on the status of a client. -#[derive(Default, Clone, Debug, Eq, PartialEq)] -pub struct ClientReport { - /// How many blocks have been imported so far. - pub blocks_imported: usize, - /// How many transactions have been applied so far. - pub transactions_applied: usize, - /// How much gas has been processed so far. - pub gas_processed: U256, - /// Memory used by state DB - pub state_db_mem: usize, -} - -impl ClientReport { - /// Alter internal reporting to reflect the additional `block` has been processed. 
- pub fn accrue_block(&mut self, block: &PreverifiedBlock) { - self.blocks_imported += 1; - self.transactions_applied += block.transactions.len(); - self.gas_processed = self.gas_processed + block.header.gas_used; - } -} - -struct SleepState { - last_activity: Option, - last_autosleep: Option, -} - -impl SleepState { - fn new(awake: bool) -> Self { - SleepState { - last_activity: match awake { false => None, true => Some(Instant::now()) }, - last_autosleep: match awake { false => Some(Instant::now()), true => None }, - } - } -} - -/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. -/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. -pub struct Client { - mode: Mode, - chain: Arc, - tracedb: Arc>, - engine: Arc>, - state_db: Mutex>, - block_queue: BlockQueue, - report: RwLock, - import_lock: Mutex<()>, - panic_handler: Arc, - verifier: Box, - vm_factory: Arc, - trie_factory: TrieFactory, - miner: Arc, - sleep_state: Mutex, - liveness: AtomicBool, - io_channel: IoChannel, - queue_transactions: AtomicUsize, -} - -const HISTORY: u64 = 1200; -// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING. -// Altering it will force a blanket DB update for *all* JournalDB-derived -// databases. -// Instead, add/upgrade the version string of the individual JournalDB-derived database -// of which you actually want force an upgrade. -const CLIENT_DB_VER_STR: &'static str = "5.3"; - -/// Get the path for the databases given the root path and information on the databases. -pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256) -> PathBuf { - let mut dir = path.to_path_buf(); - dir.push(H64::from(genesis_hash).hex()); - //TODO: sec/fat: pruned/full versioning - // version here is a bit useless now, since it's controlled only be the pruning algo. - dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning)); - dir -} - -/// Append a path element to the given path and return the string. -pub fn append_path(path: &Path, item: &str) -> String { - let mut p = path.to_path_buf(); - p.push(item); - p.to_str().unwrap().to_owned() -} - -impl Client { - /// Create a new client with given spec and DB path and custom verifier. 
- pub fn new( - config: ClientConfig, - spec: Spec, - path: &Path, - miner: Arc, - message_channel: IoChannel - ) -> Result, ClientError> { - let path = get_db_path(path, config.pruning, spec.genesis_header().hash()); - let gb = spec.genesis_block(); - let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path)); - let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone()))); - - let mut state_db_config = match config.db_cache_size { - None => DatabaseConfig::default(), - Some(cache_size) => DatabaseConfig::with_cache(cache_size), - }; - - if config.db_compaction == DatabaseCompactionProfile::HDD { - state_db_config = state_db_config.compaction(CompactionProfile::hdd()); - } - - let mut state_db = journaldb::new( - &append_path(&path, "state"), - config.pruning, - state_db_config - ); - - if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) { - state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); - } - - let engine = Arc::new(spec.engine); - - let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone()); - let panic_handler = PanicHandler::new_in_arc(); - panic_handler.forward_from(&block_queue); - - let awake = match config.mode { Mode::Dark(..) => false, _ => true }; - let client = Client { - sleep_state: Mutex::new(SleepState::new(awake)), - liveness: AtomicBool::new(awake), - mode: config.mode, - chain: chain, - tracedb: tracedb, - engine: engine, - state_db: Mutex::new(state_db), - block_queue: block_queue, - report: RwLock::new(Default::default()), - import_lock: Mutex::new(()), - panic_handler: panic_handler, - verifier: verification::new(config.verifier_type), - vm_factory: Arc::new(EvmFactory::new(config.vm_type)), - trie_factory: TrieFactory::new(config.trie_spec), - miner: miner, - io_channel: message_channel, - queue_transactions: AtomicUsize::new(0), - }; - Ok(Arc::new(client)) - } - - /// Flush the block import queue. - pub fn flush_queue(&self) { - self.block_queue.flush(); - } - - fn build_last_hashes(&self, parent_hash: H256) -> LastHashes { - let mut last_hashes = LastHashes::new(); - last_hashes.resize(256, H256::new()); - last_hashes[0] = parent_hash; - for i in 0..255 { - match self.chain.block_details(&last_hashes[i]) { - Some(details) => { - last_hashes[i + 1] = details.parent.clone(); - }, - None => break, - } - } - last_hashes - } - - fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result { - let engine = self.engine.deref().deref(); - let header = &block.header; - - // Check the block isn't so old we won't be able to enact it. 
- let best_block_number = self.chain.best_block_number(); - if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY { - warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); - return Err(()); - } - - // Verify Block Family - let verify_family_result = self.verifier.verify_block_family(&header, &block.bytes, engine, self.chain.deref()); - if let Err(e) = verify_family_result { - warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - return Err(()); - }; - - // Check if Parent is in chain - let chain_has_parent = self.chain.block_header(&header.parent_hash); - if let None = chain_has_parent { - warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); - return Err(()); - }; - - // Enact Verified Block - let parent = chain_has_parent.unwrap(); - let last_hashes = self.build_last_hashes(header.parent_hash.clone()); - let db = self.state_db.lock().unwrap().boxed_clone(); - - let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone()); - if let Err(e) = enact_result { - warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - return Err(()); - }; - - // Final Verification - let locked_block = enact_result.unwrap(); - if let Err(e) = self.verifier.verify_block_final(&header, locked_block.block().header()) { - warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - return Err(()); - } - - Ok(locked_block) - } - - fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec, Vec) { - fn map_to_vec(map: Vec<(H256, bool)>) -> Vec { - map.into_iter().map(|(k, _v)| k).collect() - } - - // In ImportRoute we get all the blocks that have been enacted and retracted by single insert. - // Because we are doing multiple inserts some of the blocks that were enacted in import `k` - // could be retracted in import `k+1`. 
This is why to understand if after all inserts - // the block is enacted or retracted we iterate over all routes and at the end final state - // will be in the hashmap - let map = import_results.iter().fold(HashMap::new(), |mut map, route| { - for hash in &route.enacted { - map.insert(hash.clone(), true); - } - for hash in &route.retracted { - map.insert(hash.clone(), false); - } - map - }); - - // Split to enacted retracted (using hashmap value) - let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v); - // And convert tuples to keys - (map_to_vec(enacted), map_to_vec(retracted)) - } - - /// This is triggered by a message coming from a block queue when the block is ready for insertion - pub fn import_verified_blocks(&self, io: &IoChannel) -> usize { - let max_blocks_to_import = 64; - - let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); - let mut invalid_blocks = HashSet::new(); - let mut import_results = Vec::with_capacity(max_blocks_to_import); - - let _import_lock = self.import_lock.lock(); - let _timer = PerfTimer::new("import_verified_blocks"); - let blocks = self.block_queue.drain(max_blocks_to_import); - - let original_best = self.chain_info().best_block_hash; - - for block in blocks { - let header = &block.header; - - if invalid_blocks.contains(&header.parent_hash) { - invalid_blocks.insert(header.hash()); - continue; - } - let closed_block = self.check_and_close_block(&block); - if let Err(_) = closed_block { - invalid_blocks.insert(header.hash()); - continue; - } - let closed_block = closed_block.unwrap(); - imported_blocks.push(header.hash()); - - let route = self.commit_block(closed_block, &header.hash(), &block.bytes); - import_results.push(route); - - self.report.write().unwrap().accrue_block(&block); - trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); - } - - let imported = imported_blocks.len(); - let invalid_blocks = invalid_blocks.into_iter().collect::>(); - - { - if !invalid_blocks.is_empty() { - self.block_queue.mark_as_bad(&invalid_blocks); - } - if !imported_blocks.is_empty() { - self.block_queue.mark_as_good(&imported_blocks); - } - } - - { - if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() { - let (enacted, retracted) = self.calculate_enacted_retracted(&import_results); - - if self.queue_info().is_empty() { - self.miner.chain_new_blocks(self, &imported_blocks, &invalid_blocks, &enacted, &retracted); - } - - io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { - imported: imported_blocks, - invalid: invalid_blocks, - enacted: enacted, - retracted: retracted, - sealed: Vec::new(), - })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); - } - } - - if self.chain_info().best_block_hash != original_best { - self.miner.update_sealing(self); - } - - imported - } - - fn commit_block(&self, block: B, hash: &H256, block_data: &[u8]) -> ImportRoute where B: IsBlock + Drain { - let number = block.header().number(); - // Are we committing an era? - let ancient = if number >= HISTORY { - let n = number - HISTORY; - Some((n, self.chain.block_hash(n).unwrap())) - } else { - None - }; - - // Commit results - let receipts = block.receipts().clone(); - let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); - - // CHECK! I *think* this is fine, even if the state_root is equal to another - // already-imported block of the same number. - // TODO: Prove it with a test. 
- block.drain().commit(number, hash, ancient).expect("State DB commit failed."); - - // And update the chain after commit to prevent race conditions - // (when something is in chain but you are not able to fetch details) - let route = self.chain.insert_block(block_data, receipts); - self.tracedb.import(TraceImportRequest { - traces: traces, - block_hash: hash.clone(), - block_number: number, - enacted: route.enacted.clone(), - retracted: route.retracted.len() - }); - route - } - - /// Import transactions from the IO queue - pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize { - let _timer = PerfTimer::new("import_queued_transactions"); - self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst); - let fetch_account = |a: &Address| AccountDetails { - nonce: self.latest_nonce(a), - balance: self.latest_balance(a), - }; - let tx = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect(); - let results = self.miner.import_transactions(self, tx, fetch_account); - results.len() - } - - /// Attempt to get a copy of a specific block's state. - /// - /// This will not fail if given BlockID::Latest. - /// Otherwise, this can fail (but may not) if the DB prunes state. - pub fn state_at(&self, id: BlockID) -> Option { - // fast path for latest state. - if let BlockID::Latest = id.clone() { - return Some(self.state()) - } - - let block_number = match self.block_number(id.clone()) { - Some(num) => num, - None => return None, - }; - - self.block_header(id).and_then(|header| { - let db = self.state_db.lock().unwrap().boxed_clone(); - - // early exit for pruned blocks - if db.is_pruned() && self.chain.best_block_number() >= block_number + HISTORY { - return None; - } - - let root = HeaderView::new(&header).state_root(); - - State::from_existing(db, root, self.engine.account_start_nonce(), self.trie_factory.clone()).ok() - }) - } - - /// Get a copy of the best block's state. - pub fn state(&self) -> State { - State::from_existing( - self.state_db.lock().unwrap().boxed_clone(), - HeaderView::new(&self.best_block_header()).state_root(), - self.engine.account_start_nonce(), - self.trie_factory.clone()) - .expect("State root of best block header always valid.") - } - - /// Get info on the cache. - pub fn blockchain_cache_info(&self) -> BlockChainCacheSize { - self.chain.cache_size() - } - - /// Get the report. - pub fn report(&self) -> ClientReport { - let mut report = self.report.read().unwrap().clone(); - report.state_db_mem = self.state_db.lock().unwrap().mem_used(); - report - } - - /// Tick the client. - // TODO: manage by real events. - pub fn tick(&self) { - self.chain.collect_garbage(); - self.block_queue.collect_garbage(); - - match self.mode { - Mode::Dark(timeout) => { - let mut ss = self.sleep_state.lock().unwrap(); - if let Some(t) = ss.last_activity { - if Instant::now() > t + timeout { - self.sleep(); - ss.last_activity = None; - } - } - } - Mode::Passive(timeout, wakeup_after) => { - let mut ss = self.sleep_state.lock().unwrap(); - let now = Instant::now(); - if let Some(t) = ss.last_activity { - if now > t + timeout { - self.sleep(); - ss.last_activity = None; - ss.last_autosleep = Some(now); - } - } - if let Some(t) = ss.last_autosleep { - if now > t + wakeup_after { - self.wake_up(); - ss.last_activity = Some(now); - ss.last_autosleep = None; - } - } - } - _ => {} - } - } - - /// Set up the cache behaviour. 
- pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) { - self.chain.configure_cache(pref_cache_size, max_cache_size); - } - - /// Look up the block number for the given block ID. - pub fn block_number(&self, id: BlockID) -> Option { - match id { - BlockID::Number(number) => Some(number), - BlockID::Hash(ref hash) => self.chain.block_number(hash), - BlockID::Earliest => Some(0), - BlockID::Latest => Some(self.chain.best_block_number()) - } - } - - fn block_hash(chain: &BlockChain, id: BlockID) -> Option { - match id { - BlockID::Hash(hash) => Some(hash), - BlockID::Number(number) => chain.block_hash(number), - BlockID::Earliest => chain.block_hash(0), - BlockID::Latest => Some(chain.best_block_hash()) - } - } - - fn transaction_address(&self, id: TransactionID) -> Option { - match id { - TransactionID::Hash(ref hash) => self.chain.transaction_address(hash), - TransactionID::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress { - block_hash: hash, - index: index, - }) - } - } - - fn wake_up(&self) { - if !self.liveness.load(AtomicOrdering::Relaxed) { - self.liveness.store(true, AtomicOrdering::Relaxed); - self.io_channel.send(NetworkIoMessage::User(SyncMessage::StartNetwork)).unwrap(); - trace!(target: "mode", "wake_up: Waking."); - } - } - - fn sleep(&self) { - if self.liveness.load(AtomicOrdering::Relaxed) { - // only sleep if the import queue is mostly empty. - if self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON { - self.liveness.store(false, AtomicOrdering::Relaxed); - self.io_channel.send(NetworkIoMessage::User(SyncMessage::StopNetwork)).unwrap(); - trace!(target: "mode", "sleep: Sleeping."); - } else { - trace!(target: "mode", "sleep: Cannot sleep - syncing ongoing."); - // TODO: Consider uncommenting. - //*self.last_activity.lock().unwrap() = Some(Instant::now()); - } - } - } -} - -#[derive(Ipc)] -#[ipc(client_ident="RemoteClient")] -impl BlockChainClient for Client { - fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result { - let header = self.block_header(BlockID::Latest).unwrap(); - let view = HeaderView::new(&header); - let last_hashes = self.build_last_hashes(view.hash()); - let env_info = EnvInfo { - number: view.number(), - author: view.author(), - timestamp: view.timestamp(), - difficulty: view.difficulty(), - last_hashes: last_hashes, - gas_used: U256::zero(), - gas_limit: U256::max_value(), - }; - // that's just a copy of the state. - let mut state = self.state(); - let sender = try!(t.sender().map_err(|e| { - let message = format!("Transaction malformed: {:?}", e); - ExecutionError::TransactionMalformed(message) - })); - let balance = state.balance(&sender); - let needed_balance = t.value + t.gas * t.gas_price; - if balance < needed_balance { - // give the sender a sufficient balance - state.add_balance(&sender, &(needed_balance - balance)); - } - let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; - let mut ret = Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, options); - - // TODO gav move this into Executive. 
- if analytics.state_diffing { - if let Ok(ref mut x) = ret { - x.state_diff = Some(state.diff_from(self.state())); - } - } - ret - } - - fn keep_alive(&self) { - if self.mode != Mode::Active { - self.wake_up(); - (*self.sleep_state.lock().unwrap()).last_activity = Some(Instant::now()); - } - } - - fn block_header(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) - } - - fn block_body(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| { - self.chain.block(&hash).map(|bytes| { - let rlp = Rlp::new(&bytes); - let mut body = RlpStream::new_list(2); - body.append_raw(rlp.at(1).as_raw(), 1); - body.append_raw(rlp.at(2).as_raw(), 1); - body.out() - }) - }) - } - - fn block(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| { - self.chain.block(&hash) - }) - } - - fn block_status(&self, id: BlockID) -> BlockStatus { - match Self::block_hash(&self.chain, id) { - Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain, - Some(hash) => self.block_queue.block_status(&hash), - None => BlockStatus::Unknown - } - } - - fn block_total_difficulty(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty) - } - - fn nonce(&self, address: &Address, id: BlockID) -> Option { - self.state_at(id).map(|s| s.nonce(address)) - } - - fn block_hash(&self, id: BlockID) -> Option { - Self::block_hash(&self.chain, id) - } - - fn code(&self, address: &Address) -> Option { - self.state().code(address) - } - - fn balance(&self, address: &Address, id: BlockID) -> Option { - self.state_at(id).map(|s| s.balance(address)) - } - - fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option { - self.state_at(id).map(|s| s.storage_at(address, position)) - } - - fn transaction(&self, id: TransactionID) -> Option { - self.transaction_address(id).and_then(|address| self.chain.transaction(&address)) - } - - fn uncle(&self, id: UncleID) -> Option { - let index = id.position; - self.block(id.block).and_then(|block| BlockView::new(&block).uncle_rlp_at(index)) - } - - fn transaction_receipt(&self, id: TransactionID) -> Option { - self.transaction_address(id).and_then(|address| { - let t = self.chain.block(&address.block_hash) - .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index)); - - match (t, self.chain.transaction_receipt(&address)) { - (Some(tx), Some(receipt)) => { - let block_hash = tx.block_hash.clone(); - let block_number = tx.block_number.clone(); - let transaction_hash = tx.hash(); - let transaction_index = tx.transaction_index; - let prior_gas_used = match tx.transaction_index { - 0 => U256::zero(), - i => { - let prior_address = TransactionAddress { block_hash: address.block_hash, index: i - 1 }; - let prior_receipt = self.chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed"); - prior_receipt.gas_used - } - }; - Some(LocalizedReceipt { - transaction_hash: tx.hash(), - transaction_index: tx.transaction_index, - block_hash: tx.block_hash, - block_number: tx.block_number, - cumulative_gas_used: receipt.gas_used, - gas_used: receipt.gas_used - prior_gas_used, - contract_address: match tx.action { - Action::Call(_) => None, - Action::Create => Some(contract_address(&tx.sender().unwrap(), &tx.nonce)) - }, - 
logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry { - entry: log, - block_hash: block_hash.clone(), - block_number: block_number, - transaction_hash: transaction_hash.clone(), - transaction_index: transaction_index, - log_index: i - }).collect() - }) - }, - _ => None - } - }) - } - - fn tree_route(&self, from: &H256, to: &H256) -> Option { - match self.chain.is_known(from) && self.chain.is_known(to) { - true => Some(self.chain.tree_route(from.clone(), to.clone())), - false => None - } - } - - fn find_uncles(&self, hash: &H256) -> Option> { - self.chain.find_uncle_hashes(hash, self.engine.maximum_uncle_age()) - } - - fn state_data(&self, hash: &H256) -> Option { - self.state_db.lock().unwrap().state(hash) - } - - fn block_receipts(&self, hash: &H256) -> Option { - self.chain.block_receipts(hash).map(|receipts| rlp::encode(&receipts).to_vec()) - } - - fn import_block(&self, bytes: Bytes) -> Result { - { - let header = BlockView::new(&bytes).header_view(); - if self.chain.is_known(&header.sha3()) { - return Err(BlockImportError::Import(ImportError::AlreadyInChain)); - } - if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown { - return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash()))); - } - } - Ok(try!(self.block_queue.import_block(bytes))) - } - - fn queue_info(&self) -> BlockQueueInfo { - self.block_queue.queue_info() - } - - fn clear_queue(&self) { - self.block_queue.clear(); - } - - fn chain_info(&self) -> BlockChainInfo { - BlockChainInfo { - total_difficulty: self.chain.best_block_total_difficulty(), - pending_total_difficulty: self.chain.best_block_total_difficulty(), - genesis_hash: self.chain.genesis_hash(), - best_block_hash: self.chain.best_block_hash(), - best_block_number: From::from(self.chain.best_block_number()) - } - } - - fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option> { - match (self.block_number(from_block), self.block_number(to_block)) { - (Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)), - _ => None - } - } - - fn logs(&self, filter: Filter) -> Vec { - // TODO: lock blockchain only once - - let mut blocks = filter.bloom_possibilities().iter() - .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) - .flat_map(|m| m) - // remove duplicate elements - .collect::>() - .into_iter() - .collect::>(); - - blocks.sort(); - - blocks.into_iter() - .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) - .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) - .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) - .flat_map(|(number, hash, receipts, hashes)| { - let mut log_index = 0; - receipts.into_iter() - .enumerate() - .flat_map(|(index, receipt)| { - log_index += receipt.logs.len(); - receipt.logs.into_iter() - .enumerate() - .filter(|tuple| filter.matches(&tuple.1)) - .map(|(i, log)| LocalizedLogEntry { - entry: log, - block_hash: hash.clone(), - block_number: number, - transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new), - transaction_index: index, - log_index: log_index + i - }) - .collect::>() - }) - .collect::>() - - }) - .collect() - } - - fn filter_traces(&self, filter: TraceFilter) -> Option> { - let start = self.block_number(filter.range.start); - let end = 
self.block_number(filter.range.end); - - if start.is_some() && end.is_some() { - let filter = trace::Filter { - range: start.unwrap() as usize..end.unwrap() as usize, - from_address: From::from(filter.from_address), - to_address: From::from(filter.to_address), - }; - - let traces = self.tracedb.filter(&filter); - Some(traces) - } else { - None - } - } - - fn trace(&self, trace: TraceId) -> Option { - let trace_address = trace.address; - self.transaction_address(trace.transaction) - .and_then(|tx_address| { - self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address)) - }) - } - - fn transaction_traces(&self, transaction: TransactionID) -> Option> { - self.transaction_address(transaction) - .and_then(|tx_address| { - self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index)) - }) - } - - fn block_traces(&self, block: BlockID) -> Option> { - self.block_number(block) - .and_then(|number| self.tracedb.block_traces(number)) - } - - fn last_hashes(&self) -> LastHashes { - self.build_last_hashes(self.chain.best_block_hash()) - } - - fn import_transactions(&self, transactions: Vec) -> Vec> { - let fetch_account = |a: &Address| AccountDetails { - nonce: self.latest_nonce(a), - balance: self.latest_balance(a), - }; - - self.miner.import_transactions(self, transactions, &fetch_account) - .into_iter() - .map(|res| res.map_err(|e| e.into())) - .collect() - } - - fn queue_transactions(&self, transactions: Vec) { - if self.queue_transactions.load(AtomicOrdering::Relaxed) > MAX_TX_QUEUE_SIZE { - debug!("Ignoring {} transactions: queue is full", transactions.len()); - } else { - let len = transactions.len(); - match self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewTransactions(transactions))) { - Ok(_) => { - self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst); - } - Err(e) => { - debug!("Ignoring {} transactions: error queueing: {}", len, e); - } - } - } - } - - fn pending_transactions(&self) -> Vec { - self.miner.pending_transactions() - } -} - -impl MiningBlockChainClient for Client { - fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { - let engine = self.engine.deref().deref(); - let h = self.chain.best_block_hash(); - - let mut open_block = OpenBlock::new( - engine, - &self.vm_factory, - self.trie_factory.clone(), - false, // TODO: this will need to be parameterised once we want to do immediate mining insertion. 
- self.state_db.lock().unwrap().boxed_clone(), - &self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"), - self.build_last_hashes(h.clone()), - author, - gas_range_target, - extra_data, - ).expect("OpenBlock::new only fails if parent state root invalid; state root of best block's header is never invalid; qed"); - - // Add uncles - self.chain - .find_uncle_headers(&h, engine.maximum_uncle_age()) - .unwrap() - .into_iter() - .take(engine.maximum_uncle_count()) - .foreach(|h| { - open_block.push_uncle(h).unwrap(); - }); - - open_block - } - - fn vm_factory(&self) -> &EvmFactory { - &self.vm_factory - } - - fn import_sealed_block(&self, block: SealedBlock) -> ImportResult { - let _import_lock = self.import_lock.lock(); - let _timer = PerfTimer::new("import_sealed_block"); - - let original_best = self.chain_info().best_block_hash; - - let h = block.header().hash(); - let number = block.header().number(); - - let block_data = block.rlp_bytes(); - let route = self.commit_block(block, &h, &block_data); - trace!(target: "client", "Imported sealed block #{} ({})", number, h); - - { - let (enacted, retracted) = self.calculate_enacted_retracted(&[route]); - self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted); - - self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { - imported: vec![h.clone()], - invalid: vec![], - enacted: enacted, - retracted: retracted, - sealed: vec![h.clone()], - })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e)); - } - - if self.chain_info().best_block_hash != original_best { - self.miner.update_sealing(self); - } - - Ok(h) - } -} - -impl MayPanic for Client { - fn on_panic(&self, closure: F) where F: OnPanicListener { - self.panic_handler.on_panic(closure); - } -} - -impl IpcConfig for Client { } diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index ac8cbe5f2..80b7568f6 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -16,7 +16,6 @@ //! Blockchain database client. -mod client; mod config; mod error; mod test_client; @@ -51,9 +50,16 @@ pub use types::call_analytics::CallAnalytics; pub use block_import_error::BlockImportError; pub use transaction_import::{TransactionImportResult, TransactionImportError}; +mod client { + //! Blockchain database client. + + #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues + include!(concat!(env!("OUT_DIR"), "/client.ipc.rs")); +} + /// Blockchain database client. Owns and manages a blockchain and a block queue. pub trait BlockChainClient : Sync + Send { - + /// Should be called by any external-facing interface when actively using the client. /// To minimise chatter, there's no need to call more than once every 30s. 
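The include!(concat!(env!("OUT_DIR"), "/client.ipc.rs")) line above splices build-time generated source into the crate in place of a hand-written `mod client;`. As a generic sketch of that Cargo pattern, assuming a stub build script and file contents that are purely illustrative (the real script runs the IPC codegen):

    // build.rs (illustrative): emit generated Rust source into Cargo's OUT_DIR.
    use std::env;
    use std::fs::File;
    use std::io::Write;
    use std::path::Path;

    fn main() {
        let out_dir = env::var("OUT_DIR").unwrap();
        let dest = Path::new(&out_dir).join("client.ipc.rs");
        let mut f = File::create(&dest).unwrap();
        // A real build script would write the codegen output here; this is just a stub.
        f.write_all(b"pub const GENERATED_AT_BUILD: bool = true;\n").unwrap();
    }

At compile time the include! expands to whatever the build script wrote, which is why the allow(dead_code, ...) attribute below is applied to the whole generated module.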
fn keep_alive(&self) {} From 5dd5983568f5bd69dd995555367cc95f50cc3bc0 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 6 Jul 2016 20:25:42 +0300 Subject: [PATCH 32/36] paste reformat mess fix --- ethcore/src/client/client.rs | 93 ++++++++++++++++++------------------ 1 file changed, 46 insertions(+), 47 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 0426ec9b3..17b93723b 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -712,7 +712,7 @@ impl BlockChainClient for Client { fn transaction_receipt(&self, id: TransactionID) -> Option { self.transaction_address(id).and_then(|address| { let t = self.chain.block(&address.block_hash) - .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index)); + .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index)); match (t, self.chain.transaction_receipt(&address)) { (Some(tx), Some(receipt)) => { @@ -815,42 +815,41 @@ impl BlockChainClient for Client { // TODO: lock blockchain only once let mut blocks = filter.bloom_possibilities().iter() - .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) - .flat_map(|m| m) - // remove duplicate elements - .collect::>() - .into_iter() - .collect::>(); + .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) + .flat_map(|m| m) + // remove duplicate elements + .collect::>() + .into_iter() + .collect::>(); blocks.sort(); blocks.into_iter() - .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) - .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) - .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) - .flat_map(|(number, hash, receipts, hashes)| { - let mut log_index = 0; - receipts.into_iter() - .enumerate() - .flat_map(|(index, receipt)| { - log_index += receipt.logs.len(); - receipt.logs.into_iter() - .enumerate() - .filter(|tuple| filter.matches(&tuple.1)) - .map(|(i, log)| LocalizedLogEntry { - entry: log, - block_hash: hash.clone(), - block_number: number, - transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new), - transaction_index: index, - log_index: log_index + i - }) + .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) + .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) + .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) + .flat_map(|(number, hash, receipts, hashes)| { + let mut log_index = 0; + receipts.into_iter() + .enumerate() + .flat_map(|(index, receipt)| { + log_index += receipt.logs.len(); + receipt.logs.into_iter() + .enumerate() + .filter(|tuple| filter.matches(&tuple.1)) + .map(|(i, log)| LocalizedLogEntry { + entry: log, + block_hash: hash.clone(), + block_number: number, + transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new), + transaction_index: index, + log_index: log_index + i + }) + .collect::>() + }) .collect::>() }) - .collect::>() - - }) - .collect() + .collect() } fn filter_traces(&self, filter: TraceFilter) -> Option> { @@ -874,23 +873,23 @@ impl BlockChainClient for Client { fn trace(&self, trace: TraceId) -> Option { let trace_address = trace.address; self.transaction_address(trace.transaction) - 
.and_then(|tx_address| { - self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address)) - }) + .and_then(|tx_address| { + self.block_number(BlockID::Hash(tx_address.block_hash)) + .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address)) + }) } fn transaction_traces(&self, transaction: TransactionID) -> Option> { self.transaction_address(transaction) - .and_then(|tx_address| { - self.block_number(BlockID::Hash(tx_address.block_hash)) - .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index)) - }) + .and_then(|tx_address| { + self.block_number(BlockID::Hash(tx_address.block_hash)) + .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index)) + }) } fn block_traces(&self, block: BlockID) -> Option> { self.block_number(block) - .and_then(|number| self.tracedb.block_traces(number)) + .and_then(|number| self.tracedb.block_traces(number)) } fn last_hashes(&self) -> LastHashes { @@ -938,13 +937,13 @@ impl MiningBlockChainClient for Client { // Add uncles self.chain - .find_uncle_headers(&h, engine.maximum_uncle_age()) - .unwrap() - .into_iter() - .take(engine.maximum_uncle_count()) - .foreach(|h| { - open_block.push_uncle(h).unwrap(); - }); + .find_uncle_headers(&h, engine.maximum_uncle_age()) + .unwrap() + .into_iter() + .take(engine.maximum_uncle_count()) + .foreach(|h| { + open_block.push_uncle(h).unwrap(); + }); open_block } From b1ca41dea89319cc7be9ddecdb6aeee1e4f4b3e9 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 6 Jul 2016 20:31:48 +0300 Subject: [PATCH 33/36] pub mod actually --- ethcore/src/client/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index e38090185..7a24dc94b 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -51,7 +51,7 @@ pub use block_import_error::BlockImportError; pub use transaction_import::TransactionImportResult; pub use transaction_import::TransactionImportError; -mod client { +pub mod client { //! Blockchain database client. 
#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues From f06cff48b9e157061a4de9a27df3601753d1817b Mon Sep 17 00:00:00 2001 From: NikVolf Date: Wed, 6 Jul 2016 20:33:20 +0300 Subject: [PATCH 34/36] intendation fix --- ethcore/src/client/client.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 17b93723b..a23e47929 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -844,10 +844,10 @@ impl BlockChainClient for Client { transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new), transaction_index: index, log_index: log_index + i - }) - .collect::>() + }) + .collect::>() }) - .collect::>() + .collect::>() }) .collect() } From 326ea592589d0a68b7526bb9291860c4242b728d Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 7 Jul 2016 11:07:27 +0300 Subject: [PATCH 35/36] enum fix & block query test --- ipc/codegen/src/serialization.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/ipc/codegen/src/serialization.rs b/ipc/codegen/src/serialization.rs index 9c58e198e..0ab70f93a 100644 --- a/ipc/codegen/src/serialization.rs +++ b/ipc/codegen/src/serialization.rs @@ -707,7 +707,12 @@ fn binary_expr_variant( let buffer = &mut buffer[1..]; $write_expr }), - read: quote_arm!(cx, $variant_index_ident => { $read_expr } ), + read: quote_arm!(cx, + $variant_index_ident => { + let buffer = &buffer[1..]; + $read_expr + } + ), }) }, ast::VariantData::Struct(ref fields, _) => { @@ -742,7 +747,12 @@ fn binary_expr_variant( let buffer = &mut buffer[1..]; $write_expr }), - read: quote_arm!(cx, $variant_index_ident => { $read_expr } ), + read: quote_arm!(cx, + $variant_index_ident => { + let buffer = &buffer[1..]; + $read_expr + } + ), }) }, } From 3c046556b5e54c89919acaeb0f345f3f85c39213 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 7 Jul 2016 11:14:37 +0300 Subject: [PATCH 36/36] fix test url --- ethcore/src/tests/rpc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index 123f70150..786389905 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -60,7 +60,7 @@ fn can_handshake() { fn can_query_block() { crossbeam::scope(|scope| { let stop_guard = StopGuard::new(); - let socket_path = "ipc:///tmp/parity-client-rpc-10.ipc"; + let socket_path = "ipc:///tmp/parity-client-rpc-20.ipc"; run_test_worker(scope, stop_guard.share(), socket_path); let remote_client = nanoipc::init_client::>(socket_path).unwrap();
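The serialization codegen change in the enum fix patch above makes the read arms mirror the write arms: the writer emits a one-byte variant index before the payload, so the reader has to skip that byte again before decoding the payload. A hand-rolled sketch of the same framing, with simplified types rather than the generated code:

    // Minimal illustration of variant-index framing; Message and its encoding are
    // made up for this example and are not the generated BinaryConvertable code.
    #[derive(Debug, PartialEq)]
    enum Message { Ping, Payload(u32) }

    fn write(msg: &Message, buffer: &mut Vec<u8>) {
        match *msg {
            Message::Ping => buffer.push(0),
            Message::Payload(v) => {
                buffer.push(1); // variant index byte first
                buffer.extend_from_slice(&[v as u8, (v >> 8) as u8, (v >> 16) as u8, (v >> 24) as u8]);
            }
        }
    }

    fn read(buffer: &[u8]) -> Option<Message> {
        match buffer.first() {
            Some(&0) => Some(Message::Ping),
            Some(&1) => {
                // The payload starts after the index byte, which is the detail
                // the patched read arms now account for.
                let b = &buffer[1..];
                if b.len() < 4 { return None; }
                let v = b[0] as u32 | (b[1] as u32) << 8 | (b[2] as u32) << 16 | (b[3] as u32) << 24;
                Some(Message::Payload(v))
            }
            _ => None,
        }
    }

    fn main() {
        let mut buf = Vec::new();
        write(&Message::Payload(5), &mut buf);
        assert_eq!(read(&buf), Some(Message::Payload(5)));
    }

Without the skip over the first byte, the payload would be decoded starting at the index byte itself, which is the mismatch the read arms above avoid.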