Merge branch 'master' into rpc_poll_ids
commit 03ffcd0e64

Changed files shown in this view: Cargo.lock (generated) — 2 changes
@@ -215,11 +215,13 @@ name = "ethcore-rpc"
version = "0.9.99"
dependencies = [
 "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)",
 "ethash 0.9.99",
 "ethcore 0.9.99",
 "ethcore-util 0.9.99",
 "ethsync 0.9.99",
 "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -172,7 +172,8 @@ fn get_data_size(block_number: u64) -> usize {
}

#[inline]
fn get_seedhash(block_number: u64) -> H256 {
/// Given the `block_number`, determine the seed hash for Ethash.
pub fn get_seedhash(block_number: u64) -> H256 {
	let epochs = block_number / ETHASH_EPOCH_LENGTH;
	let mut ret: H256 = [0u8; 32];
	for _ in 0..epochs {
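For orientation, a minimal sketch of the scheme `get_seedhash` implements: keccak-256 applied once per elapsed epoch, starting from 32 zero bytes. The hash function is passed in as a closure so the sketch stays self-contained; the crate uses its own keccak implementation internally.

	// Sketch only; `keccak256` is a stand-in rather than an assumed helper.
	fn seedhash_sketch(block_number: u64, keccak256: impl Fn(&[u8; 32]) -> [u8; 32]) -> [u8; 32] {
		const ETHASH_EPOCH_LENGTH: u64 = 30000;
		let epochs = block_number / ETHASH_EPOCH_LENGTH;
		let mut ret = [0u8; 32];
		for _ in 0..epochs {
			ret = keccak256(&ret);
		}
		ret
	}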
@@ -24,7 +24,7 @@ mod compute;

use std::mem;
use compute::Light;
pub use compute::{quick_get_difficulty, H256, ProofOfWork, ETHASH_EPOCH_LENGTH};
pub use compute::{get_seedhash, quick_get_difficulty, H256, ProofOfWork, ETHASH_EPOCH_LENGTH};

use std::sync::{Arc, Mutex};

@@ -35,7 +35,7 @@ struct LightCache {
	prev: Option<Arc<Light>>,
}

/// Lighy/Full cache manager
/// Light/Full cache manager.
pub struct EthashManager {
	cache: Mutex<LightCache>,
}

@@ -1 +1 @@
Subproject commit f32954b3ddb5af2dc3dc9ec6d9a28bee848fdf70
Subproject commit 99afe8f5aad7bca5d0f1b1685390a4dea32d73c3
@@ -21,7 +21,7 @@
use common::*;
use engine::*;
use state::*;
use verification::PreVerifiedBlock;
use verification::PreverifiedBlock;

/// A block, encoded as it is on the block chain.
// TODO: rename to Block

@@ -155,9 +155,9 @@ pub struct OpenBlock<'x> {
/// Just like OpenBlock, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
/// and collected the uncles.
///
/// There is no function available to push a transaction. If you want that you'll need to `reopen()` it.
pub struct ClosedBlock<'x> {
	open_block: OpenBlock<'x>,
/// There is no function available to push a transaction.
pub struct ClosedBlock {
	block: ExecutedBlock,
	uncle_bytes: Bytes,
}

@@ -178,10 +178,12 @@ impl<'x> OpenBlock<'x> {
			last_hashes: last_hashes,
		};

		r.block.base.header.set_number(parent.number() + 1);
		r.block.base.header.set_author(author);
		r.block.base.header.set_extra_data(extra_data);
		r.block.base.header.set_timestamp_now();
		r.block.base.header.parent_hash = parent.hash();
		r.block.base.header.number = parent.number + 1;
		r.block.base.header.author = author;
		r.block.base.header.set_timestamp_now(parent.timestamp());
		r.block.base.header.extra_data = extra_data;
		r.block.base.header.note_dirty();

		engine.populate_from_parent(&mut r.block.base.header, parent);
		engine.on_new_block(&mut r.block);
@@ -259,7 +261,7 @@ impl<'x> OpenBlock<'x> {
	}

	/// Turn this into a `ClosedBlock`. A BlockChain must be provided in order to figure out the uncles.
	pub fn close(self) -> ClosedBlock<'x> {
	pub fn close(self) -> ClosedBlock {
		let mut s = self;
		s.engine.on_close_block(&mut s.block);
		s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|ref e| e.rlp_bytes().to_vec()).collect());
@@ -271,7 +273,10 @@ impl<'x> OpenBlock<'x> {
		s.block.base.header.gas_used = s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used);
		s.block.base.header.note_dirty();

		ClosedBlock::new(s, uncle_bytes)
		ClosedBlock {
			block: s.block,
			uncle_bytes: uncle_bytes,
		}
	}
}

@@ -279,38 +284,40 @@ impl<'x> IsBlock for OpenBlock<'x> {
	fn block(&self) -> &ExecutedBlock { &self.block }
}

impl<'x> IsBlock for ClosedBlock<'x> {
	fn block(&self) -> &ExecutedBlock { &self.open_block.block }
impl<'x> IsBlock for ClosedBlock {
	fn block(&self) -> &ExecutedBlock { &self.block }
}

impl<'x> ClosedBlock<'x> {
	fn new(open_block: OpenBlock<'x>, uncle_bytes: Bytes) -> Self {
		ClosedBlock {
			open_block: open_block,
			uncle_bytes: uncle_bytes,
		}
	}

impl ClosedBlock {
	/// Get the hash of the header without seal arguments.
	pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) }

	/// Provide a valid seal in order to turn this into a `SealedBlock`.
	///
	/// NOTE: This does not check the validity of `seal` with the engine.
	pub fn seal(self, seal: Vec<Bytes>) -> Result<SealedBlock, BlockError> {
	pub fn seal(self, engine: &Engine, seal: Vec<Bytes>) -> Result<SealedBlock, BlockError> {
		let mut s = self;
		if seal.len() != s.open_block.engine.seal_fields() {
			return Err(BlockError::InvalidSealArity(Mismatch{expected: s.open_block.engine.seal_fields(), found: seal.len()}));
		if seal.len() != engine.seal_fields() {
			return Err(BlockError::InvalidSealArity(Mismatch{expected: engine.seal_fields(), found: seal.len()}));
		}
		s.open_block.block.base.header.set_seal(seal);
		Ok(SealedBlock { block: s.open_block.block, uncle_bytes: s.uncle_bytes })
		s.block.base.header.set_seal(seal);
		Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes })
	}

	/// Turn this back into an `OpenBlock`.
	pub fn reopen(self) -> OpenBlock<'x> { self.open_block }
	/// Provide a valid seal in order to turn this into a `SealedBlock`.
	/// This does check the validity of `seal` with the engine.
	/// Returns the `ClosedBlock` back again if the seal is no good.
	pub fn try_seal(self, engine: &Engine, seal: Vec<Bytes>) -> Result<SealedBlock, ClosedBlock> {
		let mut s = self;
		s.block.base.header.set_seal(seal);
		match engine.verify_block_seal(&s.block.base.header) {
			Err(_) => Err(s),
			_ => Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes }),
		}
	}

	/// Drop this object and return the underlieing database.
	pub fn drain(self) -> JournalDB { self.open_block.block.state.drop().1 }
	pub fn drain(self) -> JournalDB { self.block.state.drop().1 }
}

impl SealedBlock {
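The net effect of this hunk: `ClosedBlock` no longer borrows the engine, so sealing takes the engine explicitly, and `try_seal` adds an engine-checked path that hands the block back on failure. A usage sketch against the signatures above (types from this crate, error handling elided):

	// Sketch: close an OpenBlock, then try to seal it; on a bad seal the
	// ClosedBlock comes back so the caller can retry with another nonce.
	fn close_and_try_seal<'x>(engine: &Engine, open: OpenBlock<'x>, seal: Vec<Bytes>) -> Result<SealedBlock, ClosedBlock> {
		let closed: ClosedBlock = open.close();
		closed.try_seal(engine, seal)
	}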
@@ -332,7 +339,7 @@ impl IsBlock for SealedBlock {
}

/// Enact the block given by block header, transactions and uncles
pub fn enact<'x>(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock<'x>, Error> {
pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
	{
		if ::log::max_log_level() >= ::log::LogLevel::Trace {
			let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce());
@@ -350,14 +357,14 @@ pub fn enact<'x>(header: &Header, transactions: &[SignedTransaction], uncles: &[
}

/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_bytes<'x>(block_bytes: &[u8], engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock<'x>, Error> {
pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
	let block = BlockView::new(block_bytes);
	let header = block.header();
	enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes)
}

/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_verified<'x>(block: &PreVerifiedBlock, engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock<'x>, Error> {
pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
	let view = BlockView::new(&block.bytes);
	enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes)
}
@@ -365,7 +372,7 @@ pub fn enact_verified<'x>(block: &PreVerifiedBlock, engine: &'x Engine, db: Jour
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards
pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> {
	let header = BlockView::new(block_bytes).header_view();
	Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(header.seal())))
	Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(engine, header.seal())))
}

#[cfg(test)]
@@ -386,7 +393,7 @@ mod tests {
		let last_hashes = vec![genesis_header.hash()];
		let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]);
		let b = b.close();
		let _ = b.seal(vec![]);
		let _ = b.seal(engine.deref(), vec![]);
	}

	#[test]
@@ -398,7 +405,7 @@ mod tests {
		let mut db_result = get_temp_journal_db();
		let mut db = db_result.take();
		engine.spec().ensure_db_good(&mut db);
		let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(vec![]).unwrap();
		let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(engine.deref(), vec![]).unwrap();
		let orig_bytes = b.rlp_bytes();
		let orig_db = b.drain();

@@ -28,7 +28,7 @@ use service::*;
use client::BlockStatus;
use util::panics::*;

known_heap_size!(0, UnVerifiedBlock, VerifyingBlock, PreVerifiedBlock);
known_heap_size!(0, UnverifiedBlock, VerifyingBlock, PreverifiedBlock);

const MIN_MEM_LIMIT: usize = 16384;
const MIN_QUEUE_LIMIT: usize = 512;
@@ -105,14 +105,14 @@ pub struct BlockQueue {
	max_mem_use: usize,
}

struct UnVerifiedBlock {
struct UnverifiedBlock {
	header: Header,
	bytes: Bytes,
}

struct VerifyingBlock {
	hash: H256,
	block: Option<PreVerifiedBlock>,
	block: Option<PreverifiedBlock>,
}

struct QueueSignal {
@@ -134,8 +134,8 @@ impl QueueSignal {

#[derive(Default)]
struct Verification {
	unverified: VecDeque<UnVerifiedBlock>,
	verified: VecDeque<PreVerifiedBlock>,
	unverified: VecDeque<UnverifiedBlock>,
	verified: VecDeque<PreverifiedBlock>,
	verifying: VecDeque<VerifyingBlock>,
	bad: HashSet<H256>,
}
@@ -244,7 +244,7 @@ impl BlockQueue {
		}
	}

	fn drain_verifying(verifying: &mut VecDeque<VerifyingBlock>, verified: &mut VecDeque<PreVerifiedBlock>, bad: &mut HashSet<H256>) {
	fn drain_verifying(verifying: &mut VecDeque<VerifyingBlock>, verified: &mut VecDeque<PreverifiedBlock>, bad: &mut HashSet<H256>) {
		while !verifying.is_empty() && verifying.front().unwrap().block.is_some() {
			let block = verifying.pop_front().unwrap().block.unwrap();
			if bad.contains(&block.header.parent_hash) {
@@ -289,31 +289,31 @@ impl BlockQueue {
		let header = BlockView::new(&bytes).header();
		let h = header.hash();
		if self.processing.read().unwrap().contains(&h) {
			return Err(ImportError::AlreadyQueued);
			return Err(x!(ImportError::AlreadyQueued));
		}
		{
			let mut verification = self.verification.lock().unwrap();
			if verification.bad.contains(&h) {
				return Err(ImportError::Bad(None));
				return Err(x!(ImportError::KnownBad));
			}

			if verification.bad.contains(&header.parent_hash) {
				verification.bad.insert(h.clone());
				return Err(ImportError::Bad(None));
				return Err(x!(ImportError::KnownBad));
			}
		}

		match verify_block_basic(&header, &bytes, self.engine.deref().deref()) {
			Ok(()) => {
				self.processing.write().unwrap().insert(h.clone());
				self.verification.lock().unwrap().unverified.push_back(UnVerifiedBlock { header: header, bytes: bytes });
				self.verification.lock().unwrap().unverified.push_back(UnverifiedBlock { header: header, bytes: bytes });
				self.more_to_verify.notify_all();
				Ok(h)
			},
			Err(err) => {
				warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err);
				self.verification.lock().unwrap().bad.insert(h.clone());
				Err(From::from(err))
				Err(err)
			}
		}
	}
@@ -352,7 +352,7 @@ impl BlockQueue {
	}

	/// Removes up to `max` verified blocks from the queue
	pub fn drain(&mut self, max: usize) -> Vec<PreVerifiedBlock> {
	pub fn drain(&mut self, max: usize) -> Vec<PreverifiedBlock> {
		let mut verification = self.verification.lock().unwrap();
		let count = min(max, verification.verified.len());
		let mut result = Vec::with_capacity(count);
@@ -455,7 +455,7 @@ mod tests {
		match duplicate_import {
			Err(e) => {
				match e {
					ImportError::AlreadyQueued => {},
					Error::Import(ImportError::AlreadyQueued) => {},
					_ => { panic!("must return AlreadyQueued error"); }
				}
			}

@@ -473,6 +473,12 @@ impl BlockChain {
		self.extras_db.write(batch).unwrap();
	}

	/// Given a block's `parent`, find every block header which represents a valid uncle.
	pub fn find_uncle_headers(&self, _parent: &H256) -> Vec<Header> {
		// TODO
		Vec::new()
	}

	/// Get inserted block info which is critical to preapre extras updates.
	fn block_info(&self, block_bytes: &[u8]) -> BlockInfo {
		let block = BlockView::new(block_bytes);

@@ -15,7 +15,7 @@
// along with Parity.  If not, see <http://www.gnu.org/licenses/>.

//! Multilevel blockchain bloom filter.
//! 
//!
//! ```not_run
//! extern crate ethcore_util as util;
//! extern crate ethcore;
@@ -23,33 +23,33 @@
//! use util::sha3::*;
//! use util::hash::*;
//! use ethcore::chainfilter::*;
//! 
//!
//! fn main() {
//!		let (index_size, bloom_levels) = (16, 3);
//!		let mut cache = MemoryCache::new();
//!		
//!
//!		let address = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap();
//!		
//!
//!		// borrow cache for reading inside the scope
//!		let modified_blooms = {
//!			let filter = ChainFilter::new(&cache, index_size, bloom_levels);	
//!			let filter = ChainFilter::new(&cache, index_size, bloom_levels);
//!			let block_number = 39;
//!			let mut bloom = H2048::new();
//!			bloom.shift_bloomed(&address.sha3());
//!			filter.add_bloom(&bloom, block_number)
//!		};
//!		
//!
//!		// number of updated blooms is equal number of levels
//!		assert_eq!(modified_blooms.len(), bloom_levels as usize);
//!
//!		// lets inserts modified blooms into the cache
//!		cache.insert_blooms(modified_blooms);
//!		
//!
//!		// borrow cache for another reading operations
//!		{
//!			let filter = ChainFilter::new(&cache, index_size, bloom_levels);	
//!			let filter = ChainFilter::new(&cache, index_size, bloom_levels);
//!			let blocks = filter.blocks_with_address(&address, 10, 40);
//!			assert_eq!(blocks.len(), 1);	
//!			assert_eq!(blocks.len(), 1);
//!			assert_eq!(blocks[0], 39);
//!		}
//! }
@@ -71,7 +71,7 @@ pub struct ChainFilter<'a, D>
impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource
{
	/// Creates new filter instance.
	/// 
	///
	/// Borrows `FilterDataSource` for reading.
	pub fn new(data_source: &'a D, index_size: usize, levels: u8) -> Self {
		ChainFilter {
@@ -88,7 +88,7 @@ impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource
			None => return None,
			Some(level_bloom) => match level {
				// if we are on the lowest level
				0 => return match offset < to_block {
				0 => return match offset <= to_block {
					// take the value if its smaller than to_block
					true if level_bloom.contains(bloom) => Some(vec![offset]),
					// return None if it is is equal to to_block
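The `offset <= to_block` change makes the upper bound inclusive, which is what the updated tests further down rely on: a single-block query now passes the same number for both bounds. Using the names from those tests:

	// With the inclusive bound, block 23 is matched by the range [23, 23]:
	let blocks = filter.blocks_with_bloom(&to_bloom(&topic), 23, 23);
	assert_eq!(blocks, vec![23]);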
@@ -153,7 +153,7 @@ impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource
			for i in 0..blooms.len() {

				let index = self.indexer.bloom_index(block_number + i, level);
				let new_bloom = {	
				let new_bloom = {
					// use new blooms before db blooms where necessary
					let bloom_at = | index | { result.get(&index).cloned().or_else(|| self.data_source.bloom_at_index(&index)) };

@@ -22,7 +22,7 @@ use util::sha3::*;
use chainfilter::{BloomIndex, FilterDataSource, ChainFilter};

/// In memory cache for blooms.
/// 
///
/// Stores all blooms in HashMap, which indexes them by `BloomIndex`.
pub struct MemoryCache {
	blooms: HashMap<BloomIndex, H2048>,
@@ -35,7 +35,7 @@ impl MemoryCache {
	}

	/// inserts all blooms into cache
	/// 
	///
	/// if bloom at given index already exists, overwrites it
	pub fn insert_blooms(&mut self, blooms: HashMap<BloomIndex, H2048>) {
		self.blooms.extend(blooms);
@@ -81,13 +81,13 @@ fn test_topic_basic_search() {

	{
		let filter = ChainFilter::new(&cache, index_size, bloom_levels);
		let blocks = filter.blocks_with_bloom(&to_bloom(&topic), 0, 23);
		let blocks = filter.blocks_with_bloom(&to_bloom(&topic), 0, 22);
		assert_eq!(blocks.len(), 0);
	}

	{
		let filter = ChainFilter::new(&cache, index_size, bloom_levels);
		let blocks = filter.blocks_with_bloom(&to_bloom(&topic), 23, 24);
		let blocks = filter.blocks_with_bloom(&to_bloom(&topic), 23, 23);
		assert_eq!(blocks.len(), 1);
		assert_eq!(blocks[0], 23);
	}
@@ -144,7 +144,7 @@ fn test_reset_chain_head_simple() {

	cache.insert_blooms(modified_blooms_3);


	let reset_modified_blooms = {
		let filter = ChainFilter::new(&cache, index_size, bloom_levels);
		filter.reset_chain_head(&[to_bloom(&topic_4), to_bloom(&topic_5)], 15, 17)
@@ -183,7 +183,7 @@ fn for_each_bloom<F>(bytes: &[u8], mut f: F) where F: FnMut(usize, &H2048) {
}

fn for_each_log<F>(bytes: &[u8], mut f: F) where F: FnMut(usize, &Address, &[H256]) {
	let mut reader = BufReader::new(bytes);	
	let mut reader = BufReader::new(bytes);
	let mut line = String::new();
	while reader.read_line(&mut line).unwrap() > 0 {
		{
@@ -235,11 +235,11 @@ fn test_chainfilter_real_data_short_searches() {
	for_each_log(include_bytes!("logs.txt"), | block_number, address, topics | {
		println!("block_number: {:?}", block_number);
		let filter = ChainFilter::new(&cache, index_size, bloom_levels);
		let blocks = filter.blocks_with_bloom(&to_bloom(address), block_number, block_number + 1);
		let blocks = filter.blocks_with_bloom(&to_bloom(address), block_number, block_number);
		assert_eq!(blocks.len(), 1);
		for (i, topic) in topics.iter().enumerate() {
			println!("topic: {:?}", i);
			let blocks = filter.blocks_with_bloom(&to_bloom(topic), block_number, block_number + 1);
			let blocks = filter.blocks_with_bloom(&to_bloom(topic), block_number, block_number);
			assert_eq!(blocks.len(), 1);
		}
	});

@@ -21,7 +21,7 @@ use util::panics::*;
use blockchain::{BlockChain, BlockProvider};
use views::BlockView;
use error::*;
use header::{BlockNumber, Header};
use header::{BlockNumber};
use state::State;
use spec::Spec;
use engine::Engine;
@@ -179,7 +179,7 @@ pub struct ClientReport {

impl ClientReport {
	/// Alter internal reporting to reflect the additional `block` has been processed.
	pub fn accrue_block(&mut self, block: &PreVerifiedBlock) {
	pub fn accrue_block(&mut self, block: &PreverifiedBlock) {
		self.blocks_imported += 1;
		self.transactions_applied += block.transactions.len();
		self.gas_processed = self.gas_processed + block.header.gas_used;
@@ -196,6 +196,11 @@ pub struct Client {
	report: RwLock<ClientReport>,
	import_lock: Mutex<()>,
	panic_handler: Arc<PanicHandler>,

	// for sealing...
	sealing_block: Mutex<Option<ClosedBlock>>,
	author: RwLock<Address>,
	extra_data: RwLock<Bytes>,
}

const HISTORY: u64 = 1000;
@@ -231,7 +236,10 @@ impl Client {
			block_queue: RwLock::new(block_queue),
			report: RwLock::new(Default::default()),
			import_lock: Mutex::new(()),
			panic_handler: panic_handler
			panic_handler: panic_handler,
			sealing_block: Mutex::new(None),
			author: RwLock::new(Address::new()),
			extra_data: RwLock::new(Vec::new()),
		}))
	}

@@ -240,10 +248,10 @@ impl Client {
		self.block_queue.write().unwrap().flush();
	}

	fn build_last_hashes(&self, header: &Header) -> LastHashes {
	fn build_last_hashes(&self, parent_hash: H256) -> LastHashes {
		let mut last_hashes = LastHashes::new();
		last_hashes.resize(256, H256::new());
		last_hashes[0] = header.parent_hash.clone();
		last_hashes[0] = parent_hash;
		let chain = self.chain.read().unwrap();
		for i in 0..255 {
			match chain.block_details(&last_hashes[i]) {
@@ -256,7 +264,7 @@ impl Client {
		last_hashes
	}

	fn check_and_close_block(&self, block: &PreVerifiedBlock) -> Result<ClosedBlock, ()> {
	fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<ClosedBlock, ()> {
		let engine = self.engine.deref().deref();
		let header = &block.header;

@@ -276,7 +284,7 @@ impl Client {

		// Enact Verified Block
		let parent = chain_has_parent.unwrap();
		let last_hashes = self.build_last_hashes(header);
		let last_hashes = self.build_last_hashes(header.parent_hash.clone());
		let db = self.state_db.lock().unwrap().clone();

		let enact_result = enact_verified(&block, engine, db, &parent, last_hashes);
@@ -305,6 +313,8 @@ impl Client {
		let _import_lock = self.import_lock.lock();
		let blocks = self.block_queue.write().unwrap().drain(max_blocks_to_import);

		let original_best = self.chain_info().best_block_hash;

		for block in blocks {
			let header = &block.header;

@@ -360,6 +370,10 @@ impl Client {
			}
		}

		if self.chain_info().best_block_hash != original_best {
			self.prepare_sealing();
		}

		imported
	}

@@ -406,8 +420,82 @@ impl Client {
			BlockId::Latest => Some(self.chain.read().unwrap().best_block_number())
		}
	}

	/// Get the author that we will seal blocks as.
	pub fn author(&self) -> Address {
		self.author.read().unwrap().clone()
	}

	/// Set the author that we will seal blocks as.
	pub fn set_author(&self, author: Address) {
		*self.author.write().unwrap() = author;
	}

	/// Get the extra_data that we will seal blocks wuth.
	pub fn extra_data(&self) -> Bytes {
		self.extra_data.read().unwrap().clone()
	}

	/// Set the extra_data that we will seal blocks with.
	pub fn set_extra_data(&self, extra_data: Bytes) {
		*self.extra_data.write().unwrap() = extra_data;
	}

	/// New chain head event. Restart mining operation.
	pub fn prepare_sealing(&self) {
		let h = self.chain.read().unwrap().best_block_hash();
		let mut b = OpenBlock::new(
			self.engine.deref().deref(),
			self.state_db.lock().unwrap().clone(),
			match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => {return;} },
			self.build_last_hashes(h.clone()),
			self.author(),
			self.extra_data()
		);

		self.chain.read().unwrap().find_uncle_headers(&h).into_iter().foreach(|h| { b.push_uncle(h).unwrap(); });

		// TODO: push transactions.

		let b = b.close();
		trace!("Sealing: number={}, hash={}, diff={}", b.hash(), b.block().header().difficulty(), b.block().header().number());
		*self.sealing_block.lock().unwrap() = Some(b);
	}

	/// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
	pub fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>> {
		if self.sealing_block.lock().unwrap().is_none() {
			self.prepare_sealing();
		}
		&self.sealing_block
	}

	/// Submit `seal` as a valid solution for the header of `pow_hash`.
	/// Will check the seal, but not actually insert the block into the chain.
	pub fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
		let mut maybe_b = self.sealing_block.lock().unwrap();
		match *maybe_b {
			Some(ref b) if b.hash() == pow_hash => {}
			_ => { return Err(Error::PowHashInvalid); }
		}

		let b = maybe_b.take();
		match b.unwrap().try_seal(self.engine.deref().deref(), seal) {
			Err(old) => {
				*maybe_b = Some(old);
				Err(Error::PowInvalid)
			}
			Ok(sealed) => {
				// TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice.
				try!(self.import_block(sealed.rlp_bytes()));
				Ok(())
			}
		}
	}
}

// TODO: need MinerService MinerIoHandler

impl BlockChainClient for Client {
	fn block_header(&self, id: BlockId) -> Option<Bytes> {
		let chain = self.chain.read().unwrap();
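Taken together, the new fields and methods give an external miner a simple driving loop; a sketch of such a caller, assuming the proof-of-work search happens elsewhere and produces `seal` (retries and logging elided):

	// Sketch: read the PoW hash of the block being sealed, then hand back a seal.
	fn submit_found_seal(client: &Client, seal: Vec<Bytes>) -> Result<(), Error> {
		let pow_hash = match *client.sealing_block().lock().unwrap() {
			Some(ref b) => b.hash(),
			// prepare_sealing() bailed out (e.g. no best-block header yet).
			None => return Err(Error::PowHashInvalid),
		};
		client.submit_seal(pow_hash, seal)
	}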
@@ -485,12 +573,14 @@ impl BlockChainClient for Client {
	}

	fn import_block(&self, bytes: Bytes) -> ImportResult {
		let header = BlockView::new(&bytes).header();
		if self.chain.read().unwrap().is_known(&header.hash()) {
			return Err(ImportError::AlreadyInChain);
		}
		if self.block_status(BlockId::Hash(header.parent_hash)) == BlockStatus::Unknown {
			return Err(ImportError::UnknownParent);
		{
			let header = BlockView::new(&bytes).header_view();
			if self.chain.read().unwrap().is_known(&header.sha3()) {
				return Err(x!(ImportError::AlreadyInChain));
			}
			if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown {
				return Err(x!(BlockError::UnknownParent(header.parent_hash())));
			}
		}
		self.block_queue.write().unwrap().import_block(bytes)
	}

@@ -30,8 +30,6 @@ pub trait Engine : Sync + Send {

	/// The number of additional header fields required for this engine.
	fn seal_fields(&self) -> usize { 0 }
	/// Default values of the additional fields RLP-encoded in a raw (non-list) harness.
	fn seal_rlp(&self) -> Bytes { vec![] }

	/// Additional engine-specific information for the user/developer concerning `header`.
	fn extra_info(&self, _header: &Header) -> HashMap<String, String> { HashMap::new() }
@@ -76,9 +74,20 @@ pub trait Engine : Sync + Send {
	/// Verify a particular transaction is valid.
	fn verify_transaction(&self, _t: &SignedTransaction, _header: &Header) -> Result<(), Error> { Ok(()) }

	/// Don't forget to call Super::populateFromParent when subclassing & overriding.
	/// Verify the seal of a block. This is an auxilliary method that actually just calls other `verify_` methods
	/// to get the job done. By default it must pass `verify_basic` and `verify_block_unordered`. If more or fewer
	/// methods are needed for an Engine, this may be overridden.
	fn verify_block_seal(&self, header: &Header) -> Result<(), Error> {
		self.verify_block_basic(header, None).and_then(|_| self.verify_block_unordered(header, None))
	}

	/// Don't forget to call Super::populate_from_parent when subclassing & overriding.
	// TODO: consider including State in the params.
	fn populate_from_parent(&self, _header: &mut Header, _parent: &Header) {}
	fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
		header.difficulty = parent.difficulty;
		header.gas_limit = parent.gas_limit;
		header.note_dirty();
	}

	// TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic
	// from Spec into here and removing the Spec::builtins field.

@@ -131,25 +131,14 @@ pub enum BlockError {
#[derive(Debug)]
/// Import to the block queue result
pub enum ImportError {
	/// Bad block detected
	Bad(Option<Error>),
	/// Already in the block chain
	/// Already in the block chain.
	AlreadyInChain,
	/// Already in the block queue
	/// Already in the block queue.
	AlreadyQueued,
	/// Unknown parent
	UnknownParent,
	/// Already marked as bad from a previous import (could mean parent is bad).
	KnownBad,
}

impl From<Error> for ImportError {
	fn from(err: Error) -> ImportError {
		ImportError::Bad(Some(err))
	}
}

/// Result of import block operation.
pub type ImportResult = Result<H256, ImportError>;

#[derive(Debug)]
/// General error type which should be capable of representing all errors in ethcore.
pub enum Error {
@@ -163,14 +152,29 @@ pub enum Error {
	Execution(ExecutionError),
	/// Error concerning transaction processing.
	Transaction(TransactionError),
	/// Error concerning block import.
	Import(ImportError),
	/// PoW hash is invalid or out of date.
	PowHashInvalid,
	/// The value of the nonce or mishash is invalid.
	PowInvalid,
}

/// Result of import block operation.
pub type ImportResult = Result<H256, Error>;

impl From<TransactionError> for Error {
	fn from(err: TransactionError) -> Error {
		Error::Transaction(err)
	}
}

impl From<ImportError> for Error {
	fn from(err: ImportError) -> Error {
		Error::Import(err)
	}
}

impl From<BlockError> for Error {
	fn from(err: BlockError) -> Error {
		Error::Block(err)
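With `ImportResult` now `Result<H256, Error>`, callers match the wrapped variants rather than bare `ImportError`s; a sketch of the new shape (the same patterns the sync-side changes at the end of this diff switch to):

	// Sketch: handling the reshaped import errors.
	fn describe(result: ImportResult) -> String {
		match result {
			Ok(hash) => format!("queued {:?}", hash),
			Err(Error::Import(ImportError::AlreadyInChain)) => "already in chain".into(),
			Err(Error::Import(ImportError::AlreadyQueued)) => "already queued".into(),
			Err(Error::Import(ImportError::KnownBad)) => "known bad".into(),
			Err(Error::Block(BlockError::UnknownParent(p))) => format!("unknown parent {:?}", p),
			Err(e) => format!("other error: {:?}", e),
		}
	}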
@@ -74,8 +74,6 @@ impl Engine for Ethash {
	fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) }
	// Two fields - mix
	fn seal_fields(&self) -> usize { 2 }
	// Two empty data items in RLP.
	fn seal_rlp(&self) -> Bytes { encode(&H64::new()).to_vec() }

	/// Additional engine-specific information for the user/developer concerning `header`.
	fn extra_info(&self, _header: &Header) -> HashMap<String, String> { HashMap::new() }
@@ -106,7 +104,7 @@ impl Engine for Ethash {
				max(gas_floor_target, gas_limit - gas_limit / bound_divisor + x!(1) + (header.gas_used * x!(6) / x!(5)) / bound_divisor)
			}
		};

		header.note_dirty();
//		info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit);
	}

@@ -144,9 +142,10 @@ impl Engine for Ethash {
		}

		let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(quick_get_difficulty(
				&Ethash::to_ethash(header.bare_hash()),
				header.nonce().low_u64(),
				&Ethash::to_ethash(header.mix_hash()))));
			&Ethash::to_ethash(header.bare_hash()),
			header.nonce().low_u64(),
			&Ethash::to_ethash(header.mix_hash())
		)));
		if difficulty < header.difficulty {
			return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty), max: None, found: difficulty })));
		}
@@ -241,10 +240,21 @@ impl Ethash {
		target
	}

	fn boundary_to_difficulty(boundary: &H256) -> U256 {
	/// Convert an Ethash boundary to its original difficulty. Basically just `f(x) = 2^256 / x`.
	pub fn boundary_to_difficulty(boundary: &H256) -> U256 {
		U256::from((U512::one() << 256) / x!(U256::from(boundary.as_slice())))
	}

	/// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`.
	pub fn difficulty_to_boundary(difficulty: &U256) -> H256 {
		x!(U256::from((U512::one() << 256) / x!(difficulty)))
	}

	/// Given the `block_number`, determine the seed hash for Ethash.
	pub fn get_seedhash(number: BlockNumber) -> H256 {
		Self::from_ethash(ethash::get_seedhash(number))
	}

	fn to_ethash(hash: H256) -> EH256 {
		unsafe { mem::transmute(hash) }
	}
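Both new helpers apply the same map in opposite directions, which is worth spelling out once since the RPC layer below relies on it:

	boundary_to_difficulty(h) = 2^256 / h
	difficulty_to_boundary(d) = 2^256 / d,  so  h <= 2^256 / d  <=>  2^256 / h >= d

In other words, a header whose proof-of-work hash lies at or below the boundary yields a computed difficulty of at least `header.difficulty`, which is exactly what the `difficulty < header.difficulty` rejection in the verification hunk above enforces.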
@@ -255,12 +265,20 @@ impl Ethash {
}

impl Header {
	fn nonce(&self) -> H64 {
	/// Get the none field of the header.
	pub fn nonce(&self) -> H64 {
		decode(&self.seal()[1])
	}
	fn mix_hash(&self) -> H256 {

	/// Get the mix hash field of the header.
	pub fn mix_hash(&self) -> H256 {
		decode(&self.seal()[0])
	}

	/// Set the nonce and mix hash fields of the header.
	pub fn set_nonce_and_mix_hash(&mut self, nonce: &H64, mix_hash: &H256) {
		self.seal = vec![encode(mix_hash).to_vec(), encode(nonce).to_vec()];
	}
}

#[cfg(test)]

@@ -102,10 +102,12 @@ impl Header {
		Self::default()
	}

	/// Get the number field of the header.
	pub fn number(&self) -> BlockNumber { self.number }
	/// Get the parent_hash field of the header.
	pub fn parent_hash(&self) -> &H256 { &self.parent_hash }
	/// Get the timestamp field of the header.
	pub fn timestamp(&self) -> u64 { self.timestamp }
	/// Get the number field of the header.
	pub fn number(&self) -> BlockNumber { self.number }
	/// Get the author field of the header.
	pub fn author(&self) -> &Address { &self.author }

@@ -127,11 +129,13 @@ impl Header {
	// TODO: seal_at, set_seal_at &c.

	/// Set the number field of the header.
	pub fn set_number(&mut self, a: BlockNumber) { self.number = a; self.note_dirty(); }
	pub fn set_parent_hash(&mut self, a: H256) { self.parent_hash = a; self.note_dirty(); }
	/// Set the timestamp field of the header.
	pub fn set_timestamp(&mut self, a: u64) { self.timestamp = a; self.note_dirty(); }
	/// Set the timestamp field of the header to the current time.
	pub fn set_timestamp_now(&mut self) { self.timestamp = now_utc().to_timespec().sec as u64; self.note_dirty(); }
	pub fn set_timestamp_now(&mut self, but_later_than: u64) { self.timestamp = max(now_utc().to_timespec().sec as u64, but_later_than + 1); self.note_dirty(); }
	/// Set the number field of the header.
	pub fn set_number(&mut self, a: BlockNumber) { self.number = a; self.note_dirty(); }
	/// Set the author field of the header.
	pub fn set_author(&mut self, a: Address) { if a != self.author { self.author = a; self.note_dirty(); } }
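The two-argument `set_timestamp_now` exists so that `OpenBlock::new`, earlier in this diff, can pass `parent.timestamp()` and guarantee a strictly increasing timestamp even when the local clock is behind the parent:

	// Effective rule after this change:
	// timestamp = max(now_utc_seconds, parent.timestamp() + 1)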
@@ -115,7 +115,7 @@ declare_test!{StateTests_stSolidityTest, "StateTests/stSolidityTest"}
declare_test!{StateTests_stSpecialTest, "StateTests/stSpecialTest"}
declare_test!{StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"}
declare_test!{StateTests_stTransactionTest, "StateTests/stTransactionTest"}
//declare_test!{StateTests_stTransitionTest, "StateTests/stTransitionTest"}
declare_test!{StateTests_stTransitionTest, "StateTests/stTransitionTest"}
declare_test!{StateTests_stWalletTest, "StateTests/stWalletTest"}

@@ -15,6 +15,7 @@
// along with Parity.  If not, see <http://www.gnu.org/licenses/>.

use client::{BlockChainClient, Client, ClientConfig, BlockId};
use block::IsBlock;
use tests::helpers::*;
use common::*;
use devtools::*;
@@ -106,3 +107,22 @@ fn can_collect_garbage() {
	client.tick();
	assert!(client.blockchain_cache_info().blocks < 100 * 1024);
}

#[test]
fn can_mine() {
	let dummy_blocks = get_good_dummy_block_seq(2);
	let client_result = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]);
	let client = client_result.reference();
	let b = client.sealing_block();
	let pow_hash = {
		let u = b.lock().unwrap();
		match *u {
			Some(ref b) => {
				assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3());
				b.hash()
			}
			None => { panic!(); }
		}
	};
	assert!(client.submit_seal(pow_hash, vec![]).is_ok());
}
@@ -26,7 +26,7 @@ use engine::Engine;
use blockchain::*;

/// Preprocessed block data gathered in `verify_block_unordered` call
pub struct PreVerifiedBlock {
pub struct PreverifiedBlock {
	/// Populated block header
	pub header: Header,
	/// Populated block transactions
@@ -55,8 +55,8 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res

/// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
/// Still operates on a individual block
/// Returns a PreVerifiedBlock structure populated with transactions
pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result<PreVerifiedBlock, Error> {
/// Returns a PreverifiedBlock structure populated with transactions
pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result<PreverifiedBlock, Error> {
	try!(engine.verify_block_unordered(&header, Some(&bytes)));
	for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::<Header>()) {
		try!(engine.verify_block_unordered(&u, None));
@@ -70,7 +70,7 @@ pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) ->
			transactions.push(t);
		}
	}
	Ok(PreVerifiedBlock {
	Ok(PreverifiedBlock {
		header: header,
		transactions: transactions,
		bytes: bytes,

@@ -85,6 +85,10 @@ Options:
  --jsonrpc-url URL        Specify URL for JSON-RPC API server [default: 127.0.0.1:8545].
  --jsonrpc-cors URL       Specify CORS header for JSON-RPC API responses [default: null].

  --author ADDRESS         Specify the block author (aka "coinbase") address for sending block rewards
                           from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
  --extra-data STRING      Specify a custom extra-data for authored blocks, no more than 32 characters.

  -l --logging LOGGING     Specify the logging level.
  -v --version             Show information about version.
  -h --help                Show this screen.
@@ -114,6 +118,8 @@ struct Args {
	flag_jsonrpc_cors: String,
	flag_logging: Option<String>,
	flag_version: bool,
	flag_author: String,
	flag_extra_data: Option<String>,
}

fn setup_log(init: &Option<String>) {
@@ -196,6 +202,18 @@ impl Configuration {
		self.args.flag_db_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
	}

	fn author(&self) -> Address {
		Address::from_str(&self.args.flag_author).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author))
	}

	fn extra_data(&self) -> Bytes {
		match self.args.flag_extra_data {
			Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(),
			None => version_data(),
			Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); }
		}
	}

	fn _keys_path(&self) -> String {
		self.args.flag_keys_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
	}
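With the new flags wired through, a typical invocation might look like the line below; the address is simply the documented default repeated explicitly, and the extra-data string is an illustration only (at most 32 characters, per the check above):

	parity --author 0037a6b811ffeb6e072da21179d11b1406371c63 --extra-data "mined by parity"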
@ -296,6 +314,8 @@ impl Configuration {
 | 
			
		||||
		client_config.queue.max_mem_use = self.args.flag_queue_max_size;
 | 
			
		||||
		let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap();
 | 
			
		||||
		let client = service.client().clone();
 | 
			
		||||
		client.set_author(self.author());
 | 
			
		||||
		client.set_extra_data(self.extra_data());
 | 
			
		||||
 | 
			
		||||
		// Sync
 | 
			
		||||
		let sync = EthSync::register(service.network(), sync_config, client);
 | 
			
		||||
@ -354,7 +374,6 @@ impl Default for Informant {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl Informant {
 | 
			
		||||
 | 
			
		||||
	fn format_bytes(b: usize) -> String {
 | 
			
		||||
		match binary_prefix(b as f64) {
 | 
			
		||||
			Standalone(bytes)   => format!("{} bytes", bytes),
 | 
			
		||||
 | 
			
		||||
@ -9,12 +9,14 @@ build = "build.rs"
 | 
			
		||||
[lib]
 | 
			
		||||
 | 
			
		||||
[dependencies]
 | 
			
		||||
log = "0.3"
 | 
			
		||||
serde = "0.7.0"
 | 
			
		||||
serde_json = "0.7.0"
 | 
			
		||||
jsonrpc-core = "1.2"
 | 
			
		||||
jsonrpc-http-server = "2.1"
 | 
			
		||||
ethcore-util = { path = "../util" }
 | 
			
		||||
ethcore = { path = "../ethcore" }
 | 
			
		||||
ethash = { path = "../ethash" }
 | 
			
		||||
ethsync = { path = "../sync" }
 | 
			
		||||
clippy = { version = "0.0.44", optional = true }
 | 
			
		||||
rustc-serialize = "0.3"
 | 
			
		||||
 | 
			
		||||
@ -15,13 +15,18 @@
 | 
			
		||||
// along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
//! Eth rpc implementation.
 | 
			
		||||
use std::sync::{Arc, Weak, Mutex};
 | 
			
		||||
use std::collections::HashMap;
 | 
			
		||||
use std::sync::{Arc, Weak, Mutex, RwLock};
 | 
			
		||||
use ethsync::{EthSync, SyncState};
 | 
			
		||||
use jsonrpc_core::*;
 | 
			
		||||
use util::numbers::*;
 | 
			
		||||
use util::sha3::*;
 | 
			
		||||
use util::rlp::encode;
 | 
			
		||||
use ethcore::client::*;
 | 
			
		||||
use ethcore::block::{IsBlock};
 | 
			
		||||
use ethcore::views::*;
 | 
			
		||||
//#[macro_use] extern crate log;
 | 
			
		||||
use ethcore::ethereum::Ethash;
 | 
			
		||||
use ethcore::ethereum::denominations::shannon;
 | 
			
		||||
use v1::traits::{Eth, EthFilter};
 | 
			
		||||
use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, OptionalValue, Index, Filter, Log};
 | 
			
		||||
@ -30,7 +35,8 @@ use v1::helpers::{PollFilter, PollManager};
 | 
			
		||||
/// Eth rpc implementation.
 | 
			
		||||
pub struct EthClient {
 | 
			
		||||
	client: Weak<Client>,
 | 
			
		||||
	sync: Weak<EthSync>
 | 
			
		||||
	sync: Weak<EthSync>,
 | 
			
		||||
	hashrates: RwLock<HashMap<H256, u64>>,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
impl EthClient {
 | 
			
		||||
@ -38,7 +44,8 @@ impl EthClient {
 | 
			
		||||
	pub fn new(client: &Arc<Client>, sync: &Arc<EthSync>) -> Self {
 | 
			
		||||
		EthClient {
 | 
			
		||||
			client: Arc::downgrade(client),
 | 
			
		||||
			sync: Arc::downgrade(sync)
 | 
			
		||||
			sync: Arc::downgrade(sync),
 | 
			
		||||
			hashrates: RwLock::new(HashMap::new()),
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@ -125,7 +132,7 @@ impl Eth for EthClient {
 | 
			
		||||
	// TODO: return real value of mining once it's implemented.
 | 
			
		||||
	fn is_mining(&self, params: Params) -> Result<Value, Error> {
 | 
			
		||||
		match params {
 | 
			
		||||
			Params::None => Ok(Value::Bool(false)),
 | 
			
		||||
			Params::None => to_value(&!self.hashrates.read().unwrap().is_empty()),
 | 
			
		||||
			_ => Err(Error::invalid_params())
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@ -133,7 +140,7 @@ impl Eth for EthClient {
 | 
			
		||||
	// TODO: return real hashrate once we have mining
 | 
			
		||||
	fn hashrate(&self, params: Params) -> Result<Value, Error> {
 | 
			
		||||
		match params {
 | 
			
		||||
			Params::None => to_value(&U256::zero()),
 | 
			
		||||
			Params::None => to_value(&self.hashrates.read().unwrap().iter().fold(0u64, |sum, (_, v)| sum + v)),
 | 
			
		||||
			_ => Err(Error::invalid_params())
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@@ -209,6 +216,43 @@ impl Eth for EthClient {
				to_value(&logs)
			})
	}

	fn work(&self, params: Params) -> Result<Value, Error> {
		match params {
			Params::None => {
				let c = take_weak!(self.client);
				let u = c.sealing_block().lock().unwrap();
				match *u {
					Some(ref b) => {
						let pow_hash = b.hash();
						let target = Ethash::difficulty_to_boundary(b.block().header().difficulty());
						let seed_hash = Ethash::get_seedhash(b.block().header().number());
						to_value(&(pow_hash, seed_hash, target))
					}
					_ => Err(Error::invalid_params())
				}
			},
			_ => Err(Error::invalid_params())
		}
	}

	fn submit_work(&self, params: Params) -> Result<Value, Error> {
		from_params::<(H64, H256, H256)>(params).and_then(|(nonce, pow_hash, mix_hash)| {
//			trace!("Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash);
			let c = take_weak!(self.client);
			let seal = vec![encode(&mix_hash).to_vec(), encode(&nonce).to_vec()];
			let r = c.submit_seal(pow_hash, seal);
			to_value(&r.is_ok())
		})
	}

	fn submit_hashrate(&self, params: Params) -> Result<Value, Error> {
		// TODO: Index should be U256.
		from_params::<(Index, H256)>(params).and_then(|(rate, id)| {
			self.hashrates.write().unwrap().insert(id, rate.value() as u64);
			to_value(&true)
		})
	}
}
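In `submit_work` above, the mix hash and nonce are RLP-encoded individually before being handed to `submit_seal`. A self-contained sketch of what that encoding produces for short byte strings; only the 0..=55-byte string case is handled here, which covers the 32-byte mix hash and 8-byte nonce (lists and long strings are omitted):

```rust
// Minimal RLP encoding for byte strings of length 0..=55.
fn rlp_encode_short_bytes(data: &[u8]) -> Vec<u8> {
	assert!(data.len() <= 55, "long-string form not handled in this sketch");
	if data.len() == 1 && data[0] < 0x80 {
		// A single byte below 0x80 is its own RLP encoding.
		vec![data[0]]
	} else {
		// Otherwise: one prefix byte (0x80 + length), then the payload.
		let mut out = Vec::with_capacity(1 + data.len());
		out.push(0x80 + data.len() as u8);
		out.extend_from_slice(data);
		out
	}
}

fn main() {
	let mix_hash = [0x11u8; 32];
	let nonce = [0x22u8; 8];
	// The seal is a vector of the two encoded items, mix hash first,
	// matching the order used in the hunk above.
	let seal = vec![
		rlp_encode_short_bytes(&mix_hash),
		rlp_encode_short_bytes(&nonce),
	];
	assert_eq!(seal[0][0], 0xa0); // 0x80 + 32
	assert_eq!(seal[1][0], 0x88); // 0x80 + 8
	assert_eq!(seal[0].len(), 33);
	assert_eq!(seal[1].len(), 9);
}
```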

/// Eth filter rpc implementation.

@@ -31,7 +31,7 @@ impl Web3 for Web3Client {
	fn client_version(&self, params: Params) -> Result<Value, Error> {
		match params {
			Params::None => {
				Ok(Value::String(version())),
				Ok(Value::String(version().to_owned().replace("Parity/", "Parity//"))),
			}
			_ => Err(Error::invalid_params())
		}

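Together with the `version()` change later in this commit, the hunk above rewrites the leading `Parity/` to `Parity//` before returning the string over `web3_clientVersion`. A trivial sketch of that derivation; the version components below are made-up placeholders, not the real build metadata:

```rust
// Stand-in for util's version(): the real one also embeds the git short
// sha, commit date, target triple and rustc version.
fn version() -> String {
	format!("Parity/v{}/{}-{}-{}/rustc{}", "0.9.99", "x86_64", "linux", "gnu", "1.7.0")
}

fn main() {
	// The client_version hunk above rewrites the leading "Parity/"
	// to "Parity//" before returning it over RPC.
	let client_version = version().replace("Parity/", "Parity//");
	assert_eq!(client_version, "Parity//v0.9.99/x86_64-linux-gnu/rustc1.7.0");
}
```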
@@ -477,19 +477,19 @@ impl ChainSync {
		// TODO: Decompose block and add to self.headers and self.bodies instead
		if header.number == From::from(self.current_base_block() + 1) {
			match io.chain().import_block(block_rlp.as_raw().to_vec()) {
				Err(ImportError::AlreadyInChain) => {
				Err(Error::Import(ImportError::AlreadyInChain)) => {
					trace!(target: "sync", "New block already in chain {:?}", h);
				},
				Err(ImportError::AlreadyQueued) => {
				Err(Error::Import(ImportError::AlreadyQueued)) => {
					trace!(target: "sync", "New block already queued {:?}", h);
				},
				Ok(_) => {
					self.last_imported_block = Some(header.number);
					trace!(target: "sync", "New block queued {:?}", h);
				},
				Err(ImportError::UnknownParent) => {
				Err(Error::Block(BlockError::UnknownParent(p))) => {
					unknown = true;
					trace!(target: "sync", "New block with unknown parent {:?}", h);
					trace!(target: "sync", "New block with unknown parent ({:?}) {:?}", p, h);
				},
				Err(e) => {
					debug!(target: "sync", "Bad new block {:?} : {:?}", h, e);
@@ -781,12 +781,12 @@ impl ChainSync {
				}

				match io.chain().import_block(block_rlp.out()) {
					Err(ImportError::AlreadyInChain) => {
					Err(Error::Import(ImportError::AlreadyInChain)) => {
						trace!(target: "sync", "Block already in chain {:?}", h);
						self.last_imported_block = Some(headers.0 + i as BlockNumber);
						self.last_imported_hash = Some(h.clone());
					},
					Err(ImportError::AlreadyQueued) => {
					Err(Error::Import(ImportError::AlreadyQueued)) => {
						trace!(target: "sync", "Block already queued {:?}", h);
						self.last_imported_block = Some(headers.0 + i as BlockNumber);
						self.last_imported_hash = Some(h.clone());

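The two `ChainSync` hunks above adjust the `import_block` error handling to a nested error type: the concrete import and block errors are now wrapped in a top-level `Error` enum, and `UnknownParent` carries the parent hash. A self-contained sketch of matching that shape; the enums here are simplified stand-ins, not ethcore's real definitions:

```rust
// Simplified stand-ins for the nested error enums used above.
#[derive(Debug)]
enum ImportError { AlreadyInChain, AlreadyQueued }

#[derive(Debug)]
enum BlockError { UnknownParent(u64) }

#[derive(Debug)]
enum Error {
	Import(ImportError),
	Block(BlockError),
}

fn handle_import(result: Result<(), Error>) -> &'static str {
	// Match one level deeper than before: peel the top-level wrapper,
	// then the concrete import/block error.
	match result {
		Ok(_) => "queued",
		Err(Error::Import(ImportError::AlreadyInChain)) => "already in chain",
		Err(Error::Import(ImportError::AlreadyQueued)) => "already queued",
		Err(Error::Block(BlockError::UnknownParent(_parent))) => "unknown parent",
	}
}

fn main() {
	assert_eq!(handle_import(Ok(())), "queued");
	assert_eq!(
		handle_import(Err(Error::Import(ImportError::AlreadyInChain))),
		"already in chain"
	);
	assert_eq!(
		handle_import(Err(Error::Import(ImportError::AlreadyQueued))),
		"already queued"
	);
	assert_eq!(
		handle_import(Err(Error::Block(BlockError::UnknownParent(42)))),
		"unknown parent"
	);
}
```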
@@ -40,20 +40,13 @@ use std::fmt;
use std::cmp;

use std::mem;
use std::ops;
use std::slice;
use std::result;
use std::option;
use std::str::{FromStr};
use std::convert::From;
use std::hash::{Hash, Hasher};
use std::ops::*;
use std::cmp::*;
use std::collections::*;

use serde;
use rustc_serialize::json::Json;
use rustc_serialize::base64::FromBase64;
use rustc_serialize::hex::{FromHex, FromHexError, ToHex};


@@ -1273,6 +1266,33 @@ impl From<U512> for U256 {
	}
}

impl<'a> From<&'a U256> for U512 {
	fn from(value: &'a U256) -> U512 {
		let U256(ref arr) = *value;
		let mut ret = [0; 8];
		ret[0] = arr[0];
		ret[1] = arr[1];
		ret[2] = arr[2];
		ret[3] = arr[3];
		U512(ret)
	}
}

impl<'a> From<&'a U512> for U256 {
	fn from(value: &'a U512) -> U256 {
		let U512(ref arr) = *value;
		if arr[4] | arr[5] | arr[6] | arr[7] != 0 {
			panic!("Overflow");
		}
		let mut ret = [0; 4];
		ret[0] = arr[0];
		ret[1] = arr[1];
		ret[2] = arr[2];
		ret[3] = arr[3];
		U256(ret)
	}
}

impl From<U256> for U128 {
	fn from(value: U256) -> U128 {
		let U256(ref arr) = value;

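The borrowed conversions added above just copy 64-bit limbs: `&U256 -> U512` zero-extends into the low four limbs, while `&U512 -> U256` panics if any of the high four limbs is non-zero. A self-contained sketch of the same limb arithmetic on plain arrays (not the real `U256`/`U512` types):

```rust
// Little-endian 64-bit limbs, mirroring how U256/U512 store their words.
fn widen(arr: &[u64; 4]) -> [u64; 8] {
	let mut ret = [0u64; 8];
	ret[..4].copy_from_slice(arr); // zero-extend into the low limbs
	ret
}

fn narrow(arr: &[u64; 8]) -> [u64; 4] {
	// Any bit set in the high limbs cannot be represented in 256 bits.
	if arr[4] | arr[5] | arr[6] | arr[7] != 0 {
		panic!("Overflow");
	}
	let mut ret = [0u64; 4];
	ret.copy_from_slice(&arr[..4]);
	ret
}

fn main() {
	let small = [1u64, 2, 3, 4];
	let wide = widen(&small);
	assert_eq!(wide, [1, 2, 3, 4, 0, 0, 0, 0]);
	// Round-tripping a value that fits gives the original limbs back.
	assert_eq!(narrow(&wide), small);
}
```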
@@ -99,10 +99,20 @@ mod tests {
	use common::*;
	use keys::store::SecretStore;

	fn test_path() -> &'static str {
		match ::std::fs::metadata("res") {
			Ok(_) => "res/geth_keystore",
			Err(_) => "util/res/geth_keystore"
		}
	}

	fn test_path_param(param_val: &'static str) -> String {
		test_path().to_owned() + param_val
	}

	#[test]
	fn can_enumerate() {
		let keys = enumerate_geth_keys(Path::new("res/geth_keystore")).unwrap();
		let keys = enumerate_geth_keys(Path::new(test_path())).unwrap();
		assert_eq!(2, keys.len());
	}

@@ -110,7 +120,7 @@ mod tests {
	fn can_import() {
		let temp = ::devtools::RandomTempPath::create_dir();
		let mut secret_store = SecretStore::new_in(temp.as_path());
		import_geth_key(&mut secret_store, Path::new("res/geth_keystore/UTC--2016-02-17T09-20-45.721400158Z--3f49624084b67849c7b4e805c5988c21a430f9d9")).unwrap();
		import_geth_key(&mut secret_store, Path::new(&test_path_param("/UTC--2016-02-17T09-20-45.721400158Z--3f49624084b67849c7b4e805c5988c21a430f9d9"))).unwrap();
		let key = secret_store.account(&Address::from_str("3f49624084b67849c7b4e805c5988c21a430f9d9").unwrap());
		assert!(key.is_some());
	}
@@ -119,7 +129,7 @@ mod tests {
	fn can_import_directory() {
		let temp = ::devtools::RandomTempPath::create_dir();
		let mut secret_store = SecretStore::new_in(temp.as_path());
		import_geth_keys(&mut secret_store, Path::new("res/geth_keystore")).unwrap();
		import_geth_keys(&mut secret_store, Path::new(test_path())).unwrap();

		let key = secret_store.account(&Address::from_str("3f49624084b67849c7b4e805c5988c21a430f9d9").unwrap());
		assert!(key.is_some());
@@ -134,7 +144,7 @@ mod tests {
		let temp = ::devtools::RandomTempPath::create_dir();
		{
			let mut secret_store = SecretStore::new_in(temp.as_path());
			import_geth_keys(&mut secret_store, Path::new("res/geth_keystore")).unwrap();
			import_geth_keys(&mut secret_store, Path::new(test_path())).unwrap();
		}

		let key_directory = KeyDirectory::new(&temp.as_path());
@@ -156,7 +166,7 @@ mod tests {

		let temp = ::devtools::RandomTempPath::create_dir();
		let mut secret_store = SecretStore::new_in(temp.as_path());
		import_geth_keys(&mut secret_store, Path::new("res/geth_keystore")).unwrap();
		import_geth_keys(&mut secret_store, Path::new(test_path())).unwrap();

		let val = secret_store.get::<Bytes>(&H128::from_str("62a0ad73556d496a8e1c0783d30d3ace").unwrap(), "123");
		assert!(val.is_ok());

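The `test_path` helper introduced above picks the fixture directory by probing the filesystem, so the tests work whether they run from the crate directory or the workspace root. A standalone sketch of the same probe-and-fall-back idea; the directory and file names here are illustrative, chosen to mirror the pattern above:

```rust
use std::fs;

// Pick whichever fixture directory actually exists relative to the current
// working directory; fall back to the workspace-root layout otherwise.
fn fixture_dir() -> &'static str {
	match fs::metadata("res") {
		Ok(_) => "res/geth_keystore",
		Err(_) => "util/res/geth_keystore",
	}
}

// Build a path to a single fixture inside the resolved directory.
fn fixture_file(name: &str) -> String {
	format!("{}/{}", fixture_dir(), name)
}

fn main() {
	// Resolves differently depending on where the binary is invoked from.
	println!("keystore fixtures at: {}", fixture_dir());
	println!("one fixture file: {}", fixture_file("example-key.json"));
}
```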
@@ -18,6 +18,7 @@

use std::fs::File;
use common::*;
use rlp::{Stream, RlpStream};
use target_info::Target;
use rustc_version;

@@ -69,5 +70,19 @@ pub fn contents(name: &str) -> Result<Bytes, UtilError> {

/// Get the standard version string for this software.
pub fn version() -> String {
	format!("Parity//{}-{}-{}/{}-{}-{}/rustc{}", env!("CARGO_PKG_VERSION"), short_sha(), commit_date().replace("-", ""), Target::arch(), Target::os(), Target::env(), rustc_version::version())
	format!("Parity/v{}-{}-{}/{}-{}-{}/rustc{}", env!("CARGO_PKG_VERSION"), short_sha(), commit_date().replace("-", ""), Target::arch(), Target::os(), Target::env(), rustc_version::version())
}

/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
	let mut s = RlpStream::new_list(4);
	let v =
		(u32::from_str(env!("CARGO_PKG_VERSION_MAJOR")).unwrap() << 16) +
		(u32::from_str(env!("CARGO_PKG_VERSION_MINOR")).unwrap() << 8) +
		u32::from_str(env!("CARGO_PKG_VERSION_PATCH")).unwrap();
	s.append(&v);
	s.append(&"Parity");
	s.append(&format!("{}", rustc_version::version()));
	s.append(&&Target::os()[0..2]);
	s.out()
}
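`version_data` above packs the crate version into a single `u32` as `major << 16 | minor << 8 | patch` before RLP-encoding it alongside the client name, rustc version and OS. A self-contained sketch of just the packing (and the corresponding unpacking), using the standard `CARGO_PKG_VERSION_*` environment variables available in any Cargo build; it assumes each component fits in its field (minor and patch below 256):

```rust
use std::str::FromStr;

// Pack major.minor.patch into one u32: 0x00MMmmpp.
fn pack_version(major: u32, minor: u32, patch: u32) -> u32 {
	(major << 16) + (minor << 8) + patch
}

// Recover the three components from the packed form.
fn unpack_version(v: u32) -> (u32, u32, u32) {
	(v >> 16, (v >> 8) & 0xff, v & 0xff)
}

fn main() {
	let major = u32::from_str(env!("CARGO_PKG_VERSION_MAJOR")).unwrap();
	let minor = u32::from_str(env!("CARGO_PKG_VERSION_MINOR")).unwrap();
	let patch = u32::from_str(env!("CARGO_PKG_VERSION_PATCH")).unwrap();

	let packed = pack_version(major, minor, patch);
	assert_eq!(unpack_version(packed), (major, minor, patch));
	println!("packed crate version: 0x{:08x}", packed);
}
```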