Merge pull request #547 from ethcore/mining

Mining
This commit is contained in:
Gav Wood 2016-03-02 13:00:22 +01:00
commit 162300a4a6
22 changed files with 382 additions and 115 deletions

2
Cargo.lock generated
View File

@ -215,11 +215,13 @@ name = "ethcore-rpc"
version = "0.9.99" version = "0.9.99"
dependencies = [ dependencies = [
"clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 0.9.99",
"ethcore 0.9.99", "ethcore 0.9.99",
"ethcore-util 0.9.99", "ethcore-util 0.9.99",
"ethsync 0.9.99", "ethsync 0.9.99",
"jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@ -172,7 +172,8 @@ fn get_data_size(block_number: u64) -> usize {
} }
#[inline] #[inline]
fn get_seedhash(block_number: u64) -> H256 { /// Given the `block_number`, determine the seed hash for Ethash.
pub fn get_seedhash(block_number: u64) -> H256 {
let epochs = block_number / ETHASH_EPOCH_LENGTH; let epochs = block_number / ETHASH_EPOCH_LENGTH;
let mut ret: H256 = [0u8; 32]; let mut ret: H256 = [0u8; 32];
for _ in 0..epochs { for _ in 0..epochs {

View File

@ -24,7 +24,7 @@ mod compute;
use std::mem; use std::mem;
use compute::Light; use compute::Light;
pub use compute::{quick_get_difficulty, H256, ProofOfWork, ETHASH_EPOCH_LENGTH}; pub use compute::{get_seedhash, quick_get_difficulty, H256, ProofOfWork, ETHASH_EPOCH_LENGTH};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
@ -35,7 +35,7 @@ struct LightCache {
prev: Option<Arc<Light>>, prev: Option<Arc<Light>>,
} }
/// Lighy/Full cache manager /// Light/Full cache manager.
pub struct EthashManager { pub struct EthashManager {
cache: Mutex<LightCache>, cache: Mutex<LightCache>,
} }

@ -1 +1 @@
Subproject commit f32954b3ddb5af2dc3dc9ec6d9a28bee848fdf70 Subproject commit 99afe8f5aad7bca5d0f1b1685390a4dea32d73c3

View File

@ -21,7 +21,7 @@
use common::*; use common::*;
use engine::*; use engine::*;
use state::*; use state::*;
use verification::PreVerifiedBlock; use verification::PreverifiedBlock;
/// A block, encoded as it is on the block chain. /// A block, encoded as it is on the block chain.
// TODO: rename to Block // TODO: rename to Block
@ -155,9 +155,9 @@ pub struct OpenBlock<'x> {
/// Just like OpenBlock, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields, /// Just like OpenBlock, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
/// and collected the uncles. /// and collected the uncles.
/// ///
/// There is no function available to push a transaction. If you want that you'll need to `reopen()` it. /// There is no function available to push a transaction.
pub struct ClosedBlock<'x> { pub struct ClosedBlock {
open_block: OpenBlock<'x>, block: ExecutedBlock,
uncle_bytes: Bytes, uncle_bytes: Bytes,
} }
@ -178,10 +178,12 @@ impl<'x> OpenBlock<'x> {
last_hashes: last_hashes, last_hashes: last_hashes,
}; };
r.block.base.header.set_number(parent.number() + 1); r.block.base.header.parent_hash = parent.hash();
r.block.base.header.set_author(author); r.block.base.header.number = parent.number + 1;
r.block.base.header.set_extra_data(extra_data); r.block.base.header.author = author;
r.block.base.header.set_timestamp_now(); r.block.base.header.set_timestamp_now(parent.timestamp());
r.block.base.header.extra_data = extra_data;
r.block.base.header.note_dirty();
engine.populate_from_parent(&mut r.block.base.header, parent); engine.populate_from_parent(&mut r.block.base.header, parent);
engine.on_new_block(&mut r.block); engine.on_new_block(&mut r.block);
@ -259,7 +261,7 @@ impl<'x> OpenBlock<'x> {
} }
/// Turn this into a `ClosedBlock`. A BlockChain must be provided in order to figure out the uncles. /// Turn this into a `ClosedBlock`. A BlockChain must be provided in order to figure out the uncles.
pub fn close(self) -> ClosedBlock<'x> { pub fn close(self) -> ClosedBlock {
let mut s = self; let mut s = self;
s.engine.on_close_block(&mut s.block); s.engine.on_close_block(&mut s.block);
s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|ref e| e.rlp_bytes().to_vec()).collect()); s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|ref e| e.rlp_bytes().to_vec()).collect());
@ -271,7 +273,10 @@ impl<'x> OpenBlock<'x> {
s.block.base.header.gas_used = s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used); s.block.base.header.gas_used = s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used);
s.block.base.header.note_dirty(); s.block.base.header.note_dirty();
ClosedBlock::new(s, uncle_bytes) ClosedBlock {
block: s.block,
uncle_bytes: uncle_bytes,
}
} }
} }
@ -279,38 +284,40 @@ impl<'x> IsBlock for OpenBlock<'x> {
fn block(&self) -> &ExecutedBlock { &self.block } fn block(&self) -> &ExecutedBlock { &self.block }
} }
impl<'x> IsBlock for ClosedBlock<'x> { impl<'x> IsBlock for ClosedBlock {
fn block(&self) -> &ExecutedBlock { &self.open_block.block } fn block(&self) -> &ExecutedBlock { &self.block }
} }
impl<'x> ClosedBlock<'x> { impl ClosedBlock {
fn new(open_block: OpenBlock<'x>, uncle_bytes: Bytes) -> Self {
ClosedBlock {
open_block: open_block,
uncle_bytes: uncle_bytes,
}
}
/// Get the hash of the header without seal arguments. /// Get the hash of the header without seal arguments.
pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) } pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) }
/// Provide a valid seal in order to turn this into a `SealedBlock`. /// Provide a valid seal in order to turn this into a `SealedBlock`.
/// ///
/// NOTE: This does not check the validity of `seal` with the engine. /// NOTE: This does not check the validity of `seal` with the engine.
pub fn seal(self, seal: Vec<Bytes>) -> Result<SealedBlock, BlockError> { pub fn seal(self, engine: &Engine, seal: Vec<Bytes>) -> Result<SealedBlock, BlockError> {
let mut s = self; let mut s = self;
if seal.len() != s.open_block.engine.seal_fields() { if seal.len() != engine.seal_fields() {
return Err(BlockError::InvalidSealArity(Mismatch{expected: s.open_block.engine.seal_fields(), found: seal.len()})); return Err(BlockError::InvalidSealArity(Mismatch{expected: engine.seal_fields(), found: seal.len()}));
} }
s.open_block.block.base.header.set_seal(seal); s.block.base.header.set_seal(seal);
Ok(SealedBlock { block: s.open_block.block, uncle_bytes: s.uncle_bytes }) Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes })
} }
/// Turn this back into an `OpenBlock`. /// Provide a valid seal in order to turn this into a `SealedBlock`.
pub fn reopen(self) -> OpenBlock<'x> { self.open_block } /// This does check the validity of `seal` with the engine.
/// Returns the `ClosedBlock` back again if the seal is no good.
pub fn try_seal(self, engine: &Engine, seal: Vec<Bytes>) -> Result<SealedBlock, ClosedBlock> {
let mut s = self;
s.block.base.header.set_seal(seal);
match engine.verify_block_seal(&s.block.base.header) {
Err(_) => Err(s),
_ => Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes }),
}
}
/// Drop this object and return the underlying database. /// Drop this object and return the underlying database.
pub fn drain(self) -> JournalDB { self.open_block.block.state.drop().1 } pub fn drain(self) -> JournalDB { self.block.state.drop().1 }
} }
impl SealedBlock { impl SealedBlock {
@ -332,7 +339,7 @@ impl IsBlock for SealedBlock {
} }
/// Enact the block given by block header, transactions and uncles /// Enact the block given by block header, transactions and uncles
pub fn enact<'x>(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock<'x>, Error> { pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
{ {
if ::log::max_log_level() >= ::log::LogLevel::Trace { if ::log::max_log_level() >= ::log::LogLevel::Trace {
let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce()); let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce());
@ -350,14 +357,14 @@ pub fn enact<'x>(header: &Header, transactions: &[SignedTransaction], uncles: &[
} }
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_bytes<'x>(block_bytes: &[u8], engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock<'x>, Error> { pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
let block = BlockView::new(block_bytes); let block = BlockView::new(block_bytes);
let header = block.header(); let header = block.header();
enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes) enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes)
} }
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
pub fn enact_verified<'x>(block: &PreVerifiedBlock, engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock<'x>, Error> { pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
let view = BlockView::new(&block.bytes); let view = BlockView::new(&block.bytes);
enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes) enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes)
} }
@ -365,7 +372,7 @@ pub fn enact_verified<'x>(block: &PreVerifiedBlock, engine: &'x Engine, db: Jour
/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block afterwards /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block afterwards
pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> { pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> {
let header = BlockView::new(block_bytes).header_view(); let header = BlockView::new(block_bytes).header_view();
Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(header.seal()))) Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(engine, header.seal())))
} }
#[cfg(test)] #[cfg(test)]
@ -386,7 +393,7 @@ mod tests {
let last_hashes = vec![genesis_header.hash()]; let last_hashes = vec![genesis_header.hash()];
let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]);
let b = b.close(); let b = b.close();
let _ = b.seal(vec![]); let _ = b.seal(engine.deref(), vec![]);
} }
#[test] #[test]
@ -398,7 +405,7 @@ mod tests {
let mut db_result = get_temp_journal_db(); let mut db_result = get_temp_journal_db();
let mut db = db_result.take(); let mut db = db_result.take();
engine.spec().ensure_db_good(&mut db); engine.spec().ensure_db_good(&mut db);
let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(vec![]).unwrap(); let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(engine.deref(), vec![]).unwrap();
let orig_bytes = b.rlp_bytes(); let orig_bytes = b.rlp_bytes();
let orig_db = b.drain(); let orig_db = b.drain();

View File

@ -28,7 +28,7 @@ use service::*;
use client::BlockStatus; use client::BlockStatus;
use util::panics::*; use util::panics::*;
known_heap_size!(0, UnVerifiedBlock, VerifyingBlock, PreVerifiedBlock); known_heap_size!(0, UnverifiedBlock, VerifyingBlock, PreverifiedBlock);
const MIN_MEM_LIMIT: usize = 16384; const MIN_MEM_LIMIT: usize = 16384;
const MIN_QUEUE_LIMIT: usize = 512; const MIN_QUEUE_LIMIT: usize = 512;
@ -105,14 +105,14 @@ pub struct BlockQueue {
max_mem_use: usize, max_mem_use: usize,
} }
struct UnVerifiedBlock { struct UnverifiedBlock {
header: Header, header: Header,
bytes: Bytes, bytes: Bytes,
} }
struct VerifyingBlock { struct VerifyingBlock {
hash: H256, hash: H256,
block: Option<PreVerifiedBlock>, block: Option<PreverifiedBlock>,
} }
struct QueueSignal { struct QueueSignal {
@ -134,8 +134,8 @@ impl QueueSignal {
#[derive(Default)] #[derive(Default)]
struct Verification { struct Verification {
unverified: VecDeque<UnVerifiedBlock>, unverified: VecDeque<UnverifiedBlock>,
verified: VecDeque<PreVerifiedBlock>, verified: VecDeque<PreverifiedBlock>,
verifying: VecDeque<VerifyingBlock>, verifying: VecDeque<VerifyingBlock>,
bad: HashSet<H256>, bad: HashSet<H256>,
} }
@ -244,7 +244,7 @@ impl BlockQueue {
} }
} }
fn drain_verifying(verifying: &mut VecDeque<VerifyingBlock>, verified: &mut VecDeque<PreVerifiedBlock>, bad: &mut HashSet<H256>) { fn drain_verifying(verifying: &mut VecDeque<VerifyingBlock>, verified: &mut VecDeque<PreverifiedBlock>, bad: &mut HashSet<H256>) {
while !verifying.is_empty() && verifying.front().unwrap().block.is_some() { while !verifying.is_empty() && verifying.front().unwrap().block.is_some() {
let block = verifying.pop_front().unwrap().block.unwrap(); let block = verifying.pop_front().unwrap().block.unwrap();
if bad.contains(&block.header.parent_hash) { if bad.contains(&block.header.parent_hash) {
@ -289,31 +289,31 @@ impl BlockQueue {
let header = BlockView::new(&bytes).header(); let header = BlockView::new(&bytes).header();
let h = header.hash(); let h = header.hash();
if self.processing.read().unwrap().contains(&h) { if self.processing.read().unwrap().contains(&h) {
return Err(ImportError::AlreadyQueued); return Err(x!(ImportError::AlreadyQueued));
} }
{ {
let mut verification = self.verification.lock().unwrap(); let mut verification = self.verification.lock().unwrap();
if verification.bad.contains(&h) { if verification.bad.contains(&h) {
return Err(ImportError::Bad(None)); return Err(x!(ImportError::KnownBad));
} }
if verification.bad.contains(&header.parent_hash) { if verification.bad.contains(&header.parent_hash) {
verification.bad.insert(h.clone()); verification.bad.insert(h.clone());
return Err(ImportError::Bad(None)); return Err(x!(ImportError::KnownBad));
} }
} }
match verify_block_basic(&header, &bytes, self.engine.deref().deref()) { match verify_block_basic(&header, &bytes, self.engine.deref().deref()) {
Ok(()) => { Ok(()) => {
self.processing.write().unwrap().insert(h.clone()); self.processing.write().unwrap().insert(h.clone());
self.verification.lock().unwrap().unverified.push_back(UnVerifiedBlock { header: header, bytes: bytes }); self.verification.lock().unwrap().unverified.push_back(UnverifiedBlock { header: header, bytes: bytes });
self.more_to_verify.notify_all(); self.more_to_verify.notify_all();
Ok(h) Ok(h)
}, },
Err(err) => { Err(err) => {
warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err);
self.verification.lock().unwrap().bad.insert(h.clone()); self.verification.lock().unwrap().bad.insert(h.clone());
Err(From::from(err)) Err(err)
} }
} }
} }
@ -352,7 +352,7 @@ impl BlockQueue {
} }
/// Removes up to `max` verified blocks from the queue /// Removes up to `max` verified blocks from the queue
pub fn drain(&mut self, max: usize) -> Vec<PreVerifiedBlock> { pub fn drain(&mut self, max: usize) -> Vec<PreverifiedBlock> {
let mut verification = self.verification.lock().unwrap(); let mut verification = self.verification.lock().unwrap();
let count = min(max, verification.verified.len()); let count = min(max, verification.verified.len());
let mut result = Vec::with_capacity(count); let mut result = Vec::with_capacity(count);
@ -455,7 +455,7 @@ mod tests {
match duplicate_import { match duplicate_import {
Err(e) => { Err(e) => {
match e { match e {
ImportError::AlreadyQueued => {}, Error::Import(ImportError::AlreadyQueued) => {},
_ => { panic!("must return AlreadyQueued error"); } _ => { panic!("must return AlreadyQueued error"); }
} }
} }

View File

@ -473,6 +473,12 @@ impl BlockChain {
self.extras_db.write(batch).unwrap(); self.extras_db.write(batch).unwrap();
} }
/// Given a block's `parent`, find every block header which represents a valid uncle.
pub fn find_uncle_headers(&self, _parent: &H256) -> Vec<Header> {
// TODO
Vec::new()
}
/// Get inserted block info which is critical to prepare extras updates. /// Get inserted block info which is critical to prepare extras updates.
fn block_info(&self, block_bytes: &[u8]) -> BlockInfo { fn block_info(&self, block_bytes: &[u8]) -> BlockInfo {
let block = BlockView::new(block_bytes); let block = BlockView::new(block_bytes);

View File

@ -21,7 +21,7 @@ use util::panics::*;
use blockchain::{BlockChain, BlockProvider}; use blockchain::{BlockChain, BlockProvider};
use views::BlockView; use views::BlockView;
use error::*; use error::*;
use header::{BlockNumber, Header}; use header::{BlockNumber};
use state::State; use state::State;
use spec::Spec; use spec::Spec;
use engine::Engine; use engine::Engine;
@ -176,7 +176,7 @@ pub struct ClientReport {
impl ClientReport { impl ClientReport {
/// Alter internal reporting to reflect the additional `block` has been processed. /// Alter internal reporting to reflect the additional `block` has been processed.
pub fn accrue_block(&mut self, block: &PreVerifiedBlock) { pub fn accrue_block(&mut self, block: &PreverifiedBlock) {
self.blocks_imported += 1; self.blocks_imported += 1;
self.transactions_applied += block.transactions.len(); self.transactions_applied += block.transactions.len();
self.gas_processed = self.gas_processed + block.header.gas_used; self.gas_processed = self.gas_processed + block.header.gas_used;
@ -193,6 +193,11 @@ pub struct Client {
report: RwLock<ClientReport>, report: RwLock<ClientReport>,
import_lock: Mutex<()>, import_lock: Mutex<()>,
panic_handler: Arc<PanicHandler>, panic_handler: Arc<PanicHandler>,
// for sealing...
sealing_block: Mutex<Option<ClosedBlock>>,
author: RwLock<Address>,
extra_data: RwLock<Bytes>,
} }
const HISTORY: u64 = 1000; const HISTORY: u64 = 1000;
@ -228,7 +233,10 @@ impl Client {
block_queue: RwLock::new(block_queue), block_queue: RwLock::new(block_queue),
report: RwLock::new(Default::default()), report: RwLock::new(Default::default()),
import_lock: Mutex::new(()), import_lock: Mutex::new(()),
panic_handler: panic_handler panic_handler: panic_handler,
sealing_block: Mutex::new(None),
author: RwLock::new(Address::new()),
extra_data: RwLock::new(Vec::new()),
})) }))
} }
@ -237,10 +245,10 @@ impl Client {
self.block_queue.write().unwrap().flush(); self.block_queue.write().unwrap().flush();
} }
fn build_last_hashes(&self, header: &Header) -> LastHashes { fn build_last_hashes(&self, parent_hash: H256) -> LastHashes {
let mut last_hashes = LastHashes::new(); let mut last_hashes = LastHashes::new();
last_hashes.resize(256, H256::new()); last_hashes.resize(256, H256::new());
last_hashes[0] = header.parent_hash.clone(); last_hashes[0] = parent_hash;
let chain = self.chain.read().unwrap(); let chain = self.chain.read().unwrap();
for i in 0..255 { for i in 0..255 {
match chain.block_details(&last_hashes[i]) { match chain.block_details(&last_hashes[i]) {
@ -253,7 +261,7 @@ impl Client {
last_hashes last_hashes
} }
fn check_and_close_block(&self, block: &PreVerifiedBlock) -> Result<ClosedBlock, ()> { fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<ClosedBlock, ()> {
let engine = self.engine.deref().deref(); let engine = self.engine.deref().deref();
let header = &block.header; let header = &block.header;
@ -273,7 +281,7 @@ impl Client {
// Enact Verified Block // Enact Verified Block
let parent = chain_has_parent.unwrap(); let parent = chain_has_parent.unwrap();
let last_hashes = self.build_last_hashes(header); let last_hashes = self.build_last_hashes(header.parent_hash.clone());
let db = self.state_db.lock().unwrap().clone(); let db = self.state_db.lock().unwrap().clone();
let enact_result = enact_verified(&block, engine, db, &parent, last_hashes); let enact_result = enact_verified(&block, engine, db, &parent, last_hashes);
@ -302,6 +310,8 @@ impl Client {
let _import_lock = self.import_lock.lock(); let _import_lock = self.import_lock.lock();
let blocks = self.block_queue.write().unwrap().drain(max_blocks_to_import); let blocks = self.block_queue.write().unwrap().drain(max_blocks_to_import);
let original_best = self.chain_info().best_block_hash;
for block in blocks { for block in blocks {
let header = &block.header; let header = &block.header;
@ -357,6 +367,10 @@ impl Client {
} }
} }
if self.chain_info().best_block_hash != original_best {
self.prepare_sealing();
}
imported imported
} }
@ -403,8 +417,82 @@ impl Client {
BlockId::Latest => Some(self.chain.read().unwrap().best_block_number()) BlockId::Latest => Some(self.chain.read().unwrap().best_block_number())
} }
} }
/// Get the author that we will seal blocks as.
pub fn author(&self) -> Address {
self.author.read().unwrap().clone()
}
/// Set the author that we will seal blocks as.
pub fn set_author(&self, author: Address) {
*self.author.write().unwrap() = author;
}
/// Get the extra_data that we will seal blocks with.
pub fn extra_data(&self) -> Bytes {
self.extra_data.read().unwrap().clone()
}
/// Set the extra_data that we will seal blocks with.
pub fn set_extra_data(&self, extra_data: Bytes) {
*self.extra_data.write().unwrap() = extra_data;
}
/// New chain head event. Restart mining operation.
pub fn prepare_sealing(&self) {
let h = self.chain.read().unwrap().best_block_hash();
let mut b = OpenBlock::new(
self.engine.deref().deref(),
self.state_db.lock().unwrap().clone(),
match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => {return;} },
self.build_last_hashes(h.clone()),
self.author(),
self.extra_data()
);
self.chain.read().unwrap().find_uncle_headers(&h).into_iter().foreach(|h| { b.push_uncle(h).unwrap(); });
// TODO: push transactions.
let b = b.close();
trace!("Sealing: number={}, hash={}, diff={}", b.hash(), b.block().header().difficulty(), b.block().header().number());
*self.sealing_block.lock().unwrap() = Some(b);
}
/// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
pub fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>> {
if self.sealing_block.lock().unwrap().is_none() {
self.prepare_sealing();
}
&self.sealing_block
}
/// Submit `seal` as a valid solution for the header of `pow_hash`.
/// Will check the seal, but not actually insert the block into the chain.
pub fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
let mut maybe_b = self.sealing_block.lock().unwrap();
match *maybe_b {
Some(ref b) if b.hash() == pow_hash => {}
_ => { return Err(Error::PowHashInvalid); }
}
let b = maybe_b.take();
match b.unwrap().try_seal(self.engine.deref().deref(), seal) {
Err(old) => {
*maybe_b = Some(old);
Err(Error::PowInvalid)
}
Ok(sealed) => {
// TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice.
try!(self.import_block(sealed.rlp_bytes()));
Ok(())
}
}
}
} }
// TODO: need MinerService MinerIoHandler
impl BlockChainClient for Client { impl BlockChainClient for Client {
fn block_header(&self, id: BlockId) -> Option<Bytes> { fn block_header(&self, id: BlockId) -> Option<Bytes> {
let chain = self.chain.read().unwrap(); let chain = self.chain.read().unwrap();
@ -477,12 +565,14 @@ impl BlockChainClient for Client {
} }
fn import_block(&self, bytes: Bytes) -> ImportResult { fn import_block(&self, bytes: Bytes) -> ImportResult {
let header = BlockView::new(&bytes).header(); {
if self.chain.read().unwrap().is_known(&header.hash()) { let header = BlockView::new(&bytes).header_view();
return Err(ImportError::AlreadyInChain); if self.chain.read().unwrap().is_known(&header.sha3()) {
} return Err(x!(ImportError::AlreadyInChain));
if self.block_status(BlockId::Hash(header.parent_hash)) == BlockStatus::Unknown { }
return Err(ImportError::UnknownParent); if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown {
return Err(x!(BlockError::UnknownParent(header.parent_hash())));
}
} }
self.block_queue.write().unwrap().import_block(bytes) self.block_queue.write().unwrap().import_block(bytes)
} }

View File

@ -30,8 +30,6 @@ pub trait Engine : Sync + Send {
/// The number of additional header fields required for this engine. /// The number of additional header fields required for this engine.
fn seal_fields(&self) -> usize { 0 } fn seal_fields(&self) -> usize { 0 }
/// Default values of the additional fields RLP-encoded in a raw (non-list) harness.
fn seal_rlp(&self) -> Bytes { vec![] }
/// Additional engine-specific information for the user/developer concerning `header`. /// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, _header: &Header) -> HashMap<String, String> { HashMap::new() } fn extra_info(&self, _header: &Header) -> HashMap<String, String> { HashMap::new() }
@ -76,9 +74,20 @@ pub trait Engine : Sync + Send {
/// Verify a particular transaction is valid. /// Verify a particular transaction is valid.
fn verify_transaction(&self, _t: &SignedTransaction, _header: &Header) -> Result<(), Error> { Ok(()) } fn verify_transaction(&self, _t: &SignedTransaction, _header: &Header) -> Result<(), Error> { Ok(()) }
/// Don't forget to call Super::populateFromParent when subclassing & overriding. /// Verify the seal of a block. This is an auxiliary method that actually just calls other `verify_` methods
/// to get the job done. By default it must pass `verify_basic` and `verify_block_unordered`. If more or fewer
/// methods are needed for an Engine, this may be overridden.
fn verify_block_seal(&self, header: &Header) -> Result<(), Error> {
self.verify_block_basic(header, None).and_then(|_| self.verify_block_unordered(header, None))
}
/// Don't forget to call Super::populate_from_parent when subclassing & overriding.
// TODO: consider including State in the params. // TODO: consider including State in the params.
fn populate_from_parent(&self, _header: &mut Header, _parent: &Header) {} fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
header.difficulty = parent.difficulty;
header.gas_limit = parent.gas_limit;
header.note_dirty();
}
// TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic // TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic
// from Spec into here and removing the Spec::builtins field. // from Spec into here and removing the Spec::builtins field.

View File

@ -131,25 +131,14 @@ pub enum BlockError {
#[derive(Debug)] #[derive(Debug)]
/// Import to the block queue result /// Import to the block queue result
pub enum ImportError { pub enum ImportError {
/// Bad block detected /// Already in the block chain.
Bad(Option<Error>),
/// Already in the block chain
AlreadyInChain, AlreadyInChain,
/// Already in the block queue /// Already in the block queue.
AlreadyQueued, AlreadyQueued,
/// Unknown parent /// Already marked as bad from a previous import (could mean parent is bad).
UnknownParent, KnownBad,
} }
impl From<Error> for ImportError {
fn from(err: Error) -> ImportError {
ImportError::Bad(Some(err))
}
}
/// Result of import block operation.
pub type ImportResult = Result<H256, ImportError>;
#[derive(Debug)] #[derive(Debug)]
/// General error type which should be capable of representing all errors in ethcore. /// General error type which should be capable of representing all errors in ethcore.
pub enum Error { pub enum Error {
@ -163,14 +152,29 @@ pub enum Error {
Execution(ExecutionError), Execution(ExecutionError),
/// Error concerning transaction processing. /// Error concerning transaction processing.
Transaction(TransactionError), Transaction(TransactionError),
/// Error concerning block import.
Import(ImportError),
/// PoW hash is invalid or out of date.
PowHashInvalid,
/// The value of the nonce or mix hash is invalid.
PowInvalid,
} }
/// Result of import block operation.
pub type ImportResult = Result<H256, Error>;
impl From<TransactionError> for Error { impl From<TransactionError> for Error {
fn from(err: TransactionError) -> Error { fn from(err: TransactionError) -> Error {
Error::Transaction(err) Error::Transaction(err)
} }
} }
impl From<ImportError> for Error {
fn from(err: ImportError) -> Error {
Error::Import(err)
}
}
impl From<BlockError> for Error { impl From<BlockError> for Error {
fn from(err: BlockError) -> Error { fn from(err: BlockError) -> Error {
Error::Block(err) Error::Block(err)

View File

@ -74,8 +74,6 @@ impl Engine for Ethash {
fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) } fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) }
// Two fields - mix // Two fields - mix
fn seal_fields(&self) -> usize { 2 } fn seal_fields(&self) -> usize { 2 }
// Two empty data items in RLP.
fn seal_rlp(&self) -> Bytes { encode(&H64::new()).to_vec() }
/// Additional engine-specific information for the user/developer concerning `header`. /// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, _header: &Header) -> HashMap<String, String> { HashMap::new() } fn extra_info(&self, _header: &Header) -> HashMap<String, String> { HashMap::new() }
@ -106,7 +104,7 @@ impl Engine for Ethash {
max(gas_floor_target, gas_limit - gas_limit / bound_divisor + x!(1) + (header.gas_used * x!(6) / x!(5)) / bound_divisor) max(gas_floor_target, gas_limit - gas_limit / bound_divisor + x!(1) + (header.gas_used * x!(6) / x!(5)) / bound_divisor)
} }
}; };
header.note_dirty();
// info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit); // info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit);
} }
@ -144,9 +142,10 @@ impl Engine for Ethash {
} }
let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(quick_get_difficulty( let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(quick_get_difficulty(
&Ethash::to_ethash(header.bare_hash()), &Ethash::to_ethash(header.bare_hash()),
header.nonce().low_u64(), header.nonce().low_u64(),
&Ethash::to_ethash(header.mix_hash())))); &Ethash::to_ethash(header.mix_hash())
)));
if difficulty < header.difficulty { if difficulty < header.difficulty {
return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty), max: None, found: difficulty }))); return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty), max: None, found: difficulty })));
} }
@ -241,10 +240,21 @@ impl Ethash {
target target
} }
fn boundary_to_difficulty(boundary: &H256) -> U256 { /// Convert an Ethash boundary to its original difficulty. Basically just `f(x) = 2^256 / x`.
pub fn boundary_to_difficulty(boundary: &H256) -> U256 {
U256::from((U512::one() << 256) / x!(U256::from(boundary.as_slice()))) U256::from((U512::one() << 256) / x!(U256::from(boundary.as_slice())))
} }
/// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`.
pub fn difficulty_to_boundary(difficulty: &U256) -> H256 {
x!(U256::from((U512::one() << 256) / x!(difficulty)))
}
/// Given the `block_number`, determine the seed hash for Ethash.
pub fn get_seedhash(number: BlockNumber) -> H256 {
Self::from_ethash(ethash::get_seedhash(number))
}
fn to_ethash(hash: H256) -> EH256 { fn to_ethash(hash: H256) -> EH256 {
unsafe { mem::transmute(hash) } unsafe { mem::transmute(hash) }
} }
@ -255,12 +265,20 @@ impl Ethash {
} }
impl Header { impl Header {
fn nonce(&self) -> H64 { /// Get the none field of the header.
pub fn nonce(&self) -> H64 {
decode(&self.seal()[1]) decode(&self.seal()[1])
} }
fn mix_hash(&self) -> H256 {
/// Get the mix hash field of the header.
pub fn mix_hash(&self) -> H256 {
decode(&self.seal()[0]) decode(&self.seal()[0])
} }
/// Set the nonce and mix hash fields of the header.
pub fn set_nonce_and_mix_hash(&mut self, nonce: &H64, mix_hash: &H256) {
self.seal = vec![encode(mix_hash).to_vec(), encode(nonce).to_vec()];
}
} }
#[cfg(test)] #[cfg(test)]

View File

@ -102,10 +102,12 @@ impl Header {
Self::default() Self::default()
} }
/// Get the number field of the header. /// Get the parent_hash field of the header.
pub fn number(&self) -> BlockNumber { self.number } pub fn parent_hash(&self) -> &H256 { &self.parent_hash }
/// Get the timestamp field of the header. /// Get the timestamp field of the header.
pub fn timestamp(&self) -> u64 { self.timestamp } pub fn timestamp(&self) -> u64 { self.timestamp }
/// Get the number field of the header.
pub fn number(&self) -> BlockNumber { self.number }
/// Get the author field of the header. /// Get the author field of the header.
pub fn author(&self) -> &Address { &self.author } pub fn author(&self) -> &Address { &self.author }
@ -127,11 +129,13 @@ impl Header {
// TODO: seal_at, set_seal_at &c. // TODO: seal_at, set_seal_at &c.
/// Set the number field of the header. /// Set the number field of the header.
pub fn set_number(&mut self, a: BlockNumber) { self.number = a; self.note_dirty(); } pub fn set_parent_hash(&mut self, a: H256) { self.parent_hash = a; self.note_dirty(); }
/// Set the timestamp field of the header. /// Set the timestamp field of the header.
pub fn set_timestamp(&mut self, a: u64) { self.timestamp = a; self.note_dirty(); } pub fn set_timestamp(&mut self, a: u64) { self.timestamp = a; self.note_dirty(); }
/// Set the timestamp field of the header to the current time. /// Set the timestamp field of the header to the current time.
pub fn set_timestamp_now(&mut self) { self.timestamp = now_utc().to_timespec().sec as u64; self.note_dirty(); } pub fn set_timestamp_now(&mut self, but_later_than: u64) { self.timestamp = max(now_utc().to_timespec().sec as u64, but_later_than + 1); self.note_dirty(); }
/// Set the number field of the header.
pub fn set_number(&mut self, a: BlockNumber) { self.number = a; self.note_dirty(); }
/// Set the author field of the header. /// Set the author field of the header.
pub fn set_author(&mut self, a: Address) { if a != self.author { self.author = a; self.note_dirty(); } } pub fn set_author(&mut self, a: Address) { if a != self.author { self.author = a; self.note_dirty(); } }

View File

@ -115,7 +115,7 @@ declare_test!{StateTests_stSolidityTest, "StateTests/stSolidityTest"}
declare_test!{StateTests_stSpecialTest, "StateTests/stSpecialTest"} declare_test!{StateTests_stSpecialTest, "StateTests/stSpecialTest"}
declare_test!{StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"} declare_test!{StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"}
declare_test!{StateTests_stTransactionTest, "StateTests/stTransactionTest"} declare_test!{StateTests_stTransactionTest, "StateTests/stTransactionTest"}
//declare_test!{StateTests_stTransitionTest, "StateTests/stTransitionTest"} declare_test!{StateTests_stTransitionTest, "StateTests/stTransitionTest"}
declare_test!{StateTests_stWalletTest, "StateTests/stWalletTest"} declare_test!{StateTests_stWalletTest, "StateTests/stWalletTest"}

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use client::{BlockChainClient, Client, ClientConfig, BlockId}; use client::{BlockChainClient, Client, ClientConfig, BlockId};
use block::IsBlock;
use tests::helpers::*; use tests::helpers::*;
use common::*; use common::*;
use devtools::*; use devtools::*;
@ -106,3 +107,22 @@ fn can_collect_garbage() {
client.tick(); client.tick();
assert!(client.blockchain_cache_info().blocks < 100 * 1024); assert!(client.blockchain_cache_info().blocks < 100 * 1024);
} }
#[test]
fn can_mine() {
let dummy_blocks = get_good_dummy_block_seq(2);
let client_result = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]);
let client = client_result.reference();
let b = client.sealing_block();
let pow_hash = {
let u = b.lock().unwrap();
match *u {
Some(ref b) => {
assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3());
b.hash()
}
None => { panic!(); }
}
};
assert!(client.submit_seal(pow_hash, vec![]).is_ok());
}

View File

@ -26,7 +26,7 @@ use engine::Engine;
use blockchain::*; use blockchain::*;
/// Preprocessed block data gathered in `verify_block_unordered` call /// Preprocessed block data gathered in `verify_block_unordered` call
pub struct PreVerifiedBlock { pub struct PreverifiedBlock {
/// Populated block header /// Populated block header
pub header: Header, pub header: Header,
/// Populated block transactions /// Populated block transactions
@ -55,8 +55,8 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res
/// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash. /// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
/// Still operates on a individual block /// Still operates on a individual block
/// Returns a PreVerifiedBlock structure populated with transactions /// Returns a PreverifiedBlock structure populated with transactions
pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result<PreVerifiedBlock, Error> { pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result<PreverifiedBlock, Error> {
try!(engine.verify_block_unordered(&header, Some(&bytes))); try!(engine.verify_block_unordered(&header, Some(&bytes)));
for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::<Header>()) { for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::<Header>()) {
try!(engine.verify_block_unordered(&u, None)); try!(engine.verify_block_unordered(&u, None));
@ -70,7 +70,7 @@ pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) ->
transactions.push(t); transactions.push(t);
} }
} }
Ok(PreVerifiedBlock { Ok(PreverifiedBlock {
header: header, header: header,
transactions: transactions, transactions: transactions,
bytes: bytes, bytes: bytes,

View File

@ -85,6 +85,10 @@ Options:
--jsonrpc-url URL Specify URL for JSON-RPC API server [default: 127.0.0.1:8545]. --jsonrpc-url URL Specify URL for JSON-RPC API server [default: 127.0.0.1:8545].
--jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null].
--author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards
from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
--extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters.
-l --logging LOGGING Specify the logging level. -l --logging LOGGING Specify the logging level.
-v --version Show information about version. -v --version Show information about version.
-h --help Show this screen. -h --help Show this screen.
@ -114,6 +118,8 @@ struct Args {
flag_jsonrpc_cors: String, flag_jsonrpc_cors: String,
flag_logging: Option<String>, flag_logging: Option<String>,
flag_version: bool, flag_version: bool,
flag_author: String,
flag_extra_data: Option<String>,
} }
fn setup_log(init: &Option<String>) { fn setup_log(init: &Option<String>) {
@ -196,6 +202,18 @@ impl Configuration {
self.args.flag_db_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) self.args.flag_db_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
} }
fn author(&self) -> Address {
Address::from_str(&self.args.flag_author).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author))
}
fn extra_data(&self) -> Bytes {
match self.args.flag_extra_data {
Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(),
None => version_data(),
Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); }
}
}
fn _keys_path(&self) -> String { fn _keys_path(&self) -> String {
self.args.flag_keys_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) self.args.flag_keys_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
} }
@ -296,6 +314,8 @@ impl Configuration {
client_config.queue.max_mem_use = self.args.flag_queue_max_size; client_config.queue.max_mem_use = self.args.flag_queue_max_size;
let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap();
let client = service.client().clone(); let client = service.client().clone();
client.set_author(self.author());
client.set_extra_data(self.extra_data());
// Sync // Sync
let sync = EthSync::register(service.network(), sync_config, client); let sync = EthSync::register(service.network(), sync_config, client);
@ -354,7 +374,6 @@ impl Default for Informant {
} }
impl Informant { impl Informant {
fn format_bytes(b: usize) -> String { fn format_bytes(b: usize) -> String {
match binary_prefix(b as f64) { match binary_prefix(b as f64) {
Standalone(bytes) => format!("{} bytes", bytes), Standalone(bytes) => format!("{} bytes", bytes),

View File

@ -9,12 +9,14 @@ build = "build.rs"
[lib] [lib]
[dependencies] [dependencies]
log = "0.3"
serde = "0.7.0" serde = "0.7.0"
serde_json = "0.7.0" serde_json = "0.7.0"
jsonrpc-core = "1.2" jsonrpc-core = "1.2"
jsonrpc-http-server = "2.1" jsonrpc-http-server = "2.1"
ethcore-util = { path = "../util" } ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" } ethcore = { path = "../ethcore" }
ethash = { path = "../ethash" }
ethsync = { path = "../sync" } ethsync = { path = "../sync" }
clippy = { version = "0.0.44", optional = true } clippy = { version = "0.0.44", optional = true }
rustc-serialize = "0.3" rustc-serialize = "0.3"

View File

@ -15,13 +15,17 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Eth rpc implementation. //! Eth rpc implementation.
use std::sync::{Arc, Weak};
use ethsync::{EthSync, SyncState}; use ethsync::{EthSync, SyncState};
use jsonrpc_core::*; use jsonrpc_core::*;
use util::numbers::*; use util::numbers::*;
use util::sha3::*; use util::sha3::*;
use util::standard::{RwLock, HashMap, Arc, Weak};
use util::rlp::encode;
use ethcore::client::*; use ethcore::client::*;
use ethcore::block::{IsBlock};
use ethcore::views::*; use ethcore::views::*;
//#[macro_use] extern crate log;
use ethcore::ethereum::Ethash;
use ethcore::ethereum::denominations::shannon; use ethcore::ethereum::denominations::shannon;
use v1::traits::{Eth, EthFilter}; use v1::traits::{Eth, EthFilter};
use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, OptionalValue, Index, Filter, Log}; use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, OptionalValue, Index, Filter, Log};
@ -29,7 +33,8 @@ use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncIn
/// Eth rpc implementation. /// Eth rpc implementation.
pub struct EthClient { pub struct EthClient {
client: Weak<Client>, client: Weak<Client>,
sync: Weak<EthSync> sync: Weak<EthSync>,
hashrates: RwLock<HashMap<H256, u64>>,
} }
impl EthClient { impl EthClient {
@ -37,7 +42,8 @@ impl EthClient {
pub fn new(client: &Arc<Client>, sync: &Arc<EthSync>) -> Self { pub fn new(client: &Arc<Client>, sync: &Arc<EthSync>) -> Self {
EthClient { EthClient {
client: Arc::downgrade(client), client: Arc::downgrade(client),
sync: Arc::downgrade(sync) sync: Arc::downgrade(sync),
hashrates: RwLock::new(HashMap::new()),
} }
} }
@ -124,7 +130,7 @@ impl Eth for EthClient {
// TODO: return real value of mining once it's implemented. // TODO: return real value of mining once it's implemented.
fn is_mining(&self, params: Params) -> Result<Value, Error> { fn is_mining(&self, params: Params) -> Result<Value, Error> {
match params { match params {
Params::None => Ok(Value::Bool(false)), Params::None => to_value(&!self.hashrates.read().unwrap().is_empty()),
_ => Err(Error::invalid_params()) _ => Err(Error::invalid_params())
} }
} }
@ -132,7 +138,7 @@ impl Eth for EthClient {
// TODO: return real hashrate once we have mining // TODO: return real hashrate once we have mining
fn hashrate(&self, params: Params) -> Result<Value, Error> { fn hashrate(&self, params: Params) -> Result<Value, Error> {
match params { match params {
Params::None => to_value(&U256::zero()), Params::None => to_value(&self.hashrates.read().unwrap().iter().fold(0u64, |sum, (_, v)| sum + v)),
_ => Err(Error::invalid_params()) _ => Err(Error::invalid_params())
} }
} }
@ -208,6 +214,43 @@ impl Eth for EthClient {
to_value(&logs) to_value(&logs)
}) })
} }
fn work(&self, params: Params) -> Result<Value, Error> {
match params {
Params::None => {
let c = take_weak!(self.client);
let u = c.sealing_block().lock().unwrap();
match *u {
Some(ref b) => {
let pow_hash = b.hash();
let target = Ethash::difficulty_to_boundary(b.block().header().difficulty());
let seed_hash = Ethash::get_seedhash(b.block().header().number());
to_value(&(pow_hash, seed_hash, target))
}
_ => Err(Error::invalid_params())
}
},
_ => Err(Error::invalid_params())
}
}
fn submit_work(&self, params: Params) -> Result<Value, Error> {
from_params::<(H64, H256, H256)>(params).and_then(|(nonce, pow_hash, mix_hash)| {
// trace!("Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash);
let c = take_weak!(self.client);
let seal = vec![encode(&mix_hash).to_vec(), encode(&nonce).to_vec()];
let r = c.submit_seal(pow_hash, seal);
to_value(&r.is_ok())
})
}
fn submit_hashrate(&self, params: Params) -> Result<Value, Error> {
// TODO: Index should be U256.
from_params::<(Index, H256)>(params).and_then(|(rate, id)| {
self.hashrates.write().unwrap().insert(id, rate.value() as u64);
to_value(&true)
})
}
} }
/// Eth filter rpc implementation. /// Eth filter rpc implementation.

View File

@ -31,7 +31,7 @@ impl Web3 for Web3Client {
fn client_version(&self, params: Params) -> Result<Value, Error> { fn client_version(&self, params: Params) -> Result<Value, Error> {
match params { match params {
Params::None => { Params::None => {
Ok(Value::String(version())), Ok(Value::String(version().to_owned().replace("Parity/", "Parity//"))),
} }
_ => Err(Error::invalid_params()) _ => Err(Error::invalid_params())
} }

View File

@ -477,19 +477,19 @@ impl ChainSync {
// TODO: Decompose block and add to self.headers and self.bodies instead // TODO: Decompose block and add to self.headers and self.bodies instead
if header.number == From::from(self.current_base_block() + 1) { if header.number == From::from(self.current_base_block() + 1) {
match io.chain().import_block(block_rlp.as_raw().to_vec()) { match io.chain().import_block(block_rlp.as_raw().to_vec()) {
Err(ImportError::AlreadyInChain) => { Err(Error::Import(ImportError::AlreadyInChain)) => {
trace!(target: "sync", "New block already in chain {:?}", h); trace!(target: "sync", "New block already in chain {:?}", h);
}, },
Err(ImportError::AlreadyQueued) => { Err(Error::Import(ImportError::AlreadyQueued)) => {
trace!(target: "sync", "New block already queued {:?}", h); trace!(target: "sync", "New block already queued {:?}", h);
}, },
Ok(_) => { Ok(_) => {
self.last_imported_block = Some(header.number); self.last_imported_block = Some(header.number);
trace!(target: "sync", "New block queued {:?}", h); trace!(target: "sync", "New block queued {:?}", h);
}, },
Err(ImportError::UnknownParent) => { Err(Error::Block(BlockError::UnknownParent(p))) => {
unknown = true; unknown = true;
trace!(target: "sync", "New block with unknown parent {:?}", h); trace!(target: "sync", "New block with unknown parent ({:?}) {:?}", p, h);
}, },
Err(e) => { Err(e) => {
debug!(target: "sync", "Bad new block {:?} : {:?}", h, e); debug!(target: "sync", "Bad new block {:?} : {:?}", h, e);
@ -781,12 +781,12 @@ impl ChainSync {
} }
match io.chain().import_block(block_rlp.out()) { match io.chain().import_block(block_rlp.out()) {
Err(ImportError::AlreadyInChain) => { Err(Error::Import(ImportError::AlreadyInChain)) => {
trace!(target: "sync", "Block already in chain {:?}", h); trace!(target: "sync", "Block already in chain {:?}", h);
self.last_imported_block = Some(headers.0 + i as BlockNumber); self.last_imported_block = Some(headers.0 + i as BlockNumber);
self.last_imported_hash = Some(h.clone()); self.last_imported_hash = Some(h.clone());
}, },
Err(ImportError::AlreadyQueued) => { Err(Error::Import(ImportError::AlreadyQueued)) => {
trace!(target: "sync", "Block already queued {:?}", h); trace!(target: "sync", "Block already queued {:?}", h);
self.last_imported_block = Some(headers.0 + i as BlockNumber); self.last_imported_block = Some(headers.0 + i as BlockNumber);
self.last_imported_hash = Some(h.clone()); self.last_imported_hash = Some(h.clone());

View File

@ -1266,6 +1266,33 @@ impl From<U512> for U256 {
} }
} }
impl<'a> From<&'a U256> for U512 {
fn from(value: &'a U256) -> U512 {
let U256(ref arr) = *value;
let mut ret = [0; 8];
ret[0] = arr[0];
ret[1] = arr[1];
ret[2] = arr[2];
ret[3] = arr[3];
U512(ret)
}
}
impl<'a> From<&'a U512> for U256 {
fn from(value: &'a U512) -> U256 {
let U512(ref arr) = *value;
if arr[4] | arr[5] | arr[6] | arr[7] != 0 {
panic!("Overflow");
}
let mut ret = [0; 4];
ret[0] = arr[0];
ret[1] = arr[1];
ret[2] = arr[2];
ret[3] = arr[3];
U256(ret)
}
}
impl From<U256> for U128 { impl From<U256> for U128 {
fn from(value: U256) -> U128 { fn from(value: U256) -> U128 {
let U256(ref arr) = value; let U256(ref arr) = value;

View File

@ -18,6 +18,7 @@
use std::fs::File; use std::fs::File;
use common::*; use common::*;
use rlp::{Stream, RlpStream};
use target_info::Target; use target_info::Target;
use rustc_version; use rustc_version;
@ -69,5 +70,19 @@ pub fn contents(name: &str) -> Result<Bytes, UtilError> {
/// Get the standard version string for this software. /// Get the standard version string for this software.
pub fn version() -> String { pub fn version() -> String {
format!("Parity//{}-{}-{}/{}-{}-{}/rustc{}", env!("CARGO_PKG_VERSION"), short_sha(), commit_date().replace("-", ""), Target::arch(), Target::os(), Target::env(), rustc_version::version()) format!("Parity/v{}-{}-{}/{}-{}-{}/rustc{}", env!("CARGO_PKG_VERSION"), short_sha(), commit_date().replace("-", ""), Target::arch(), Target::os(), Target::env(), rustc_version::version())
}
/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
let mut s = RlpStream::new_list(4);
let v =
(u32::from_str(env!("CARGO_PKG_VERSION_MAJOR")).unwrap() << 16) +
(u32::from_str(env!("CARGO_PKG_VERSION_MINOR")).unwrap() << 8) +
u32::from_str(env!("CARGO_PKG_VERSION_PATCH")).unwrap();
s.append(&v);
s.append(&"Parity");
s.append(&format!("{}", rustc_version::version()));
s.append(&&Target::os()[0..2]);
s.out()
} }