Merge changes from #674 into branch.
parent 4a58e142bd
commit 08b9cc2c41

Cargo.lock (generated)
@@ -93,6 +93,16 @@ dependencies = [
  "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "clippy"
+version = "0.0.49"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "clippy"
 version = "0.0.50"

@@ -35,7 +35,7 @@ use extras::TransactionAddress;
 use filter::Filter;
 use log_entry::LocalizedLogEntry;
 use block_queue::{BlockQueue, BlockQueueInfo};
-use blockchain::{BlockChain, BlockProvider, TreeRoute};
+use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
 use client::{BlockId, TransactionId, ClientConfig, BlockChainClient};
 pub use blockchain::CacheSize as BlockChainCacheSize;
 
@@ -222,12 +222,39 @@ impl<V> Client<V> where V: Verifier {
 		Ok(closed_block)
 	}
 
+	fn calculate_enacted_retracted(&self, import_results: Vec<ImportRoute>) -> (Vec<H256>, Vec<H256>) {
+		fn map_to_vec(map: Vec<(H256, bool)>) -> Vec<H256> {
+			map.into_iter().map(|(k, _v)| k).collect()
+		}
+
+		// In ImportRoute we get all the blocks that have been enacted and retracted by single insert.
+		// Because we are doing multiple inserts some of the blocks that were enacted in import `k`
+		// could be retracted in import `k+1`. This is why to understand if after all inserts
+		// the block is enacted or retracted we iterate over all routes and at the end final state
+		// will be in the hashmap
+		let map = import_results.into_iter().fold(HashMap::new(), |mut map, route| {
+			for hash in route.enacted {
+				map.insert(hash, true);
+			}
+			for hash in route.retracted {
+				map.insert(hash, false);
+			}
+			map
+		});
+
+		// Split to enacted retracted (using hashmap value)
+		let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v);
+		// And convert tuples to keys
+		(map_to_vec(enacted), map_to_vec(retracted))
+	}
+
 	/// This is triggered by a message coming from a block queue when the block is ready for insertion
 	pub fn import_verified_blocks(&self, io: &IoChannel<NetSyncMessage>) -> usize {
 		let max_blocks_to_import = 128;
 
-		let mut good_blocks = Vec::with_capacity(max_blocks_to_import);
-		let mut bad_blocks = HashSet::new();
+		let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
+		let mut invalid_blocks = HashSet::new();
+		let mut import_results = Vec::with_capacity(max_blocks_to_import);
 
 		let _import_lock = self.import_lock.lock();
 		let blocks = self.block_queue.drain(max_blocks_to_import);

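For illustration, a minimal standalone sketch of the calculate_enacted_retracted logic above, with u64 standing in for H256 (the routes, values and main function are illustrative, not from this commit). A block enacted by insert `k` and retracted by insert `k+1` ends up retracted, because the later route overwrites its entry in the map:

use std::collections::HashMap;

// Simplified stand-in for blockchain::ImportRoute.
struct ImportRoute { enacted: Vec<u64>, retracted: Vec<u64> }

// Fold all routes into one map (last insert for a hash wins),
// then split the map into enacted (true) and retracted (false) keys.
fn calculate_enacted_retracted(import_results: Vec<ImportRoute>) -> (Vec<u64>, Vec<u64>) {
	let map = import_results.into_iter().fold(HashMap::new(), |mut map, route| {
		for hash in route.enacted { map.insert(hash, true); }
		for hash in route.retracted { map.insert(hash, false); }
		map
	});
	let (enacted, retracted): (Vec<_>, Vec<_>) = map.into_iter().partition(|&(_k, v)| v);
	(enacted.into_iter().map(|(k, _)| k).collect(),
	 retracted.into_iter().map(|(k, _)| k).collect())
}

fn main() {
	// Insert 1 enacts blocks 1 and 2; insert 2 retracts block 2 and enacts block 3.
	let routes = vec![
		ImportRoute { enacted: vec![1, 2], retracted: vec![] },
		ImportRoute { enacted: vec![3], retracted: vec![2] },
	];
	let (enacted, retracted) = calculate_enacted_retracted(routes);
	assert!(enacted.contains(&1) && enacted.contains(&3) && !enacted.contains(&2));
	assert_eq!(retracted, vec![2]);
}
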
@@ -237,16 +264,16 @@ impl<V> Client<V> where V: Verifier {
 		for block in blocks {
 			let header = &block.header;
 
-			if bad_blocks.contains(&header.parent_hash) {
-				bad_blocks.insert(header.hash());
+			if invalid_blocks.contains(&header.parent_hash) {
+				invalid_blocks.insert(header.hash());
 				continue;
 			}
 			let closed_block = self.check_and_close_block(&block);
 			if let Err(_) = closed_block {
-				bad_blocks.insert(header.hash());
+				invalid_blocks.insert(header.hash());
 				break;
 			}
-			good_blocks.push(header.hash());
+			imported_blocks.push(header.hash());
 
 			// Are we committing an era?
 			let ancient = if header.number() >= HISTORY {

@@ -265,31 +292,33 @@ impl<V> Client<V> where V: Verifier {
 
 			// And update the chain after commit to prevent race conditions
 			// (when something is in chain but you are not able to fetch details)
-			self.chain.insert_block(&block.bytes, receipts);
+			let route = self.chain.insert_block(&block.bytes, receipts);
+			import_results.push(route);
 
 			self.report.write().unwrap().accrue_block(&block);
 			trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
 		}
 
-		let imported = good_blocks.len();
-		let bad_blocks = bad_blocks.into_iter().collect::<Vec<H256>>();
+		let imported = imported_blocks.len();
+		let invalid_blocks = invalid_blocks.into_iter().collect::<Vec<H256>>();
 
 		{
-			if !bad_blocks.is_empty() {
-				self.block_queue.mark_as_bad(&bad_blocks);
+			if !invalid_blocks.is_empty() {
+				self.block_queue.mark_as_bad(&invalid_blocks);
 			}
-			if !good_blocks.is_empty() {
-				self.block_queue.mark_as_good(&good_blocks);
+			if !imported_blocks.is_empty() {
+				self.block_queue.mark_as_good(&imported_blocks);
 			}
 		}
 
 		{
-			if !good_blocks.is_empty() && self.block_queue.queue_info().is_empty() {
+			if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() {
+				let (enacted, retracted) = self.calculate_enacted_retracted(import_results);
 				io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
-					good: good_blocks,
-					bad: bad_blocks,
-					// TODO [todr] were to take those from?
-					retracted: vec![],
+					imported: imported_blocks,
+					invalid: invalid_blocks,
+					enacted: enacted,
+					retracted: retracted,
 				})).unwrap();
 			}
 		}

@@ -28,11 +28,13 @@ pub enum SyncMessage {
 	/// New block has been imported into the blockchain
 	NewChainBlocks {
 		/// Hashes of blocks imported to blockchain
-		good: Vec<H256>,
-		/// Hashes of blocks not imported to blockchain
-		bad: Vec<H256>,
+		imported: Vec<H256>,
+		/// Hashes of blocks not imported to blockchain (because were invalid)
+		invalid: Vec<H256>,
 		/// Hashes of blocks that were removed from canonical chain
 		retracted: Vec<H256>,
+		/// Hashes of blocks that are now included in cannonical chain
+		enacted: Vec<H256>,
 	},
 	/// Best Block Hash in chain has been changed
 	NewChainHead,

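A hypothetical, self-contained sketch of how a consumer destructures the widened message (the enum below is a stand-in with Vec<u64> for Vec<H256>, not the real SyncMessage); the sync handler change further down does the same thing against the real type:

// Stand-in for the SyncMessage enum above.
enum SyncMessage {
	NewChainBlocks { imported: Vec<u64>, invalid: Vec<u64>, enacted: Vec<u64>, retracted: Vec<u64> },
	NewChainHead,
}

fn handle(message: &SyncMessage) {
	match *message {
		SyncMessage::NewChainBlocks { ref imported, ref invalid, ref enacted, ref retracted } => {
			println!("imported: {}, invalid: {}, enacted: {}, retracted: {}",
				imported.len(), invalid.len(), enacted.len(), retracted.len());
		}
		SyncMessage::NewChainHead => println!("new chain head"),
	}
}

fn main() {
	handle(&SyncMessage::NewChainBlocks {
		imported: vec![1], invalid: vec![], enacted: vec![1], retracted: vec![],
	});
	handle(&SyncMessage::NewChainHead);
}
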
@@ -89,7 +89,7 @@ pub trait MinerService : Send + Sync {
 	fn clear_and_reset(&self, chain: &BlockChainClient);
 
 	/// Called when blocks are imported to chain, updates transactions queue.
-	fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], retracted: &[H256]);
+	fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]);
 
 	/// New chain head event. Restart mining operation.
 	fn prepare_sealing(&self, chain: &BlockChainClient);

@@ -150,7 +150,7 @@ impl MinerService for Miner {
 		}
 	}
 
-	fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]) {
+	fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], _retracted: &[H256]) {
 		fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
 			let block = chain
 				.block(BlockId::Hash(*hash))

@@ -161,15 +161,20 @@ impl MinerService for Miner {
 		}
 
 		{
-			let good = good.par_iter().map(|h| fetch_transactions(chain, h));
-			let bad = bad.par_iter().map(|h| fetch_transactions(chain, h));
+			let in_chain = vec![imported, enacted, invalid];
+			let in_chain = in_chain
+				.par_iter()
+				.flat_map(|h| h.par_iter().map(|h| fetch_transactions(chain, h)));
+			let out_of_chain = retracted
+				.par_iter()
+				.map(|h| fetch_transactions(chain, h));
 
-			good.for_each(|txs| {
+			in_chain.for_each(|txs| {
 				let mut transaction_queue = self.transaction_queue.lock().unwrap();
 				let hashes = txs.iter().map(|tx| tx.hash()).collect::<Vec<H256>>();
 				transaction_queue.remove_all(&hashes, |a| chain.nonce(a));
 			});
-			bad.for_each(|txs| {
+			out_of_chain.for_each(|txs| {
 				// populate sender
 				for tx in &txs {
 					let _sender = tx.sender();

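A minimal sketch of the in-chain / out-of-chain split used above, assuming rayon for par_iter/flat_map/for_each and using stand-in types for H256 and SignedTransaction (fetch_transactions here just fabricates data): transactions found in imported, enacted or invalid blocks are dropped from the queue, while transactions from retracted blocks are fed back to it.

use rayon::prelude::*; // assumes rayon as a dependency

type Hash = u64;        // stand-in for H256
type Tx = (Hash, u64);  // stand-in for SignedTransaction: (tx hash, nonce)

// Stand-in for the real lookup against the blockchain client.
fn fetch_transactions(hash: &Hash) -> Vec<Tx> {
	vec![(*hash * 10, 0)]
}

fn main() {
	let imported = vec![1u64, 2];
	let enacted = vec![3u64];
	let invalid = vec![4u64];
	let retracted = vec![5u64];

	let in_chain = vec![&imported, &enacted, &invalid];
	let in_chain = in_chain
		.par_iter()
		.flat_map(|hashes| hashes.par_iter().map(fetch_transactions));
	let out_of_chain = retracted.par_iter().map(fetch_transactions);

	in_chain.for_each(|txs| {
		// these transactions are already on chain: remove them from the queue
		let hashes: Vec<Hash> = txs.iter().map(|&(h, _)| h).collect();
		println!("remove from queue: {:?}", hashes);
	});
	out_of_chain.for_each(|txs| {
		// these blocks left the canonical chain: put their transactions back
		println!("re-import into queue: {:?}", txs);
	});
}
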
@@ -114,7 +114,7 @@ API and Console Options:
   --rpccorsdomain URL      Equivalent to --jsonrpc-cors URL (geth-compatible).
 
 Sealing/Mining Options:
-  --gasprice GAS           Minimal gas price a transaction must have to be accepted for mining [default: 20000000000].
+  --gas-price WEI          Minimum amount of Wei to be paid for a transaction to be accepted for mining [default: 20000000000].
   --author ADDRESS         Specify the block author (aka "coinbase") address for sending block rewards
                            from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
   --extra-data STRING      Specify a custom extra-data for authored blocks, no more than 32 characters.

@@ -138,11 +138,12 @@ Geth-Compatibility Options
   --maxpeers COUNT         Equivalent to --peers COUNT.
   --nodekey KEY            Equivalent to --node-key KEY.
   --nodiscover             Equivalent to --no-discovery.
+  --gasprice WEI           Equivalent to --gas-price WEI.
   --etherbase ADDRESS      Equivalent to --author ADDRESS.
   --extradata STRING       Equivalent to --extra-data STRING.
 
 Miscellaneous Options:
-  -l --logging LOGGING     Specify the logging level.
+  -l --logging LOGGING     Specify the logging level. Must conform to the same format as RUST_LOG.
   -v --version             Show information about version.
   -h --help                Show this screen.
 "#;

@@ -175,18 +176,19 @@ struct Args {
 	flag_jsonrpc_port: u16,
 	flag_jsonrpc_cors: String,
 	flag_jsonrpc_apis: String,
+	flag_author: String,
+	flag_gas_price: String,
+	flag_extra_data: Option<String>,
 	flag_logging: Option<String>,
 	flag_version: bool,
 	// geth-compatibility...
 	flag_nodekey: Option<String>,
 	flag_nodiscover: bool,
 	flag_maxpeers: Option<usize>,
-	flag_gasprice: String,
-	flag_author: String,
-	flag_extra_data: Option<String>,
 	flag_datadir: Option<String>,
 	flag_extradata: Option<String>,
 	flag_etherbase: Option<String>,
+	flag_gasprice: Option<String>,
 	flag_rpc: bool,
 	flag_rpcaddr: Option<String>,
 	flag_rpcport: Option<u16>,

@@ -301,9 +303,10 @@ impl Configuration {
 		})
 	}
 
-	fn gasprice(&self) -> U256 {
-		U256::from_dec_str(self.args.flag_gasprice.as_str()).unwrap_or_else(|_| {
-			die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", self.args.flag_gasprice)
+	fn gas_price(&self) -> U256 {
+		let d = self.args.flag_gasprice.as_ref().unwrap_or(&self.args.flag_gas_price);
+		U256::from_dec_str(d).unwrap_or_else(|_| {
+			die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", d)
 		})
 	}
 

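A small standalone sketch of the fallback introduced above: the geth-style --gasprice value, when present, takes precedence over --gas-price (here u128 and panic! stand in for U256 and die!, and the function is illustrative, not the commit's code):

// Stand-ins: u128 for U256, panic! for die!.
fn gas_price(flag_gasprice: &Option<String>, flag_gas_price: &String) -> u128 {
	// Prefer the geth-compatible flag when it was given, otherwise fall back.
	let d = flag_gasprice.as_ref().unwrap_or(flag_gas_price);
	d.parse::<u128>().unwrap_or_else(|_| {
		panic!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", d)
	})
}

fn main() {
	let default = "20000000000".to_string();
	assert_eq!(gas_price(&None, &default), 20_000_000_000);
	assert_eq!(gas_price(&Some("1".to_string()), &default), 1);
}
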
@@ -483,7 +486,7 @@ impl Configuration {
 		let miner = Miner::new();
 		miner.set_author(self.author());
 		miner.set_extra_data(self.extra_data());
-		miner.set_minimal_gas_price(self.gasprice());
+		miner.set_minimal_gas_price(self.gas_price());
 
 		// Sync
 		let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone());

@@ -1263,9 +1263,9 @@ impl ChainSync {
 	}
 
 	/// called when block is imported to chain, updates transactions queue and propagates the blocks
-	pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], retracted: &[H256]) {
+	pub fn chain_new_blocks(&mut self, io: &mut SyncIo, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]) {
 		// Notify miner
-		self.miner.chain_new_blocks(io.chain(), good, bad, retracted);
+		self.miner.chain_new_blocks(io.chain(), imported, invalid, enacted, retracted);
 		// Propagate latests blocks
 		self.propagate_latest_blocks(io);
 		// TODO [todr] propagate transactions?

@@ -1616,10 +1616,10 @@ mod tests {
 		let mut io = TestIo::new(&mut client, &mut queue, None);
 
 		// when
-		sync.chain_new_blocks(&mut io, &[], &good_blocks, &[]);
+		sync.chain_new_blocks(&mut io, &[], &good_blocks, &[], &[]);
 		assert_eq!(sync.miner.status().transaction_queue_future, 0);
 		assert_eq!(sync.miner.status().transaction_queue_pending, 1);
-		sync.chain_new_blocks(&mut io, &good_blocks, &retracted_blocks, &[]);
+		sync.chain_new_blocks(&mut io, &good_blocks, &[], &[], &retracted_blocks);
 
 		// then
 		let status = sync.miner.status();

@@ -167,9 +167,9 @@ impl NetworkProtocolHandler<SyncMessage> for EthSync {
 	#[allow(single_match)]
 	fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) {
 		match *message {
-			SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => {
+			SyncMessage::NewChainBlocks { ref imported, ref invalid, ref enacted, ref retracted } => {
 				let mut sync_io = NetSyncIo::new(io, self.chain.deref());
-				self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted);
+				self.sync.write().unwrap().chain_new_blocks(&mut sync_io, imported, invalid, enacted, retracted);
 			},
 			SyncMessage::NewChainHead => {
 				let mut sync_io = NetSyncIo::new(io, self.chain.deref());

@@ -168,6 +168,6 @@ impl TestNet {
 
 	pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
 		let mut peer = self.peer_mut(peer_id);
-		peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[]);
+		peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[], &[]);
 	}
 }