Merging chain_blocks_verified to chain_new_blocks

Tomasz Drwięga 2016-03-07 12:16:37 +01:00
parent b0ac103900
commit e83f856104
4 changed files with 52 additions and 48 deletions
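In short: the block-propagation path that used to run from the BlockVerified message through chain_blocks_verified now runs at the end of chain_new_blocks, via the new propagate_latest_blocks helper, so NewChainBlocks is the single trigger for both transaction-queue maintenance and block propagation. As a reading aid, here is a small, self-contained model of the lag gate that guards propagation in the first chain.rs hunk below; the names mirror the diff, but the constant's value (20) and the simplified Sync struct are assumptions for illustration only, not taken from this commit.

// Stand-alone sketch of the propagation gate used by propagate_latest_blocks:
// blocks/hashes are only re-sent when the best block is within
// MAX_PEER_LAG_PROPAGATION of the last block number we propagated.
type BlockNumber = u64;
const MAX_PEER_LAG_PROPAGATION: BlockNumber = 20; // assumed value, for illustration only

struct Sync {
	last_sent_block_number: BlockNumber,
}

impl Sync {
	/// Mirrors the `if` condition in `propagate_latest_blocks` from the diff below.
	fn should_propagate(&self, best_block_number: BlockNumber) -> bool {
		(((best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber)
			< MAX_PEER_LAG_PROPAGATION
	}
}

fn main() {
	let mut sync = Sync { last_sent_block_number: 0 };
	assert!(sync.should_propagate(5));   // small lag: propagate
	sync.last_sent_block_number = 5;
	assert!(!sync.should_propagate(25)); // lag == MAX_PEER_LAG_PROPAGATION: skip
}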

View File

@@ -207,7 +207,7 @@ pub struct ChainSync {
 	/// True if common block for our and remote chain has been found
 	have_common_block: bool,
 	/// Last propagated block number
-	last_send_block_number: BlockNumber,
+	last_sent_block_number: BlockNumber,
 	/// Max blocks to download ahead
 	max_download_ahead_blocks: usize,
 	/// Network ID
@@ -236,7 +236,7 @@ impl ChainSync {
 			last_imported_hash: None,
 			syncing_difficulty: U256::from(0u64),
 			have_common_block: false,
-			last_send_block_number: 0,
+			last_sent_block_number: 0,
 			max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks),
 			network_id: config.network_id,
 			transaction_queue: Mutex::new(TransactionQueue::new()),
@@ -1248,26 +1248,25 @@ impl ChainSync {
 		sent
 	}
 
+	fn propagate_latest_blocks(&mut self, io: &mut SyncIo) {
+		let chain_info = io.chain().chain_info();
+		if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
+			let blocks = self.propagate_blocks(&chain_info, io);
+			let hashes = self.propagate_new_hashes(&chain_info, io);
+			if blocks != 0 || hashes != 0 {
+				trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
+			}
+		}
+		self.last_sent_block_number = chain_info.best_block_number;
+	}
+
 	/// Maintain other peers. Send out any new blocks and transactions
 	pub fn maintain_sync(&mut self, io: &mut SyncIo) {
 		self.check_resume(io);
 	}
 
-	/// should be called once chain has new block, triggers the latest block propagation
-	pub fn chain_blocks_verified(&mut self, io: &mut SyncIo) {
-		let chain = io.chain().chain_info();
-		if (((chain.best_block_number as i64) - (self.last_send_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
-			let blocks = self.propagate_blocks(&chain, io);
-			let hashes = self.propagate_new_hashes(&chain, io);
-			if blocks != 0 || hashes != 0 {
-				trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
-			}
-		}
-		self.last_send_block_number = chain.best_block_number;
-	}
-
-	/// called when block is imported to chain, updates transactions queue
-	pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) {
+	/// called when block is imported to chain, updates transactions queue and propagates the blocks
+	pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) {
 		fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
 			let block = chain
 				.block(BlockId::Hash(hash.clone()))
@@ -1278,6 +1277,7 @@ impl ChainSync {
 		}
+
 		{
 			let chain = io.chain();
 			let good = good.par_iter().map(|h| fetch_transactions(chain, h));
 			let bad = bad.par_iter().map(|h| fetch_transactions(chain, h));
@@ -1296,6 +1296,12 @@ impl ChainSync {
 				transaction_queue.add_all(txs, |a| chain.nonce(a));
 			});
 		}
+
+		// Propagate latests blocks
+		self.propagate_latest_blocks(io);
+		// TODO [todr] propagate transactions?
 	}
 }
 
 #[cfg(test)]
@@ -1634,13 +1640,13 @@ mod tests {
 		let retracted_blocks = vec![client.block_hash_delta_minus(1)];
 
 		let mut queue = VecDeque::new();
-		let io = TestIo::new(&mut client, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &mut queue, None);
 
 		// when
-		sync.chain_new_blocks(&io, &[], &good_blocks, &[]);
+		sync.chain_new_blocks(&mut io, &[], &good_blocks, &[]);
 		assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0);
 		assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1);
-		sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks, &[]);
+		sync.chain_new_blocks(&mut io, &good_blocks, &retracted_blocks, &[]);
 
 		// then
 		let status = sync.transaction_queue.lock().unwrap().status();

View File

@@ -154,13 +154,11 @@ impl NetworkProtocolHandler<SyncMessage> for EthSync {
 	fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) {
 		match *message {
-			SyncMessage::BlockVerified => {
-				self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref()));
-			},
 			SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => {
-				let sync_io = NetSyncIo::new(io, self.chain.deref());
-				self.sync.write().unwrap().chain_new_blocks(&sync_io, good, bad, retracted);
-			}
+				let mut sync_io = NetSyncIo::new(io, self.chain.deref());
+				self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted);
+			},
 			_ => {/* Ignore other messages */},
 		}
 	}
 }

View File

@@ -129,8 +129,8 @@ fn propagate_hashes() {
 	net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle);
 	net.sync();
 
-	net.trigger_block_verified(0); //first event just sets the marker
-	net.trigger_block_verified(0);
+	net.trigger_chain_new_blocks(0); //first event just sets the marker
+	net.trigger_chain_new_blocks(0);
 
 	// 5 peers to sync
 	assert_eq!(5, net.peer(0).queue.len());
@@ -154,8 +154,8 @@ fn propagate_blocks() {
 	net.sync();
 	net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle);
 
-	net.trigger_block_verified(0); //first event just sets the marker
-	net.trigger_block_verified(0);
+	net.trigger_chain_new_blocks(0); //first event just sets the marker
+	net.trigger_chain_new_blocks(0);
 
 	assert!(!net.peer(0).queue.is_empty());
 	// NEW_BLOCK_PACKET

View File

@@ -455,8 +455,8 @@ impl TestNet {
 		self.peers.iter().all(|p| p.queue.is_empty())
 	}
 
-	pub fn trigger_block_verified(&mut self, peer_id: usize) {
+	pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
 		let mut peer = self.peer_mut(peer_id);
-		peer.sync.chain_blocks_verified(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None));
+		peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[]);
 	}
 }