Do not insert new blocks out of order

arkpar 2016-01-10 15:08:57 +01:00
parent 11dd92f1f5
commit 5f5f26de48
3 changed files with 38 additions and 32 deletions

View File

@@ -386,31 +386,35 @@ impl ChainSync {
 		let h = header_rlp.as_raw().sha3();
 		trace!(target: "sync", "{} -> NewBlock ({})", peer_id, h);
-		match io.chain().import_block(block_rlp.as_raw()) {
-			ImportResult::AlreadyInChain => {
-				trace!(target: "sync", "New block already in chain {:?}", h);
-			},
-			ImportResult::AlreadyQueued(_) => {
-				trace!(target: "sync", "New block already queued {:?}", h);
-			},
-			ImportResult::Queued(QueueStatus::Known) => {
-				trace!(target: "sync", "New block queued {:?}", h);
-			},
-			ImportResult::Queued(QueueStatus::Unknown) => {
-				trace!(target: "sync", "New block unknown {:?}", h);
-				//TODO: handle too many unknown blocks
-				let difficulty: U256 = try!(r.val_at(1));
-				let peer_difficulty = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").difficulty;
-				if difficulty > peer_difficulty {
-					trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
-					self.sync_peer(io, peer_id, true);
+		let header_view = HeaderView::new(header_rlp.as_raw());
+		// TODO: Decompose block and add to self.headers and self.bodies instead
+		if header_view.number() == From::from(self.last_imported_block + 1) {
+			match io.chain().import_block(block_rlp.as_raw()) {
+				ImportResult::AlreadyInChain => {
+					trace!(target: "sync", "New block already in chain {:?}", h);
+				},
+				ImportResult::AlreadyQueued(_) => {
+					trace!(target: "sync", "New block already queued {:?}", h);
+				},
+				ImportResult::Queued(QueueStatus::Known) => {
+					trace!(target: "sync", "New block queued {:?}", h);
+				},
+				ImportResult::Queued(QueueStatus::Unknown) => {
+					trace!(target: "sync", "New block unknown {:?}", h);
+					//TODO: handle too many unknown blocks
+					let difficulty: U256 = try!(r.val_at(1));
+					let peer_difficulty = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").difficulty;
+					if difficulty > peer_difficulty {
+						trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
+						self.sync_peer(io, peer_id, true);
+					}
+				},
+				ImportResult::Bad =>{
+					debug!(target: "sync", "Bad new block {:?}", h);
+					io.disable_peer(peer_id);
 				}
-			},
-			ImportResult::Bad =>{
-				debug!(target: "sync", "Bad new block {:?}", h);
-				io.disable_peer(peer_id);
-			}
-		};
+			};
+		}
 		Ok(())
 	}
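
The substance of the change is the new ordering gate: a propagated NewBlock is only handed to import_block when its number is exactly last_imported_block + 1, so blocks arriving ahead of the sync front are no longer inserted out of order (the TODO notes they should eventually be decomposed into self.headers/self.bodies rather than dropped). A minimal standalone sketch of that policy, with an illustrative enum and plain u64 block numbers in place of the crate's real types:

// Simplified sketch of the "import only the next block" policy above.
// NewBlockAction and the u64 numbers are illustrative, not the real API.
#[derive(Debug, PartialEq)]
enum NewBlockAction {
	Import, // block directly extends the last imported block
	Skip,   // out of order: dropped for now (to be buffered once the TODO lands)
}

fn classify_new_block(last_imported_block: u64, block_number: u64) -> NewBlockAction {
	if block_number == last_imported_block + 1 {
		NewBlockAction::Import
	} else {
		NewBlockAction::Skip
	}
}

fn main() {
	assert_eq!(classify_new_block(100, 101), NewBlockAction::Import); // next in line
	assert_eq!(classify_new_block(100, 104), NewBlockAction::Skip);   // too far ahead
	assert_eq!(classify_new_block(100, 100), NewBlockAction::Skip);   // already imported
}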

View File

@@ -1,10 +1,11 @@
 use client::BlockChainClient;
-use util::network::{HandlerIo, PeerId, PacketId, Error as NetworkError};
+use util::network::{HandlerIo, PeerId, PacketId,};
+use util::error::UtilError;
 pub trait SyncIo {
 	fn disable_peer(&mut self, peer_id: &PeerId);
-	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError>;
-	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError>;
+	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>;
+	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>;
 	fn chain<'s>(&'s mut self) -> &'s mut BlockChainClient;
 }

@@ -27,11 +28,11 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> {
 		self.network.disable_peer(*peer_id);
 	}
-	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError>{
+	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>{
 		self.network.respond(packet_id, data)
 	}
-	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError>{
+	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>{
 		self.network.send(peer_id, packet_id, data)
 	}
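
These io.rs changes swap NetworkError for the broader UtilError in SyncIo's signatures, presumably so sync code can bubble both network-level and util-level failures up through one type; for the unchanged self.network.respond/send calls to keep compiling, the network layer's error has to convert into (or already be) UtilError. A rough sketch of that conversion pattern, with illustrative error types rather than the crate's real ones:

// Sketch of folding a network-level error into a crate-wide error type.
// NetworkError/UtilError here are stand-ins for illustration only.
#[derive(Debug)]
struct NetworkError(String);

#[derive(Debug)]
enum UtilError {
	Network(NetworkError),
	// other util-level failures would sit alongside this variant
}

impl From<NetworkError> for UtilError {
	fn from(e: NetworkError) -> UtilError {
		UtilError::Network(e)
	}
}

// A respond() in the SyncIo style: a network failure surfaces as UtilError.
fn respond(fail: bool) -> Result<(), UtilError> {
	if fail {
		return Err(UtilError::from(NetworkError("peer hung up".to_string())));
	}
	Ok(())
}

fn main() {
	assert!(respond(false).is_ok());
	assert!(respond(true).is_err());
}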

View File

@@ -4,7 +4,8 @@ use util::hash::{H256, FixedHash};
 use util::uint::{U256};
 use util::sha3::Hashable;
 use util::rlp::{self, Rlp, RlpStream, View, Stream};
-use util::network::{PeerId, PacketId, Error as NetworkError};
+use util::network::{PeerId, PacketId};
+use util::error::UtilError;
 use client::{BlockChainClient, BlockStatus, BlockNumber, TreeRoute, BlockQueueStatus, BlockChainInfo, ImportResult, QueueStatus};
 use header::Header as BlockHeader;
 use sync::io::SyncIo;

@@ -195,7 +196,7 @@ impl<'p> SyncIo for TestIo<'p> {
 	fn disable_peer(&mut self, _peer_id: &PeerId) {
 	}
-	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
+	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
 		self.queue.push_back(TestPacket {
 			data: data,
 			packet_id: packet_id,

@@ -204,7 +205,7 @@ impl<'p> SyncIo for TestIo<'p> {
 		Ok(())
 	}
-	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
+	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
 		self.queue.push_back(TestPacket {
 			data: data,
 			packet_id: packet_id,
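
The test changes mirror the same error-type swap in TestIo, the queue-backed stand-in for real network IO: respond and send never touch a socket, they just push a TestPacket onto an internal queue that tests can inspect. A compact sketch of that mock pattern, with made-up TestPacket/MockSyncIo types standing in for the crate's:

// Sketch of the queue-backed IO mock pattern: send() records the packet
// instead of transmitting it, so a test can assert on what was "sent".
// These types are illustrative stand-ins, not the crate's definitions.
use std::collections::VecDeque;

struct TestPacket {
	data: Vec<u8>,
	packet_id: u8,
	recipient: usize,
}

struct MockSyncIo {
	queue: VecDeque<TestPacket>,
}

impl MockSyncIo {
	fn send(&mut self, peer_id: usize, packet_id: u8, data: Vec<u8>) -> Result<(), ()> {
		self.queue.push_back(TestPacket { data: data, packet_id: packet_id, recipient: peer_id });
		Ok(())
	}
}

fn main() {
	let mut io = MockSyncIo { queue: VecDeque::new() };
	io.send(0, 0x07, vec![1, 2, 3]).unwrap();
	let sent = io.queue.pop_front().unwrap();
	assert_eq!(sent.recipient, 0);
	assert_eq!(sent.packet_id, 0x07);
	assert_eq!(sent.data, vec![1, 2, 3]);
}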