Do not insert new blocks out of order

arkpar 2016-01-10 15:08:57 +01:00
parent 11dd92f1f5
commit 5f5f26de48
3 changed files with 38 additions and 32 deletions

@@ -386,6 +386,9 @@ impl ChainSync {
 		let h = header_rlp.as_raw().sha3();
 		trace!(target: "sync", "{} -> NewBlock ({})", peer_id, h);
+		let header_view = HeaderView::new(header_rlp.as_raw());
+		// TODO: Decompose block and add to self.headers and self.bodies instead
+		if header_view.number() == From::from(self.last_imported_block + 1) {
 		match io.chain().import_block(block_rlp.as_raw()) {
 			ImportResult::AlreadyInChain => {
 				trace!(target: "sync", "New block already in chain {:?}", h);
@@ -411,6 +414,7 @@ impl ChainSync {
 				io.disable_peer(peer_id);
 			}
 		};
+		}
 		Ok(())
 	}
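
The change above only imports a NewBlock announcement when its number is exactly one past the last imported block; anything further ahead is no longer inserted out of order. A minimal standalone sketch of that guard, with simplified types and a made-up on_peer_new_block helper (not the ethcore API):

// Standalone sketch of the guard introduced above: a NewBlock is imported only
// when its number is exactly last_imported_block + 1, otherwise it is skipped
// instead of being inserted out of order.
#[derive(Debug, PartialEq)]
enum ImportResult {
    Queued,
}

struct ChainSync {
    last_imported_block: u64,
}

impl ChainSync {
    fn on_peer_new_block(&mut self, block_number: u64) -> Option<ImportResult> {
        if block_number == self.last_imported_block + 1 {
            // Next expected block: import it and advance the sync pointer.
            self.last_imported_block = block_number;
            Some(ImportResult::Queued)
        } else {
            // Out-of-order announcement: ignore it here; the regular
            // header/body download path will fetch it in order.
            None
        }
    }
}

fn main() {
    let mut sync = ChainSync { last_imported_block: 100 };
    assert_eq!(sync.on_peer_new_block(101), Some(ImportResult::Queued)); // in order, imported
    assert_eq!(sync.on_peer_new_block(105), None);                       // out of order, skipped
}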

@@ -1,10 +1,11 @@
 use client::BlockChainClient;
-use util::network::{HandlerIo, PeerId, PacketId, Error as NetworkError};
+use util::network::{HandlerIo, PeerId, PacketId,};
+use util::error::UtilError;
 pub trait SyncIo {
 	fn disable_peer(&mut self, peer_id: &PeerId);
-	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError>;
-	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError>;
+	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>;
+	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>;
 	fn chain<'s>(&'s mut self) -> &'s mut BlockChainClient;
 }
@@ -27,11 +28,11 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> {
 		self.network.disable_peer(*peer_id);
 	}
-	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError>{
+	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>{
 		self.network.respond(packet_id, data)
 	}
-	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError>{
+	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>{
 		self.network.send(peer_id, packet_id, data)
 	}
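
This file widens the SyncIo error type from the network-specific NetworkError to the more general UtilError, so implementations can report failures other than network errors through the same Result. A rough sketch of the pattern, under the assumption that a UtilError can be built from a network error (the enum and variant below are stand-ins, not the real util::error definitions):

// Sketch of widening a trait's error type, as this commit does for SyncIo.
#[derive(Debug)]
struct NetworkError;

#[derive(Debug)]
enum UtilError {
    Network(NetworkError),
}

impl From<NetworkError> for UtilError {
    fn from(e: NetworkError) -> Self {
        UtilError::Network(e)
    }
}

trait SyncIo {
    // The broader error type lets implementations surface failures that are
    // not network errors through the same signature.
    fn send(&mut self, peer_id: u32, packet_id: u8, data: Vec<u8>) -> Result<(), UtilError>;
}

struct FailingIo;

impl SyncIo for FailingIo {
    fn send(&mut self, _peer_id: u32, _packet_id: u8, _data: Vec<u8>) -> Result<(), UtilError> {
        // A network-level failure converts into UtilError via the From impl.
        Err(NetworkError.into())
    }
}

fn main() {
    let mut io = FailingIo;
    println!("send result: {:?}", io.send(0, 0, Vec::new()));
}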

@@ -4,7 +4,8 @@ use util::hash::{H256, FixedHash};
 use util::uint::{U256};
 use util::sha3::Hashable;
 use util::rlp::{self, Rlp, RlpStream, View, Stream};
-use util::network::{PeerId, PacketId, Error as NetworkError};
+use util::network::{PeerId, PacketId};
+use util::error::UtilError;
 use client::{BlockChainClient, BlockStatus, BlockNumber, TreeRoute, BlockQueueStatus, BlockChainInfo, ImportResult, QueueStatus};
 use header::Header as BlockHeader;
 use sync::io::SyncIo;
@@ -195,7 +196,7 @@ impl<'p> SyncIo for TestIo<'p> {
 	fn disable_peer(&mut self, _peer_id: &PeerId) {
 	}
-	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
+	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
 		self.queue.push_back(TestPacket {
 			data: data,
 			packet_id: packet_id,
@@ -204,7 +205,7 @@ impl<'p> SyncIo for TestIo<'p> {
 		Ok(())
 	}
-	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
+	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
 		self.queue.push_back(TestPacket {
 			data: data,
 			packet_id: packet_id,
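
The test harness carries the same trait change: TestIo implements respond and send by pushing a TestPacket onto an in-memory queue instead of touching the network, so tests can inspect what would have been sent. A simplified sketch of that idea (field and method names beyond those visible in the diff are assumptions):

use std::collections::VecDeque;

// Sketch of the test-double pattern used by TestIo above: outgoing packets are
// recorded in a queue rather than sent, so a test can pop them off and assert
// on them. The data and packet_id fields follow the diff; the recipient field
// and the rest are assumed.
struct TestPacket {
    data: Vec<u8>,
    packet_id: u8,
    recipient: u32,
}

struct TestIo {
    queue: VecDeque<TestPacket>,
}

impl TestIo {
    fn send(&mut self, peer_id: u32, packet_id: u8, data: Vec<u8>) {
        // Record the packet instead of putting it on the wire.
        self.queue.push_back(TestPacket { data: data, packet_id: packet_id, recipient: peer_id });
    }
}

fn main() {
    let mut io = TestIo { queue: VecDeque::new() };
    io.send(1, 0x07, vec![0xde, 0xad]);
    let sent = io.queue.pop_front().expect("one packet queued");
    assert_eq!(sent.recipient, 1);
    assert_eq!(sent.packet_id, 0x07);
    assert_eq!(sent.data, vec![0xde, 0xad]);
}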