2020-09-22 14:53:52 +02:00
|
|
|
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
|
|
|
|
// This file is part of OpenEthereum.
|
2018-05-09 12:05:34 +02:00
|
|
|
|
2020-09-22 14:53:52 +02:00
|
|
|
// OpenEthereum is free software: you can redistribute it and/or modify
|
2018-05-09 12:05:34 +02:00
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
|
2020-09-22 14:53:52 +02:00
|
|
|
// OpenEthereum is distributed in the hope that it will be useful,
|
2018-05-09 12:05:34 +02:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License for more details.
|
|
|
|
|
|
|
|
// You should have received a copy of the GNU General Public License
|
2020-09-22 14:53:52 +02:00
|
|
|
// along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
|
2018-05-09 12:05:34 +02:00
|
|
|
|
|
|
|
use bytes::Bytes;
|
2019-02-13 09:20:33 +01:00
|
|
|
use enum_primitive::FromPrimitive;
|
2018-05-09 12:05:34 +02:00
|
|
|
use ethereum_types::H256;
|
|
|
|
use network::{self, PeerId};
|
2020-12-17 12:57:34 +01:00
|
|
|
use devp2p::PAYLOAD_SOFT_LIMIT;
|
2018-05-09 12:05:34 +02:00
|
|
|
use parking_lot::RwLock;
|
|
|
|
use rlp::{Rlp, RlpStream};
|
|
|
|
use std::cmp;
|
2019-01-04 14:05:46 +01:00
|
|
|
use types::{ids::BlockId, BlockNumber};
|
2018-07-02 18:50:05 +02:00
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
use sync_io::SyncIo;
|
|
|
|
|
2019-02-13 09:20:33 +01:00
|
|
|
use super::sync_packet::{
|
|
|
|
PacketInfo, SyncPacket,
|
|
|
|
SyncPacket::{
|
|
|
|
BlockBodiesPacket, BlockHeadersPacket, ConsensusDataPacket, GetBlockBodiesPacket,
|
2020-08-24 14:18:03 +02:00
|
|
|
GetBlockHeadersPacket, GetReceiptsPacket, GetSnapshotDataPacket, GetSnapshotManifestPacket,
|
|
|
|
ReceiptsPacket, SnapshotDataPacket, SnapshotManifestPacket, StatusPacket,
|
|
|
|
TransactionsPacket,
|
2019-02-13 09:20:33 +01:00
|
|
|
},
|
|
|
|
};
|
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
use super::{
|
2020-09-05 19:45:31 +02:00
|
|
|
ChainSync, PacketProcessError, RlpResponseResult, SyncHandler, MAX_BODIES_TO_SEND,
|
2020-08-24 14:18:03 +02:00
|
|
|
MAX_HEADERS_TO_SEND, MAX_RECEIPTS_HEADERS_TO_SEND,
|
2018-05-09 12:05:34 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
/// The Chain Sync Supplier: answers requests from peers with available data.
/// Stateless unit struct; all functionality lives in associated functions.
pub struct SyncSupplier;
|
|
|
|
|
|
|
|
impl SyncSupplier {
|
|
|
|
    /// Dispatch incoming requests and responses.
    ///
    /// Routes a raw packet to the matching handler: data requests
    /// (`Get*Packet`) are answered via `return_rlp`, `StatusPacket` goes
    /// straight to `on_packet`, and everything else requires the peer to be
    /// registered first. A `ClientBusy` result re-queues the request as a
    /// delayed one; a `Decoder` error is only logged.
    // Take a u8 and not a SyncPacketId because this is the entry point
    // to chain sync from the outside world.
    pub fn dispatch_packet(
        sync: &RwLock<ChainSync>,
        io: &mut dyn SyncIo,
        peer: PeerId,
        packet_id: u8,
        data: &[u8],
    ) {
        let rlp = Rlp::new(data);

        // Unknown packet ids fall through and are silently ignored.
        if let Some(id) = SyncPacket::from_u8(packet_id) {
            let result = match id {
                GetBlockBodiesPacket => SyncSupplier::return_rlp(
                    io,
                    &rlp,
                    peer,
                    SyncSupplier::return_block_bodies,
                    |e| format!("Error sending block bodies: {:?}", e),
                ),
                GetBlockHeadersPacket => SyncSupplier::return_rlp(
                    io,
                    &rlp,
                    peer,
                    SyncSupplier::return_block_headers,
                    |e| format!("Error sending block headers: {:?}", e),
                ),
                GetReceiptsPacket => {
                    SyncSupplier::return_rlp(io, &rlp, peer, SyncSupplier::return_receipts, |e| {
                        format!("Error sending receipts: {:?}", e)
                    })
                }
                GetSnapshotManifestPacket => SyncSupplier::return_rlp(
                    io,
                    &rlp,
                    peer,
                    SyncSupplier::return_snapshot_manifest,
                    |e| format!("Error sending snapshot manifest: {:?}", e),
                ),
                GetSnapshotDataPacket => SyncSupplier::return_rlp(
                    io,
                    &rlp,
                    peer,
                    SyncSupplier::return_snapshot_data,
                    |e| format!("Error sending snapshot data: {:?}", e),
                ),
                // Status is accepted even from not-yet-confirmed peers.
                StatusPacket => {
                    sync.write().on_packet(io, peer, packet_id, data);
                    Ok(())
                }
                // Packets that require the peer to be confirmed
                _ => {
                    // Only a read lock is needed to check registration.
                    if !sync.read().peers.contains_key(&peer) {
                        debug!(target:"sync", "Unexpected packet {} from unregistered peer: {}:{}", packet_id, peer, io.peer_version(peer));
                        return;
                    }
                    debug!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id);

                    match id {
                        ConsensusDataPacket => SyncHandler::on_consensus_packet(io, peer, &rlp),
                        TransactionsPacket => {
                            // Scope the read lock so the write lock below
                            // can be taken without deadlocking.
                            let res = {
                                let sync_ro = sync.read();
                                SyncHandler::on_peer_transactions(&*sync_ro, io, peer, &rlp)
                            };
                            if res.is_err() {
                                // peer sent invalid data, disconnect.
                                io.disable_peer(peer);
                                sync.write().deactivate_peer(io, peer);
                            }
                        }
                        _ => {
                            sync.write().on_packet(io, peer, packet_id, data);
                        }
                    }

                    Ok(())
                }
            };

            match result {
                Err(PacketProcessError::Decoder(e)) => {
                    debug!(target:"sync", "{} -> Malformed packet {} : {}", peer, packet_id, e)
                }
                // Client is busy (e.g. processing a fork): retry later.
                Err(PacketProcessError::ClientBusy) => {
                    sync.write().add_delayed_request(peer, packet_id, data)
                }
                Ok(()) => {}
            }
        }
    }
|
|
|
|
|
|
|
|
/// Dispatch delayed request
|
|
|
|
/// The main difference with dispatch packet is the direct send of the responses to the peer
|
|
|
|
pub fn dispatch_delayed_request(
|
|
|
|
sync: &RwLock<ChainSync>,
|
|
|
|
io: &mut dyn SyncIo,
|
|
|
|
peer: PeerId,
|
|
|
|
packet_id: u8,
|
|
|
|
data: &[u8],
|
|
|
|
) {
|
|
|
|
let rlp = Rlp::new(data);
|
|
|
|
|
|
|
|
if let Some(id) = SyncPacket::from_u8(packet_id) {
|
|
|
|
let result = match id {
|
|
|
|
GetBlockHeadersPacket => SyncSupplier::send_rlp(
|
|
|
|
io,
|
|
|
|
&rlp,
|
|
|
|
peer,
|
|
|
|
SyncSupplier::return_block_headers,
|
|
|
|
|e| format!("Error sending block headers: {:?}", e),
|
|
|
|
),
|
|
|
|
|
|
|
|
_ => {
|
|
|
|
debug!(target:"sync", "Unexpected packet {} was dispatched for delayed processing", packet_id);
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
match result {
|
|
|
|
Err(PacketProcessError::Decoder(e)) => {
|
|
|
|
debug!(target:"sync", "{} -> Malformed packet {} : {}", peer, packet_id, e)
|
|
|
|
}
|
|
|
|
Err(PacketProcessError::ClientBusy) => {
|
|
|
|
sync.write().add_delayed_request(peer, packet_id, data)
|
|
|
|
}
|
|
|
|
Ok(()) => {}
|
|
|
|
}
|
2019-02-13 09:20:33 +01:00
|
|
|
}
|
2018-05-09 12:05:34 +02:00
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
    /// Respond to GetBlockHeaders request.
    ///
    /// Returns up to `min(maxHeaders, MAX_HEADERS_TO_SEND)` headers starting
    /// from a block identified either by hash or by number, stepping
    /// `skip + 1` blocks at a time, forwards or backwards. The response is
    /// capped by `PAYLOAD_SOFT_LIMIT`.
    fn return_block_headers(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
        // Cannot return blocks, if forks processing is in progress,
        // The request should be postponed for later processing
        if io.chain().is_processing_fork() {
            return Err(PacketProcessError::ClientBusy);
        }
        // Packet layout:
        // [ block: { P , B_32 }, maxHeaders: P, skip: P, reverse: P in { 0 , 1 } ]
        let max_headers: usize = r.val_at(1)?;
        let skip: usize = r.val_at(2)?;
        let reverse: bool = r.val_at(3)?;
        let last = io.chain().chain_info().best_block_number;
        // A 32-byte first item is a hash; anything else is a block number.
        let number = if r.at(0)?.size() == 32 {
            // id is a hash
            let hash: H256 = r.val_at(0)?;
            trace!(target: "sync", "{} -> GetBlockHeaders (hash: {}, max: {}, skip: {}, reverse:{})", peer_id, hash, max_headers, skip, reverse);
            match io.chain().block_header(BlockId::Hash(hash)) {
                Some(hdr) => {
                    let number = hdr.number().into();
                    debug_assert_eq!(hdr.hash(), hash);

                    if max_headers == 1
                        || io.chain().block_hash(BlockId::Number(number)) != Some(hash)
                    {
                        // Non canonical header or single header requested
                        // TODO: handle single-step reverse hashchains of non-canon hashes
                        trace!(target:"sync", "Returning single header: {:?}", hash);
                        let mut rlp = RlpStream::new_list(1);
                        rlp.append_raw(&hdr.into_inner(), 1);
                        return Ok(Some((BlockHeadersPacket, rlp)));
                    }
                    number
                }
                None => return Ok(Some((BlockHeadersPacket, RlpStream::new_list(0)))), //no such header, return nothing
            }
        } else {
            let number = r.val_at::<BlockNumber>(0)?;
            trace!(target: "sync", "{} -> GetBlockHeaders (number: {}, max: {}, skip: {}, reverse:{})", peer_id, number, max_headers, skip, reverse);
            number
        };

        // Clamp the starting point to the chain head when walking backwards.
        let mut number = if reverse {
            cmp::min(last, number)
        } else {
            cmp::max(0, number)
        };
        let max_count = cmp::min(MAX_HEADERS_TO_SEND, max_headers);
        let mut count = 0;
        let mut data = Bytes::new();
        // Step size: `skip` headers are skipped between returned ones.
        let inc = skip.saturating_add(1) as BlockNumber;
        let overlay = io.chain_overlay().read();

        // We are checking the `overlay` as well since it's where the ForkBlock
        // header is cached : so peers can confirm we are on the right fork,
        // even if we are not synced until the fork block
        while (number <= last || overlay.contains_key(&number)) && count < max_count {
            if let Some(hdr) = overlay.get(&number) {
                trace!(target: "sync", "{}: Returning cached fork header", peer_id);
                data.extend_from_slice(hdr);
                count += 1;
            } else if let Some(hdr) = io.chain().block_header(BlockId::Number(number)) {
                data.append(&mut hdr.into_inner());
                count += 1;
                // Check that the packet won't be oversized
                if data.len() > PAYLOAD_SOFT_LIMIT {
                    break;
                }
            } else {
                // No required block.
                break;
            }
            if reverse {
                // Saturating step avoids underflow near the genesis block.
                if number <= inc || number == 0 {
                    break;
                }
                number = number.saturating_sub(inc);
            } else {
                number = number.saturating_add(inc);
            }
        }
        // `data` already holds `count` raw header items; wrap them in a list.
        let mut rlp = RlpStream::new_list(count as usize);
        rlp.append_raw(&data, count as usize);
        trace!(target: "sync", "{} -> GetBlockHeaders: returned {} entries", peer_id, count);
        Ok(Some((BlockHeadersPacket, rlp)))
    }
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
/// Respond to GetBlockBodies request
|
2020-07-29 10:36:15 +02:00
|
|
|
fn return_block_bodies(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
|
2018-05-09 12:05:34 +02:00
|
|
|
let mut count = r.item_count().unwrap_or(0);
|
|
|
|
if count == 0 {
|
|
|
|
debug!(target: "sync", "Empty GetBlockBodies request, ignoring.");
|
|
|
|
return Ok(None);
|
|
|
|
}
|
|
|
|
count = cmp::min(count, MAX_BODIES_TO_SEND);
|
|
|
|
let mut added = 0usize;
|
|
|
|
let mut data = Bytes::new();
|
|
|
|
for i in 0..count {
|
|
|
|
if let Some(body) = io.chain().block_body(BlockId::Hash(r.val_at::<H256>(i)?)) {
|
|
|
|
data.append(&mut body.into_inner());
|
|
|
|
added += 1;
|
2019-01-04 19:58:21 +01:00
|
|
|
// Check that the packet won't be oversized
|
2020-12-17 12:57:34 +01:00
|
|
|
if data.len() > PAYLOAD_SOFT_LIMIT {
|
2019-01-04 19:58:21 +01:00
|
|
|
break;
|
2018-05-09 12:05:34 +02:00
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
}
|
|
|
|
}
|
2018-05-09 12:05:34 +02:00
|
|
|
let mut rlp = RlpStream::new_list(added);
|
|
|
|
rlp.append_raw(&data, added);
|
|
|
|
trace!(target: "sync", "{} -> GetBlockBodies: returned {} entries", peer_id, added);
|
2020-09-05 19:45:31 +02:00
|
|
|
Ok(Some((BlockBodiesPacket, rlp)))
|
2018-05-09 12:05:34 +02:00
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2020-07-29 10:36:15 +02:00
|
|
|
fn return_receipts(io: &dyn SyncIo, rlp: &Rlp, peer_id: PeerId) -> RlpResponseResult {
|
2018-05-09 12:05:34 +02:00
|
|
|
let mut count = rlp.item_count().unwrap_or(0);
|
|
|
|
trace!(target: "sync", "{} -> GetReceipts: {} entries", peer_id, count);
|
|
|
|
if count == 0 {
|
|
|
|
debug!(target: "sync", "Empty GetReceipts request, ignoring.");
|
|
|
|
return Ok(None);
|
|
|
|
}
|
|
|
|
count = cmp::min(count, MAX_RECEIPTS_HEADERS_TO_SEND);
|
|
|
|
let mut added_headers = 0usize;
|
|
|
|
let mut data = Bytes::new();
|
2019-01-04 19:58:21 +01:00
|
|
|
let mut total_bytes = 0;
|
2018-05-09 12:05:34 +02:00
|
|
|
for i in 0..count {
|
2018-11-18 00:06:34 +01:00
|
|
|
if let Some(receipts) = io.chain().block_receipts(&rlp.val_at::<H256>(i)?) {
|
|
|
|
let mut receipts_bytes = ::rlp::encode(&receipts);
|
2019-01-04 19:58:21 +01:00
|
|
|
total_bytes += receipts_bytes.len();
|
2020-12-17 12:57:34 +01:00
|
|
|
if total_bytes > PAYLOAD_SOFT_LIMIT {
|
2019-01-04 19:58:21 +01:00
|
|
|
break;
|
|
|
|
}
|
2018-05-09 12:05:34 +02:00
|
|
|
data.append(&mut receipts_bytes);
|
|
|
|
added_headers += 1;
|
2020-08-05 06:08:03 +02:00
|
|
|
}
|
2018-05-09 12:05:34 +02:00
|
|
|
}
|
|
|
|
let mut rlp_result = RlpStream::new_list(added_headers);
|
|
|
|
rlp_result.append_raw(&data, added_headers);
|
2020-09-05 19:45:31 +02:00
|
|
|
Ok(Some((ReceiptsPacket, rlp_result)))
|
2018-05-09 12:05:34 +02:00
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
/// Respond to GetSnapshotManifest request
|
2020-07-29 10:36:15 +02:00
|
|
|
fn return_snapshot_manifest(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
|
2018-05-09 12:05:34 +02:00
|
|
|
let count = r.item_count().unwrap_or(0);
|
2018-05-16 22:01:55 +02:00
|
|
|
trace!(target: "warp", "{} -> GetSnapshotManifest", peer_id);
|
2018-05-09 12:05:34 +02:00
|
|
|
if count != 0 {
|
2018-05-16 22:01:55 +02:00
|
|
|
debug!(target: "warp", "Invalid GetSnapshotManifest request, ignoring.");
|
2018-05-09 12:05:34 +02:00
|
|
|
return Ok(None);
|
|
|
|
}
|
|
|
|
let rlp = match io.snapshot_service().manifest() {
|
|
|
|
Some(manifest) => {
|
2018-05-16 22:01:55 +02:00
|
|
|
trace!(target: "warp", "{} <- SnapshotManifest", peer_id);
|
2018-05-09 12:05:34 +02:00
|
|
|
let mut rlp = RlpStream::new_list(1);
|
|
|
|
rlp.append_raw(&manifest.into_rlp(), 1);
|
|
|
|
rlp
|
|
|
|
}
|
|
|
|
None => {
|
2018-05-16 22:01:55 +02:00
|
|
|
trace!(target: "warp", "{}: No snapshot manifest to return", peer_id);
|
2018-05-09 12:05:34 +02:00
|
|
|
RlpStream::new_list(0)
|
|
|
|
}
|
|
|
|
};
|
2020-09-05 19:45:31 +02:00
|
|
|
Ok(Some((SnapshotManifestPacket, rlp)))
|
2018-05-09 12:05:34 +02:00
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
/// Respond to GetSnapshotData request
|
2020-07-29 10:36:15 +02:00
|
|
|
fn return_snapshot_data(io: &dyn SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
|
2018-05-09 12:05:34 +02:00
|
|
|
let hash: H256 = r.val_at(0)?;
|
2018-05-16 22:01:55 +02:00
|
|
|
trace!(target: "warp", "{} -> GetSnapshotData {:?}", peer_id, hash);
|
2018-05-09 12:05:34 +02:00
|
|
|
let rlp = match io.snapshot_service().chunk(hash) {
|
|
|
|
Some(data) => {
|
|
|
|
let mut rlp = RlpStream::new_list(1);
|
2018-05-16 22:01:55 +02:00
|
|
|
trace!(target: "warp", "{} <- SnapshotData", peer_id);
|
2018-05-09 12:05:34 +02:00
|
|
|
rlp.append(&data);
|
|
|
|
rlp
|
|
|
|
}
|
|
|
|
None => {
|
2018-05-16 22:01:55 +02:00
|
|
|
trace!(target: "warp", "{}: No snapshot data to return", peer_id);
|
2018-05-09 12:05:34 +02:00
|
|
|
RlpStream::new_list(0)
|
|
|
|
}
|
|
|
|
};
|
2020-09-05 19:45:31 +02:00
|
|
|
Ok(Some((SnapshotDataPacket, rlp)))
|
2018-05-09 12:05:34 +02:00
|
|
|
}
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
fn return_rlp<FRlp, FError>(
|
2020-07-29 10:36:15 +02:00
|
|
|
io: &mut dyn SyncIo,
|
2018-05-09 12:05:34 +02:00
|
|
|
rlp: &Rlp,
|
|
|
|
peer: PeerId,
|
|
|
|
rlp_func: FRlp,
|
|
|
|
error_func: FError,
|
2020-09-05 19:45:31 +02:00
|
|
|
) -> Result<(), PacketProcessError>
|
|
|
|
where
|
|
|
|
FRlp: Fn(&dyn SyncIo, &Rlp, PeerId) -> RlpResponseResult,
|
|
|
|
FError: FnOnce(network::Error) -> String,
|
|
|
|
{
|
|
|
|
let response = rlp_func(io, rlp, peer);
|
|
|
|
if let Some((packet_id, rlp_stream)) = response? {
|
|
|
|
io.respond(packet_id.id(), rlp_stream.out())
|
|
|
|
.unwrap_or_else(|e| debug!(target: "sync", "{:?}", error_func(e)));
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn send_rlp<FRlp, FError>(
|
|
|
|
io: &mut dyn SyncIo,
|
|
|
|
rlp: &Rlp,
|
|
|
|
peer: PeerId,
|
|
|
|
rlp_func: FRlp,
|
|
|
|
error_func: FError,
|
|
|
|
) -> Result<(), PacketProcessError>
|
2018-05-09 12:05:34 +02:00
|
|
|
where
|
2020-07-29 10:36:15 +02:00
|
|
|
FRlp: Fn(&dyn SyncIo, &Rlp, PeerId) -> RlpResponseResult,
|
2018-05-09 12:05:34 +02:00
|
|
|
FError: FnOnce(network::Error) -> String,
|
|
|
|
{
|
|
|
|
let response = rlp_func(io, rlp, peer);
|
|
|
|
match response {
|
|
|
|
Err(e) => Err(e),
|
|
|
|
Ok(Some((packet_id, rlp_stream))) => {
|
2020-09-05 19:45:31 +02:00
|
|
|
io.send(peer, packet_id, rlp_stream.out())
|
2018-05-09 12:05:34 +02:00
|
|
|
.unwrap_or_else(|e| debug!(target: "sync", "{:?}", error_func(e)));
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
_ => Ok(()),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
mod test {
|
|
|
|
use super::{super::tests::*, *};
|
2018-08-24 11:53:31 +02:00
|
|
|
use blocks::SyncHeader;
|
2018-05-09 12:05:34 +02:00
|
|
|
use bytes::Bytes;
|
|
|
|
use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient};
|
|
|
|
use ethereum_types::H256;
|
|
|
|
use parking_lot::RwLock;
|
|
|
|
use rlp::{Rlp, RlpStream};
|
|
|
|
use std::collections::VecDeque;
|
|
|
|
use tests::{helpers::TestIo, snapshot::TestSnapshotService};
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
    // Exercises `return_block_headers` against a 100-block test chain:
    // unknown hashes, single-header requests (by hash and by number), and
    // multi-header requests with skip, in both directions.
    #[test]
    fn return_block_headers() {
        // Build a GetBlockHeaders request identifying the start block by hash.
        fn make_hash_req(h: &H256, count: usize, skip: usize, reverse: bool) -> Bytes {
            let mut rlp = RlpStream::new_list(4);
            rlp.append(h);
            rlp.append(&count);
            rlp.append(&skip);
            rlp.append(&if reverse { 1u32 } else { 0u32 });
            rlp.out()
        }

        // Build a GetBlockHeaders request identifying the start block by number.
        fn make_num_req(n: usize, count: usize, skip: usize, reverse: bool) -> Bytes {
            let mut rlp = RlpStream::new_list(4);
            rlp.append(&n);
            rlp.append(&count);
            rlp.append(&skip);
            rlp.append(&if reverse { 1u32 } else { 0u32 });
            rlp.out()
        }
        // Decode a response RLP stream back into headers for comparison.
        fn to_header_vec(rlp: ::chain::RlpResponseResult) -> Vec<SyncHeader> {
            Rlp::new(&rlp.unwrap().unwrap().1.out())
                .iter()
                .map(|r| SyncHeader::from_rlp(r.as_raw().to_vec()).unwrap())
                .collect()
        }

        let mut client = TestBlockChainClient::new();
        client.add_blocks(100, EachBlockWith::Nothing);
        let blocks: Vec<_> = (0..100)
            .map(|i| {
                (&client as &dyn BlockChainClient)
                    .block(BlockId::Number(i as BlockNumber))
                    .map(|b| b.into_inner())
                    .unwrap()
            })
            .collect();
        let headers: Vec<_> = blocks
            .iter()
            .map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap())
            .collect();
        let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect();

        let queue = RwLock::new(VecDeque::new());
        let ss = TestSnapshotService::new();
        let io = TestIo::new(&mut client, &ss, &queue, None);

        // Unknown hash: empty response, regardless of direction.
        let unknown: H256 = H256::new();
        let result = SyncSupplier::return_block_headers(
            &io,
            &Rlp::new(&make_hash_req(&unknown, 1, 0, false)),
            0,
        );
        assert!(to_header_vec(result).is_empty());
        let result = SyncSupplier::return_block_headers(
            &io,
            &Rlp::new(&make_hash_req(&unknown, 1, 0, true)),
            0,
        );
        assert!(to_header_vec(result).is_empty());

        // Single header by hash, both directions.
        let result = SyncSupplier::return_block_headers(
            &io,
            &Rlp::new(&make_hash_req(&hashes[2], 1, 0, true)),
            0,
        );
        assert_eq!(to_header_vec(result), vec![headers[2].clone()]);

        let result = SyncSupplier::return_block_headers(
            &io,
            &Rlp::new(&make_hash_req(&hashes[2], 1, 0, false)),
            0,
        );
        assert_eq!(to_header_vec(result), vec![headers[2].clone()]);

        // Three headers forwards with skip=5: stride is skip+1 = 6.
        let result = SyncSupplier::return_block_headers(
            &io,
            &Rlp::new(&make_hash_req(&hashes[50], 3, 5, false)),
            0,
        );
        assert_eq!(
            to_header_vec(result),
            vec![
                headers[50].clone(),
                headers[56].clone(),
                headers[62].clone()
            ]
        );

        // Same request backwards.
        let result = SyncSupplier::return_block_headers(
            &io,
            &Rlp::new(&make_hash_req(&hashes[50], 3, 5, true)),
            0,
        );
        assert_eq!(
            to_header_vec(result),
            vec![
                headers[50].clone(),
                headers[44].clone(),
                headers[38].clone()
            ]
        );

        // Same scenarios again, identifying the start block by number.
        let result =
            SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 0, true)), 0);
        assert_eq!(to_header_vec(result), vec![headers[2].clone()]);

        let result =
            SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 0, false)), 0);
        assert_eq!(to_header_vec(result), vec![headers[2].clone()]);

        let result =
            SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, false)), 0);
        assert_eq!(
            to_header_vec(result),
            vec![
                headers[50].clone(),
                headers[56].clone(),
                headers[62].clone()
            ]
        );

        let result =
            SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, true)), 0);
        assert_eq!(
            to_header_vec(result),
            vec![
                headers[50].clone(),
                headers[44].clone(),
                headers[38].clone()
            ]
        );
    }
|
|
|
|
|
2019-01-04 19:58:21 +01:00
|
|
|
    // A small GetBlockBodies request is served in full, while a large one
    // (blocks padded with transactions) is truncated by PAYLOAD_SOFT_LIMIT.
    #[test]
    fn respect_packet_limit() {
        let small_num_blocks = 10;
        let large_num_blocks = 50;
        // Transactions inflate body size so the large request trips the limit.
        let tx_per_block = 100;

        let mut client = TestBlockChainClient::new();
        client.add_blocks(large_num_blocks, EachBlockWith::Transactions(tx_per_block));

        let mut small_rlp_request = RlpStream::new_list(small_num_blocks);
        let mut large_rlp_request = RlpStream::new_list(large_num_blocks);

        for i in 0..small_num_blocks {
            let hash: H256 = client.block_hash(BlockId::Number(i as u64)).unwrap();
            small_rlp_request.append(&hash);
            large_rlp_request.append(&hash);
        }

        for i in small_num_blocks..large_num_blocks {
            let hash: H256 = client.block_hash(BlockId::Number(i as u64)).unwrap();
            large_rlp_request.append(&hash);
        }

        let queue = RwLock::new(VecDeque::new());
        let ss = TestSnapshotService::new();
        let io = TestIo::new(&mut client, &ss, &queue, None);

        // All 10 bodies fit under the soft limit.
        let small_result =
            SyncSupplier::return_block_bodies(&io, &Rlp::new(&small_rlp_request.out()), 0);
        let small_result = small_result.unwrap().unwrap().1;
        assert_eq!(
            Rlp::new(&small_result.out()).item_count().unwrap(),
            small_num_blocks
        );

        // The 50-body request must be truncated.
        let large_result =
            SyncSupplier::return_block_bodies(&io, &Rlp::new(&large_rlp_request.out()), 0);
        let large_result = large_result.unwrap().unwrap().1;
        assert!(Rlp::new(&large_result.out()).item_count().unwrap() < large_num_blocks);
    }
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
    // An empty GetReceipts request (bare RLP list 0xc0) is handled without
    // error.
    #[test]
    fn return_receipts_empty() {
        let mut client = TestBlockChainClient::new();
        let queue = RwLock::new(VecDeque::new());
        let ss = TestSnapshotService::new();
        let io = TestIo::new(&mut client, &ss, &queue, None);

        // 0xc0 is the RLP encoding of an empty list.
        let result = SyncSupplier::return_receipts(&io, &Rlp::new(&[0xc0]), 0);

        assert!(result.is_ok());
    }
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-05-09 12:05:34 +02:00
|
|
|
    // Requests receipts for four hashes of which the TestBlockChainClient
    // only recognizes two, then verifies the same request also works through
    // the full `dispatch_packet` path.
    #[test]
    fn return_receipts() {
        let mut client = TestBlockChainClient::new();
        let queue = RwLock::new(VecDeque::new());
        let sync = dummy_sync_with_peer(H256::new(), &client);
        let ss = TestSnapshotService::new();
        let mut io = TestIo::new(&mut client, &ss, &queue, None);

        let mut receipt_list = RlpStream::new_list(4);
        receipt_list.append(&H256::from(
            "0000000000000000000000000000000000000000000000005555555555555555",
        ));
        receipt_list.append(&H256::from(
            "ff00000000000000000000000000000000000000000000000000000000000000",
        ));
        receipt_list.append(&H256::from(
            "fff0000000000000000000000000000000000000000000000000000000000000",
        ));
        receipt_list.append(&H256::from(
            "aff0000000000000000000000000000000000000000000000000000000000000",
        ));

        let receipts_request = receipt_list.out();
        // it returns rlp ONLY for hashes started with "f"
        let result = SyncSupplier::return_receipts(&io, &Rlp::new(&receipts_request.clone()), 0);

        assert!(result.is_ok());
        let rlp_result = result.unwrap();
        assert!(rlp_result.is_some());

        // the length of two rlp-encoded receipts
        assert_eq!(603, rlp_result.unwrap().1.out().len());

        // Dispatching the same request end-to-end produces one sent packet.
        io.sender = Some(2usize);
        SyncSupplier::dispatch_packet(
            &RwLock::new(sync),
            &mut io,
            0usize,
            GetReceiptsPacket.id(),
            &receipts_request,
        );
        assert_eq!(1, io.packets.len());
    }
|
|
|
|
}
|