// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! Tests for the on-demand service.

use cache::Cache;
use common_types::header::Header;
use ethereum_types::H256;
use futures::Future;
use net::*;
use network::{NodeId, PeerId};
use parking_lot::Mutex;
use request::{self as basic_request, Response};

use std::{
    sync::Arc,
    thread,
    time::{Duration, Instant},
};

use super::{request, HeaderRef, OnDemand, OnDemandRequester, Peer};

// useful contexts to give the service.
enum Context {
    NoOp,
    WithPeer(PeerId),
    RequestFrom(PeerId, ReqId),
    Punish(PeerId),
    FaultyRequest,
}

impl EventContext for Context {
    fn peer(&self) -> PeerId {
        match *self {
            Context::WithPeer(id) | Context::RequestFrom(id, _) | Context::Punish(id) => id,
            Context::FaultyRequest => 0,
            _ => panic!("didn't expect to have peer queried."),
        }
    }

    fn as_basic(&self) -> &BasicContext {
        self
    }
}

impl BasicContext for Context {
    /// Returns the relevant peer's persistent ID (aka `NodeId`).
    fn persistent_peer_id(&self, _: PeerId) -> Option<NodeId> {
        panic!("didn't expect to provide persistent ID")
    }

    fn request_from(&self, peer_id: PeerId, _: ::request::NetworkRequests) -> Result<ReqId, Error> {
        match *self {
            Context::RequestFrom(id, req_id) => {
                if peer_id == id {
                    Ok(req_id)
                } else {
                    Err(Error::NoCredits)
                }
            }
            Context::FaultyRequest => Err(Error::NoCredits),
            _ => panic!("didn't expect to have requests dispatched."),
        }
    }

    fn make_announcement(&self, _: Announcement) {
        panic!("didn't expect to make announcement")
    }

    fn disconnect_peer(&self, id: PeerId) {
        self.disable_peer(id)
    }

    fn disable_peer(&self, peer_id: PeerId) {
        match *self {
            Context::Punish(id) if id == peer_id => {}
            _ => panic!("Unexpectedly punished peer."),
        }
    }
}

// test harness.
struct Harness {
    service: OnDemand,
}

impl Harness {
    fn create() -> Self {
        let cache = Arc::new(Mutex::new(Cache::new(
            Default::default(),
            Duration::from_secs(60),
        )));
        Harness {
            service: OnDemand::new_test(
                cache,
                // Response `time_to_live`
                Duration::from_secs(5),
                // Request start backoff
                Duration::from_secs(1),
                // Request max backoff
                Duration::from_secs(20),
                super::DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS,
                super::DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS,
            ),
        }
    }

    fn inject_peer(&self, id: PeerId, peer: Peer) {
        self.service.peers.write().insert(id, peer);
    }
}

fn dummy_status() -> Status {
    Status {
        protocol_version: 1,
        network_id: 999,
        head_td: 1.into(),
        head_hash: H256::default(),
        head_num: 1359,
        genesis_hash: H256::default(),
        last_head: None,
    }
}

fn dummy_capabilities() -> Capabilities {
    Capabilities {
        serve_headers: true,
        serve_chain_since: Some(1),
        serve_state_since: Some(1),
        tx_relay: true,
    }
}

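// Dropping the receiver means the requester hung up; the pending request
// should be pruned on the next dispatch.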
#[test]
fn detects_hangup() {
    let on_demand = Harness::create().service;
    let result = on_demand.request_raw(
        &Context::NoOp,
        vec![request::HeaderByHash(H256::default().into()).into()],
    );

    assert_eq!(on_demand.pending.read().len(), 1);
    drop(result);

    on_demand.dispatch_pending(&Context::NoOp);
    assert!(on_demand.pending.read().is_empty());
}

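// Happy path: a single header request is dispatched to a capable peer and
// completed by the matching response.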
#[test]
fn single_request() {
    let harness = Harness::create();

    let peer_id = 10101;
    let req_id = ReqId(14426);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let header = Header::default();
    let encoded = header.encoded();

    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![request::HeaderByHash(header.hash().into()).into()],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_id,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded],
        })],
    );

    assert!(recv.wait().is_ok());
}

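// A request should stay pending when the only connected peer does not serve
// headers.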
#[test]
fn no_capabilities() {
    let harness = Harness::create();

    let peer_id = 10101;

    let mut capabilities = dummy_capabilities();
    capabilities.serve_headers = false;

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities,
        },
    );

    let _recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![request::HeaderByHash(H256::default().into()).into()],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness.service.dispatch_pending(&Context::NoOp);

    assert_eq!(harness.service.pending.read().len(), 1);
}

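// When the assigned peer disconnects, the in-flight request moves back to the
// pending queue and can be re-dispatched to another peer.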
#[test]
fn reassign() {
    let harness = Harness::create();

    let peer_ids = (10101, 12345);
    let req_ids = (ReqId(14426), ReqId(555));

    harness.inject_peer(
        peer_ids.0,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let header = Header::default();
    let encoded = header.encoded();

    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![request::HeaderByHash(header.hash().into()).into()],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_ids.0, req_ids.0));
    assert_eq!(harness.service.pending.read().len(), 0);

    harness
        .service
        .on_disconnect(&Context::WithPeer(peer_ids.0), &[req_ids.0]);
    assert_eq!(harness.service.pending.read().len(), 1);

    harness.inject_peer(
        peer_ids.1,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_ids.1, req_ids.1));
    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::WithPeer(peer_ids.1),
        req_ids.1,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded],
        })],
    );

    assert!(recv.wait().is_ok());
}

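// A response that fulfils only part of the request keeps the remainder
// pending; it is re-dispatched and completed by a later response.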
#[test]
fn partial_response() {
    let harness = Harness::create();

    let peer_id = 111;
    let req_ids = (ReqId(14426), ReqId(555));

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let make = |num| {
        let mut hdr = Header::default();
        hdr.set_number(num);

        let encoded = hdr.encoded();
        (hdr, encoded)
    };

    let (header1, encoded1) = make(5);
    let (header2, encoded2) = make(23452);

    // request two headers.
    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header1.hash().into()).into(),
                request::HeaderByHash(header2.hash().into()).into(),
            ],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
    assert_eq!(harness.service.pending.read().len(), 0);

    // supply only the first one.
    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_ids.0,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded1],
        })],
    );

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
    assert_eq!(harness.service.pending.read().len(), 0);

    // supply the next one.
    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_ids.1,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded2],
        })],
    );

    assert!(recv.wait().is_ok());
}

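// A response whose first part is valid but whose second part is of the wrong
// kind still yields the valid part; the peer is punished and the remainder is
// re-dispatched.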
#[test]
fn part_bad_part_good() {
    let harness = Harness::create();

    let peer_id = 111;
    let req_ids = (ReqId(14426), ReqId(555));

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let make = |num| {
        let mut hdr = Header::default();
        hdr.set_number(num);

        let encoded = hdr.encoded();
        (hdr, encoded)
    };

    let (header1, encoded1) = make(5);
    let (header2, encoded2) = make(23452);

    // request two headers.
    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header1.hash().into()).into(),
                request::HeaderByHash(header2.hash().into()).into(),
            ],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
    assert_eq!(harness.service.pending.read().len(), 0);

    // supply only the first one, but followed by the wrong kind of response.
    // the first header should be processed.
    harness.service.on_responses(
        &Context::Punish(peer_id),
        req_ids.0,
        &[
            Response::Headers(basic_request::HeadersResponse {
                headers: vec![encoded1],
            }),
            Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }),
        ],
    );

    assert_eq!(harness.service.pending.read().len(), 1);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
    assert_eq!(harness.service.pending.read().len(), 0);

    // supply the next one.
    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_ids.1,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded2],
        })],
    );

    assert!(recv.wait().is_ok());
}

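// A response of the wrong kind is rejected outright: the peer is punished and
// the request goes back to the pending queue.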
#[test]
fn wrong_kind() {
    let harness = Harness::create();

    let peer_id = 10101;
    let req_id = ReqId(14426);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let _recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![request::HeaderByHash(H256::default().into()).into()],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::Punish(peer_id),
        req_id,
        &[Response::Receipts(basic_request::ReceiptsResponse {
            receipts: vec![],
        })],
    );

    assert_eq!(harness.service.pending.read().len(), 1);
}

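// A later request in a batch may reference the output of an earlier one
// (here, block receipts keyed by the header produced at output index 0).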
#[test]
fn back_references() {
    let harness = Harness::create();

    let peer_id = 10101;
    let req_id = ReqId(14426);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let header = Header::default();
    let encoded = header.encoded();

    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header.hash().into()).into(),
                request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(),
            ],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_id,
        &[
            Response::Headers(basic_request::HeadersResponse {
                headers: vec![encoded],
            }),
            Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }),
        ],
    );

    assert!(recv.wait().is_ok());
}

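// Building a request with a back-reference to an output that does not exist
// (index 1, when only output 0 precedes it) must panic.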
#[test]
#[should_panic]
fn bad_back_reference() {
    let harness = Harness::create();

    let header = Header::default();

    let _ = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header.hash().into()).into(),
                request::BlockReceipts(HeaderRef::Unresolved(1, header.hash().into())).into(),
            ],
        )
        .unwrap();
}

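// Only the header response is supplied over the network; the back-referenced
// receipts request is filled from the cache, so the whole batch still completes.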
#[test]
fn fill_from_cache() {
    let harness = Harness::create();

    let peer_id = 10101;
    let req_id = ReqId(14426);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let header = Header::default();
    let encoded = header.encoded();

    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header.hash().into()).into(),
                request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(),
            ],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_id,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded],
        })],
    );

    assert!(recv.wait().is_ok());
}

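// A request that never receives a response backs off exponentially (capped at
// the maximum backoff) and is dropped once the backoff rounds are exhausted.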
#[test]
fn request_without_response_should_backoff_and_then_be_dropped() {
    let harness = Harness::create();
    let peer_id = 0;
    let req_id = ReqId(13);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let binary_exp_backoff: Vec<u64> = vec![1, 2, 4, 8, 16, 20, 20, 20, 20, 20];

    let _recv = harness
        .service
        .request_raw(
            &Context::RequestFrom(peer_id, req_id),
            vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()],
        )
        .unwrap();
    assert_eq!(harness.service.pending.read().len(), 1);

    for backoff in &binary_exp_backoff {
        harness.service.dispatch_pending(&Context::FaultyRequest);
        assert_eq!(
            harness.service.pending.read().len(),
            1,
            "Request should not be dropped"
        );
        let now = Instant::now();
        while now.elapsed() < Duration::from_secs(*backoff) {}
    }

    harness.service.dispatch_pending(&Context::FaultyRequest);
    assert_eq!(
        harness.service.pending.read().len(),
        0,
        "Request that exceeded the 10 backoff rounds should be dropped"
    );
}

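// A peer that keeps sending empty responses within the response time window
// eventually exceeds the failure limit, and the request is dropped.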
#[test]
fn empty_responses_exceeds_limit_should_be_dropped() {
    let harness = Harness::create();
    let peer_id = 0;
    let req_id = ReqId(13);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let _recv = harness
        .service
        .request_raw(
            &Context::RequestFrom(peer_id, req_id),
            vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()],
        )
        .unwrap();

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);
    assert_eq!(harness.service.in_transit.read().len(), 1);

    let now = Instant::now();

    // Send empty responses within the current time window.
    // Use only half of the `time_window` because we can't be sure exactly
    // when the window started, nor how accurate the clock is.
    while now.elapsed() < harness.service.response_time_window / 2 {
        harness
            .service
            .on_responses(&Context::RequestFrom(13, req_id), req_id, &[]);
        assert!(harness.service.pending.read().len() != 0);
        let pending = harness.service.pending.write().remove(0);
        harness.service.in_transit.write().insert(req_id, pending);
    }

    // Make sure we have passed the first `time window`.
    thread::sleep(Duration::from_secs(5));

    // The request is now in the failure state, but one more response is needed
    // for it to be polled and dropped.
    harness
        .service
        .on_responses(&Context::RequestFrom(13, req_id), req_id, &[]);

    assert!(harness.service.in_transit.read().is_empty());
    assert!(harness.service.pending.read().is_empty());
}