openethereum/ethcore/light/src/on_demand/tests.rs


// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! Tests for the on-demand service.

use cache::Cache;
use common_types::header::Header;
use ethereum_types::H256;
use futures::Future;
use net::*;
use network::{NodeId, PeerId};
use parking_lot::Mutex;
use request::{self as basic_request, Response};
use std::{
    sync::Arc,
    thread,
    time::{Duration, Instant},
};
use super::{request, HeaderRef, OnDemand, OnDemandRequester, Peer};

// useful contexts to give the service.
enum Context {
    NoOp,
    WithPeer(PeerId),
    RequestFrom(PeerId, ReqId),
    Punish(PeerId),
    FaultyRequest,
}

impl EventContext for Context {
    fn peer(&self) -> PeerId {
        match *self {
            Context::WithPeer(id) | Context::RequestFrom(id, _) | Context::Punish(id) => id,
            Context::FaultyRequest => 0,
            _ => panic!("didn't expect to have peer queried."),
        }
    }

    fn as_basic(&self) -> &dyn BasicContext {
        self
    }
}

impl BasicContext for Context {
    /// Returns the relevant peer's persistent ID (aka `NodeId`).
    fn persistent_peer_id(&self, _: PeerId) -> Option<NodeId> {
        panic!("didn't expect to provide persistent ID")
    }

    fn request_from(&self, peer_id: PeerId, _: ::request::NetworkRequests) -> Result<ReqId, Error> {
        match *self {
            Context::RequestFrom(id, req_id) => {
                if peer_id == id {
                    Ok(req_id)
                } else {
                    Err(Error::NoCredits)
                }
            }
            Context::FaultyRequest => Err(Error::NoCredits),
            _ => panic!("didn't expect to have requests dispatched."),
        }
    }

    fn make_announcement(&self, _: Announcement) {
        panic!("didn't expect to make announcement")
    }

    fn disconnect_peer(&self, id: PeerId) {
        self.disable_peer(id)
    }

    fn disable_peer(&self, peer_id: PeerId) {
        match *self {
            Context::Punish(id) if id == peer_id => {}
            _ => panic!("Unexpectedly punished peer."),
        }
    }
}

// test harness.
struct Harness {
    service: OnDemand,
}

impl Harness {
    fn create() -> Self {
        let cache = Arc::new(Mutex::new(Cache::new(
            Default::default(),
            Duration::from_secs(60),
        )));

        Harness {
            service: OnDemand::new_test(
                cache,
                // Response `time_to_live`
                Duration::from_secs(5),
                // Request start backoff
                Duration::from_secs(1),
                // Request max backoff
                Duration::from_secs(20),
                super::DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS,
                super::DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS,
            ),
        }
    }

    fn inject_peer(&self, id: PeerId, peer: Peer) {
        self.service.peers.write().insert(id, peer);
    }
}

fn dummy_status() -> Status {
    Status {
        protocol_version: 1,
        network_id: 999,
        head_td: 1.into(),
        head_hash: H256::default(),
        head_num: 1359,
        genesis_hash: H256::default(),
        last_head: None,
    }
}

fn dummy_capabilities() -> Capabilities {
    Capabilities {
        serve_headers: true,
        serve_chain_since: Some(1),
        serve_state_since: Some(1),
        tx_relay: true,
    }
}
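
// dropping the receiver of an unanswered request should remove it from the
// pending set on the next dispatch.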
#[test]
fn detects_hangup() {
    let on_demand = Harness::create().service;
    let result = on_demand.request_raw(
        &Context::NoOp,
        vec![request::HeaderByHash(H256::default().into()).into()],
    );

    assert_eq!(on_demand.pending.read().len(), 1);
    drop(result);

    on_demand.dispatch_pending(&Context::NoOp);
    assert!(on_demand.pending.read().is_empty());
}
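
// a single header request, dispatched to a capable peer and answered with the
// matching header, should resolve successfully.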
#[test]
fn single_request() {
    let harness = Harness::create();

    let peer_id = 10101;
    let req_id = ReqId(14426);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let header = Header::default();
    let encoded = header.encoded();

    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![request::HeaderByHash(header.hash().into()).into()],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_id,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded],
        })],
    );

    assert!(recv.wait().is_ok());
}
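
// requests must not be dispatched to a peer whose capabilities cannot serve them.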
#[test]
fn no_capabilities() {
    let harness = Harness::create();
    let peer_id = 10101;

    let mut capabilities = dummy_capabilities();
    capabilities.serve_headers = false;

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: capabilities,
        },
    );

    let _recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![request::HeaderByHash(H256::default().into()).into()],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness.service.dispatch_pending(&Context::NoOp);

    assert_eq!(harness.service.pending.read().len(), 1);
}
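
// a request whose assigned peer disconnects goes back to the pending queue and
// is reassigned to another capable peer.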
#[test]
fn reassign() {
    let harness = Harness::create();

    let peer_ids = (10101, 12345);
    let req_ids = (ReqId(14426), ReqId(555));

    harness.inject_peer(
        peer_ids.0,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let header = Header::default();
    let encoded = header.encoded();

    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![request::HeaderByHash(header.hash().into()).into()],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_ids.0, req_ids.0));
    assert_eq!(harness.service.pending.read().len(), 0);

    harness
        .service
        .on_disconnect(&Context::WithPeer(peer_ids.0), &[req_ids.0]);
    assert_eq!(harness.service.pending.read().len(), 1);

    harness.inject_peer(
        peer_ids.1,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_ids.1, req_ids.1));
    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::WithPeer(peer_ids.1),
        req_ids.1,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded],
        })],
    );

    assert!(recv.wait().is_ok());
}
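
// a response that fulfils only part of a request keeps the remainder pending
// until a later response completes it.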
#[test]
fn partial_response() {
    let harness = Harness::create();

    let peer_id = 111;
    let req_ids = (ReqId(14426), ReqId(555));

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let make = |num| {
        let mut hdr = Header::default();
        hdr.set_number(num);

        let encoded = hdr.encoded();
        (hdr, encoded)
    };

    let (header1, encoded1) = make(5);
    let (header2, encoded2) = make(23452);

    // request two headers.
    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header1.hash().into()).into(),
                request::HeaderByHash(header2.hash().into()).into(),
            ],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
    assert_eq!(harness.service.pending.read().len(), 0);

    // supply only the first one.
    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_ids.0,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded1],
        })],
    );

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
    assert_eq!(harness.service.pending.read().len(), 0);

    // supply the next one.
    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_ids.1,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded2],
        })],
    );

    assert!(recv.wait().is_ok());
}
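
// valid leading responses are still processed when followed by a response of the
// wrong kind; the offending peer is punished and the remainder is re-dispatched.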
#[test]
fn part_bad_part_good() {
    let harness = Harness::create();

    let peer_id = 111;
    let req_ids = (ReqId(14426), ReqId(555));

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let make = |num| {
        let mut hdr = Header::default();
        hdr.set_number(num);

        let encoded = hdr.encoded();
        (hdr, encoded)
    };

    let (header1, encoded1) = make(5);
    let (header2, encoded2) = make(23452);

    // request two headers.
    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header1.hash().into()).into(),
                request::HeaderByHash(header2.hash().into()).into(),
            ],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
    assert_eq!(harness.service.pending.read().len(), 0);

    // supply only the first one, but followed by the wrong kind of response.
    // the first header should be processed.
    harness.service.on_responses(
        &Context::Punish(peer_id),
        req_ids.0,
        &[
            Response::Headers(basic_request::HeadersResponse {
                headers: vec![encoded1],
            }),
            Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }),
        ],
    );

    assert_eq!(harness.service.pending.read().len(), 1);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
    assert_eq!(harness.service.pending.read().len(), 0);

    // supply the next one.
    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_ids.1,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded2],
        })],
    );

    assert!(recv.wait().is_ok());
}
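
// a response of the wrong kind punishes the peer and leaves the request pending.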
#[test]
fn wrong_kind() {
    let harness = Harness::create();

    let peer_id = 10101;
    let req_id = ReqId(14426);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let _recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![request::HeaderByHash(H256::default().into()).into()],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::Punish(peer_id),
        req_id,
        &[Response::Receipts(basic_request::ReceiptsResponse {
            receipts: vec![],
        })],
    );

    assert_eq!(harness.service.pending.read().len(), 1);
}
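
// later requests in a batch may back-reference the header output of an earlier
// request in the same batch.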
#[test]
fn back_references() {
    let harness = Harness::create();

    let peer_id = 10101;
    let req_id = ReqId(14426);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let header = Header::default();
    let encoded = header.encoded();

    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header.hash().into()).into(),
                request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(),
            ],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_id,
        &[
            Response::Headers(basic_request::HeadersResponse {
                headers: vec![encoded],
            }),
            Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }),
        ],
    );

    assert!(recv.wait().is_ok());
}
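
// back-referencing an output index that does not exist in the batch is a
// programmer error and should panic.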
#[test]
#[should_panic]
fn bad_back_reference() {
    let harness = Harness::create();

    let header = Header::default();

    let _ = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header.hash().into()).into(),
                request::BlockReceipts(HeaderRef::Unresolved(1, header.hash().into())).into(),
            ],
        )
        .unwrap();
}
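
// once the header is known, the back-referenced receipts request is completed
// from the cache instead of requiring a network response.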
#[test]
fn fill_from_cache() {
    let harness = Harness::create();

    let peer_id = 10101;
    let req_id = ReqId(14426);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let header = Header::default();
    let encoded = header.encoded();

    let recv = harness
        .service
        .request_raw(
            &Context::NoOp,
            vec![
                request::HeaderByHash(header.hash().into()).into(),
                request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(),
            ],
        )
        .unwrap();

    assert_eq!(harness.service.pending.read().len(), 1);

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);

    harness.service.on_responses(
        &Context::WithPeer(peer_id),
        req_id,
        &[Response::Headers(basic_request::HeadersResponse {
            headers: vec![encoded],
        })],
    );

    assert!(recv.wait().is_ok());
}
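
// a request that never receives a response backs off exponentially (capped at the
// maximum backoff) and is dropped after the configured number of rounds.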
#[test]
fn request_without_response_should_backoff_and_then_be_dropped() {
    let harness = Harness::create();
    let peer_id = 0;
    let req_id = ReqId(13);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let binary_exp_backoff: Vec<u64> = vec![1, 2, 4, 8, 16, 20, 20, 20, 20, 20];

    let _recv = harness
        .service
        .request_raw(
            &Context::RequestFrom(peer_id, req_id),
            vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()],
        )
        .unwrap();
    assert_eq!(harness.service.pending.read().len(), 1);

    for backoff in &binary_exp_backoff {
        harness.service.dispatch_pending(&Context::FaultyRequest);
        assert_eq!(
            harness.service.pending.read().len(),
            1,
            "Request should not be dropped"
        );
        let now = Instant::now();
        while now.elapsed() < Duration::from_secs(*backoff) {}
    }

    harness.service.dispatch_pending(&Context::FaultyRequest);
    assert_eq!(
        harness.service.pending.read().len(),
        0,
        "Request that exceeded the 10 backoff rounds should be dropped"
    );
}
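
// a request that only ever receives empty responses is dropped once the failure
// limit within the response time window is exceeded.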
#[test]
fn empty_responses_exceeds_limit_should_be_dropped() {
    let harness = Harness::create();
    let peer_id = 0;
    let req_id = ReqId(13);

    harness.inject_peer(
        peer_id,
        Peer {
            status: dummy_status(),
            capabilities: dummy_capabilities(),
        },
    );

    let _recv = harness
        .service
        .request_raw(
            &Context::RequestFrom(peer_id, req_id),
            vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()],
        )
        .unwrap();

    harness
        .service
        .dispatch_pending(&Context::RequestFrom(peer_id, req_id));

    assert_eq!(harness.service.pending.read().len(), 0);
    assert_eq!(harness.service.in_transit.read().len(), 1);

    let now = Instant::now();

    // Send `empty responses` in the current time window.
    // Use only half of the `time_window` because we can't be sure exactly
    // when the window started or how accurate the clock is.
    while now.elapsed() < harness.service.response_time_window / 2 {
        harness
            .service
            .on_responses(&Context::RequestFrom(13, req_id), req_id, &[]);
        assert!(harness.service.pending.read().len() != 0);
        let pending = harness.service.pending.write().remove(0);
        harness.service.in_transit.write().insert(req_id, pending);
    }

    // Make sure we have passed the first `time window`.
    thread::sleep(Duration::from_secs(5));

    // The request is now in a failure state, but one more response is needed for it to be polled.
    harness
        .service
        .on_responses(&Context::RequestFrom(13, req_id), req_id, &[]);

    assert!(harness.service.in_transit.read().is_empty());
    assert!(harness.service.pending.read().is_empty());
}