tests for on_demand

commit 68ec7ae41e
parent 5793bb8fac
@@ -108,9 +108,14 @@ mod timeout {
 }
 
 /// A request id.
+#[cfg(not(test))]
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
 pub struct ReqId(usize);
 
+#[cfg(test)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
+pub struct ReqId(pub usize);
+
 impl fmt::Display for ReqId {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		write!(f, "Request #{}", self.0)
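Note on the hunk above: the `#[cfg(test)]` variant of `ReqId` makes the wrapped counter public, so tests can fabricate specific request ids instead of obtaining them from the network layer. A minimal illustration of what that enables; `req_id_sketch` is an illustrative name, not part of the commit:

#[cfg(test)]
fn req_id_sketch() {
	// Only compiles in test builds, where the inner `usize` is public.
	let id = ReqId(14426);
	// `Display` is shared by both variants, so formatting is unchanged.
	assert_eq!(format!("{}", id), "Request #14426");
}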
@@ -40,11 +40,15 @@ use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY, SHA3_EMPTY_LIST_RLP};
 use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId};
 use cache::Cache;
 use request::{self as basic_request, Request as NetworkRequest};
-use self::request::CheckedRequest;
 
-pub use self::request::{Request, Response};
+#[cfg(test)]
+mod tests;
 
 pub mod request;
 
+pub use self::request::{CheckedRequest, Request, Response};
+
 /// The result of execution
 pub type ExecutionResult = Result<Executed, ExecutionError>;
@@ -137,6 +141,7 @@ pub struct OnDemand {
 	peers: RwLock<HashMap<PeerId, Peer>>,
 	in_transit: RwLock<HashMap<ReqId, Pending>>,
 	cache: Arc<Mutex<Cache>>,
+	no_immediate_dispatch: bool,
 }
 
 impl OnDemand {
@@ -147,9 +152,20 @@ impl OnDemand {
 			peers: RwLock::new(HashMap::new()),
 			in_transit: RwLock::new(HashMap::new()),
 			cache: cache,
+			no_immediate_dispatch: false,
 		}
 	}
 
+	// make a test version: this doesn't dispatch pending requests
+	// until you trigger it manually.
+	#[cfg(test)]
+	fn new_test(cache: Arc<Mutex<Cache>>) -> Self {
+		let mut me = OnDemand::new(cache);
+		me.no_immediate_dispatch = true;
+
+		me
+	}
+
 	/// Request a header's hash by block number and CHT root hash.
 	/// Returns the hash.
 	pub fn hash_by_number(&self, ctx: &BasicContext, req: request::HeaderProof) -> BoxFuture<H256, Canceled> {
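The `new_test` constructor above differs from `new` only in disabling immediate dispatch, so a test can queue requests and then choose exactly when they are sent. A rough sketch of that flow under the same assumptions as the tests added below; `manual_dispatch_sketch` and its `ctx` argument are illustrative:

#[cfg(test)]
fn manual_dispatch_sketch(cache: Arc<Mutex<Cache>>, ctx: &BasicContext) {
	let service = OnDemand::new_test(cache);
	// With immediate dispatch disabled, this only queues the request.
	let _recv = service.request_raw(ctx, vec![request::HeaderByHash(Default::default()).into()]).unwrap();
	// The test decides when the queued request actually goes out.
	service.dispatch_pending(ctx);
}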
@@ -335,7 +351,7 @@ impl OnDemand {
 			sender: sender,
 		});
 
-		self.dispatch_pending(ctx);
+		self.attempt_dispatch(ctx);
 
 		Ok(receiver)
 	}
@@ -352,6 +368,14 @@ impl OnDemand {
 		})
 	}
 
+	// maybe dispatch pending requests: a no-op when immediate dispatch is
+	// disabled (as in `new_test`), so tests control when dispatch happens.
+	fn attempt_dispatch(&self, ctx: &BasicContext) {
+		if !self.no_immediate_dispatch {
+			self.dispatch_pending(ctx)
+		}
+	}
+
 	// dispatch pending requests, and discard those for which the corresponding
 	// receiver has been dropped.
 	fn dispatch_pending(&self, ctx: &BasicContext) {
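The comment on `dispatch_pending` above describes the behaviour the new `detects_hangup` test exercises: once the caller drops the response receiver, the queued request is pruned on the next dispatch rather than sent. A condensed sketch of that contract, mirroring the test added below; `hangup_sketch` is an illustrative name:

#[cfg(test)]
fn hangup_sketch(service: OnDemand, ctx: &BasicContext) {
	let receiver = service.header_by_hash(ctx, request::HeaderByHash(H256::default()));
	drop(receiver);                 // the caller loses interest
	service.dispatch_pending(ctx);  // the abandoned request is discarded, not dispatched
	assert!(service.pending.read().is_empty());
}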
@@ -414,7 +438,7 @@ impl OnDemand {
 impl Handler for OnDemand {
 	fn on_connect(&self, ctx: &EventContext, status: &Status, capabilities: &Capabilities) {
 		self.peers.write().insert(ctx.peer(), Peer { status: status.clone(), capabilities: capabilities.clone() });
-		self.dispatch_pending(ctx.as_basic());
+		self.attempt_dispatch(ctx.as_basic());
 	}
 
 	fn on_disconnect(&self, ctx: &EventContext, unfulfilled: &[ReqId]) {
@@ -431,7 +455,7 @@ impl Handler for OnDemand {
 			}
 		}
 
-		self.dispatch_pending(ctx);
+		self.attempt_dispatch(ctx);
 	}
 
 	fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) {
@@ -443,7 +467,7 @@ impl Handler for OnDemand {
 			}
 		}
 
-		self.dispatch_pending(ctx.as_basic());
+		self.attempt_dispatch(ctx.as_basic());
 	}
 
 	fn on_responses(&self, ctx: &EventContext, req_id: ReqId, responses: &[basic_request::Response]) {
@@ -501,50 +525,10 @@ impl Handler for OnDemand {
 		pending.required_capabilities = capabilities;
 
 		self.pending.write().push(pending);
-		self.dispatch_pending(ctx.as_basic());
+		self.attempt_dispatch(ctx.as_basic());
 	}
 
 	fn tick(&self, ctx: &BasicContext) {
-		self.dispatch_pending(ctx)
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	use std::sync::Arc;
-
-	use cache::Cache;
-	use net::{Announcement, BasicContext, ReqId, Error as LesError};
-	use request::NetworkRequests;
-
-	use network::{PeerId, NodeId};
-	use time::Duration;
-	use util::{H256, Mutex};
-
-	struct FakeContext;
-
-	impl BasicContext for FakeContext {
-		fn persistent_peer_id(&self, _: PeerId) -> Option<NodeId> { None }
-		fn request_from(&self, _: PeerId, _: NetworkRequests) -> Result<ReqId, LesError> {
-			unimplemented!()
-		}
-		fn make_announcement(&self, _: Announcement) { }
-		fn disconnect_peer(&self, _: PeerId) { }
-		fn disable_peer(&self, _: PeerId) { }
-	}
-
-	#[test]
-	fn detects_hangup() {
-		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
-		let on_demand = OnDemand::new(cache);
-		let result = on_demand.header_by_hash(&FakeContext, request::HeaderByHash(H256::default()));
-
-		assert!(on_demand.pending.read().len() == 1);
-		drop(result);
-
-		on_demand.dispatch_pending(&FakeContext);
-		assert!(on_demand.pending.read().is_empty());
+		self.attempt_dispatch(ctx)
 	}
 }
ethcore/light/src/on_demand/tests.rs (new file, 397 lines)
@@ -0,0 +1,397 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Tests for the on-demand service.

use cache::Cache;
use ethcore::encoded;
use ethcore::header::{Header, Seal};
use futures::Future;
use network::{PeerId, NodeId};
use net::*;
use util::{H256, Mutex};
use time::Duration;
use ::request::{self as basic_request, Response};

use std::sync::Arc;

use super::{request, OnDemand, Peer};

// useful contexts to give the service.
enum Context {
	NoOp,
	WithPeer(PeerId),
	RequestFrom(PeerId, ReqId),
	Punish(PeerId),
}

impl EventContext for Context {
	fn peer(&self) -> PeerId {
		match *self {
			Context::WithPeer(id)
				| Context::RequestFrom(id, _)
				| Context::Punish(id) => id,
			_ => panic!("didn't expect to have peer queried."),
		}
	}

	fn as_basic(&self) -> &BasicContext { self }
}

impl BasicContext for Context {
	/// Returns the relevant peer's persistent ID (aka `NodeId`).
	fn persistent_peer_id(&self, _: PeerId) -> Option<NodeId> {
		panic!("didn't expect to provide persistent ID")
	}

	fn request_from(&self, peer_id: PeerId, _: ::request::NetworkRequests) -> Result<ReqId, Error> {
		match *self {
			Context::RequestFrom(id, req_id) => if peer_id == id { Ok(req_id) } else { Err(Error::NoCredits) },
			_ => panic!("didn't expect to have requests dispatched."),
		}
	}

	fn make_announcement(&self, _: Announcement) {
		panic!("didn't expect to make announcement")
	}

	fn disconnect_peer(&self, id: PeerId) {
		self.disable_peer(id)
	}

	fn disable_peer(&self, peer_id: PeerId) {
		match *self {
			Context::Punish(id) if id == peer_id => {},
			_ => panic!("Unexpectedly punished peer."),
		}
	}
}

// test harness.
struct Harness {
	service: OnDemand,
}

impl Harness {
	fn create() -> Self {
		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::minutes(1))));
		Harness {
			service: OnDemand::new_test(cache),
		}
	}

	fn inject_peer(&self, id: PeerId, peer: Peer) {
		self.service.peers.write().insert(id, peer);
	}
}

fn dummy_status() -> Status {
	Status {
		protocol_version: 1,
		network_id: 999,
		head_td: 1.into(),
		head_hash: H256::default(),
		head_num: 1359,
		genesis_hash: H256::default(),
		last_head: None,
	}
}

fn dummy_capabilities() -> Capabilities {
	Capabilities {
		serve_headers: true,
		serve_chain_since: Some(1),
		serve_state_since: Some(1),
		tx_relay: true,
	}
}

#[test]
fn detects_hangup() {
	let on_demand = Harness::create().service;
	let result = on_demand.header_by_hash(&Context::NoOp, request::HeaderByHash(H256::default()));

	assert_eq!(on_demand.pending.read().len(), 1);
	drop(result);

	on_demand.dispatch_pending(&Context::NoOp);
	assert!(on_demand.pending.read().is_empty());
}

#[test]
fn single_request() {
	let harness = Harness::create();

	let peer_id = 10101;
	let req_id = ReqId(14426);

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let header = Header::default();
	let encoded = encoded::Header::new(header.rlp(Seal::With));

	let recv = harness.service.request_raw(
		&Context::NoOp,
		vec![request::HeaderByHash(header.hash()).into()]
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id));

	assert_eq!(harness.service.pending.read().len(), 0);

	harness.service.on_responses(
		&Context::WithPeer(peer_id),
		req_id,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] })]
	);

	assert!(recv.wait().is_ok());
}

#[test]
fn no_capabilities() {
	let harness = Harness::create();

	let peer_id = 10101;

	let mut capabilities = dummy_capabilities();
	capabilities.serve_headers = false;

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: capabilities,
	});

	let _recv = harness.service.request_raw(
		&Context::NoOp,
		vec![request::HeaderByHash(Default::default()).into()]
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::NoOp);

	assert_eq!(harness.service.pending.read().len(), 1);
}

#[test]
fn reassign() {
	let harness = Harness::create();

	let peer_ids = (10101, 12345);
	let req_ids = (ReqId(14426), ReqId(555));

	harness.inject_peer(peer_ids.0, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let header = Header::default();
	let encoded = encoded::Header::new(header.rlp(Seal::With));

	let recv = harness.service.request_raw(
		&Context::NoOp,
		vec![request::HeaderByHash(header.hash()).into()]
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_ids.0, req_ids.0));
	assert_eq!(harness.service.pending.read().len(), 0);

	harness.service.on_disconnect(&Context::WithPeer(peer_ids.0), &[req_ids.0]);
	assert_eq!(harness.service.pending.read().len(), 1);

	harness.inject_peer(peer_ids.1, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	harness.service.dispatch_pending(&Context::RequestFrom(peer_ids.1, req_ids.1));
	assert_eq!(harness.service.pending.read().len(), 0);

	harness.service.on_responses(
		&Context::WithPeer(peer_ids.1),
		req_ids.1,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded] })]
	);

	assert!(recv.wait().is_ok());
}

#[test]
fn partial_response() {
	let harness = Harness::create();

	let peer_id = 111;
	let req_ids = (ReqId(14426), ReqId(555));

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let make = |num| {
		let mut hdr = Header::default();
		hdr.set_number(num);

		let encoded = encoded::Header::new(hdr.rlp(Seal::With));
		(hdr, encoded)
	};

	let (header1, encoded1) = make(5);
	let (header2, encoded2) = make(23452);

	// request two headers.
	let recv = harness.service.request_raw(
		&Context::NoOp,
		vec![
			request::HeaderByHash(header1.hash()).into(),
			request::HeaderByHash(header2.hash()).into(),
		],
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
	assert_eq!(harness.service.pending.read().len(), 0);

	// supply only the first one.
	harness.service.on_responses(
		&Context::WithPeer(peer_id),
		req_ids.0,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded1] })]
	);

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
	assert_eq!(harness.service.pending.read().len(), 0);

	// supply the next one.
	harness.service.on_responses(
		&Context::WithPeer(peer_id),
		req_ids.1,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded2] })]
	);

	assert!(recv.wait().is_ok());
}

#[test]
fn part_bad_part_good() {
	let harness = Harness::create();

	let peer_id = 111;
	let req_ids = (ReqId(14426), ReqId(555));

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let make = |num| {
		let mut hdr = Header::default();
		hdr.set_number(num);

		let encoded = encoded::Header::new(hdr.rlp(Seal::With));
		(hdr, encoded)
	};

	let (header1, encoded1) = make(5);
	let (header2, encoded2) = make(23452);

	// request two headers.
	let recv = harness.service.request_raw(
		&Context::NoOp,
		vec![
			request::HeaderByHash(header1.hash()).into(),
			request::HeaderByHash(header2.hash()).into(),
		],
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
	assert_eq!(harness.service.pending.read().len(), 0);

	// supply only the first one, but followed by the wrong kind of response.
	// the first header should be processed.
	harness.service.on_responses(
		&Context::Punish(peer_id),
		req_ids.0,
		&[
			Response::Headers(basic_request::HeadersResponse { headers: vec![encoded1] }),
			Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }),
		]
	);

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
	assert_eq!(harness.service.pending.read().len(), 0);

	// supply the next one.
	harness.service.on_responses(
		&Context::WithPeer(peer_id),
		req_ids.1,
		&[Response::Headers(basic_request::HeadersResponse { headers: vec![encoded2] })]
	);

	assert!(recv.wait().is_ok());
}

#[test]
fn wrong_kind() {
	let harness = Harness::create();

	let peer_id = 10101;
	let req_id = ReqId(14426);

	harness.inject_peer(peer_id, Peer {
		status: dummy_status(),
		capabilities: dummy_capabilities(),
	});

	let _recv = harness.service.request_raw(
		&Context::NoOp,
		vec![request::HeaderByHash(Default::default()).into()]
	).unwrap();

	assert_eq!(harness.service.pending.read().len(), 1);

	harness.service.dispatch_pending(&Context::RequestFrom(peer_id, req_id));

	assert_eq!(harness.service.pending.read().len(), 0);

	harness.service.on_responses(
		&Context::Punish(peer_id),
		req_id,
		&[Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] })]
	);

	assert_eq!(harness.service.pending.read().len(), 1);
}
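One pattern worth noting in the new tests: the `Context` enum is the entire network mock, and each variant encodes the single interaction a test expects, with everything else panicking. `Punish`, for instance, turns peer punishment into the assertion itself, which is how `wrong_kind` and `part_bad_part_good` verify that a misbehaving peer gets disabled. A small usage sketch; `punish_sketch` is an illustrative name:

#[cfg(test)]
fn punish_sketch() {
	let ctx = Context::Punish(111);
	// Disabling (or disconnecting, which delegates to disable) the expected peer passes...
	ctx.disable_peer(111);
	ctx.disconnect_peer(111);
	// ...while punishing any other peer would hit the panicking match arm and fail the test.
}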
@@ -17,12 +17,14 @@
 //! Block header.
 
 use util::*;
-use basic_types::{LogBloom, Seal, ZERO_LOGBLOOM};
+use basic_types::{LogBloom, ZERO_LOGBLOOM};
 use time::get_time;
 use rlp::*;
 
 use std::cell::RefCell;
 
+pub use basic_types::Seal;
+
 /// Type for Block number
 pub type BlockNumber = u64;
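The final hunk re-exports `Seal` from the header module, which is what lets the new tests write `use ethcore::header::{Header, Seal}` and build the RLP of a sealed header from a single import. A small sketch of the construction the tests rely on; `seal_reexport_sketch` is an illustrative name:

#[cfg(test)]
fn seal_reexport_sketch() {
	use ethcore::encoded;
	use ethcore::header::{Header, Seal}; // `Seal` now comes via the re-export

	let header = Header::default();
	// Same construction the tests use to fake a header response.
	let _encoded = encoded::Header::new(header.rlp(Seal::With));
}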