light: finish basic header chain and add tests
parent c2264bed27
commit 45ef986c04

Cargo.lock (generated): 7 changes
@@ -469,6 +469,7 @@ dependencies = [
"ethcore-util 1.5.0",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0",
"smallvec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1681,6 +1682,11 @@ name = "smallvec"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "smallvec"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "solicit"
version = "0.4.4"
@@ -2152,6 +2158,7 @@ dependencies = [
"checksum slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6dbdd334bd28d328dad1c41b0ea662517883d8880d8533895ef96c8003dec9c4"
"checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"
"checksum smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "fcc8d19212aacecf95e4a7a2179b26f7aeb9732a915cf01f05b0d3e044865410"
"checksum smallvec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a3c84984c278afe61a46e19868e8b23e2ee3be5b3cc6dea6edad4893bc6c841"
"checksum solicit 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "172382bac9424588d7840732b250faeeef88942e37b6e35317dce98cafdd75b2"
"checksum spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "93bdab61c1a413e591c4d17388ffa859eaff2df27f1e13a5ec8b716700605adf"
"checksum stable-heap 0.1.0 (git+https://github.com/carllerche/stable-heap?rev=3c5cd1ca47)" = "<none>"
@@ -19,7 +19,8 @@ ethcore-io = { path = "../../util/io" }
ethcore-ipc = { path = "../../ipc/rpc", optional = true }
rlp = { path = "../../util/rlp" }
time = "0.1"
smallvec = "0.3.1"

[features]
default = []
ipc = ["ethcore-ipc", "ethcore-ipc-codegen"]
@@ -23,28 +23,35 @@
//! This is separate from the `BlockChain` for two reasons:
//! - It stores only headers (and a pruned subset of them)
//! - To allow for flexibility in the database layout once that's incorporated.
// TODO: use DB instead of memory.
// TODO: use DB instead of memory. DB Layout: just the contents of `candidates`/`headers`
//

use std::collections::{BTreeMap, HashMap};

use ethcore::header::Header;
use ethcore::error::BlockError;
use ethcore::ids::BlockId;
use ethcore::views::HeaderView;
use util::{Bytes, H256, U256, Mutex, RwLock};

/// Delay this many blocks before producing a CHT.
use smallvec::SmallVec;

/// Delay this many blocks before producing a CHT. required to be at
/// least 1 but should be more in order to be resilient against reorgs.
const CHT_DELAY: u64 = 2048;

/// Generate CHT roots of this size.
// TODO: move into more generic module.
// TODO: move CHT definition/creation into more generic module.
const CHT_SIZE: u64 = 2048;

/// Information about a block.
#[derive(Debug, Clone)]
struct BestBlock {
	hash: H256,
	number: u64,
	total_difficulty: U256,
pub struct BlockDescriptor {
	/// The block's hash
	pub hash: H256,
	/// The block's number
	pub number: u64,
	/// The block's total difficulty.
	pub total_difficulty: U256,
}

// candidate block description.
@@ -55,7 +62,7 @@ struct Candidate {
}

struct Entry {
	candidates: Vec<Candidate>,
	candidates: SmallVec<[Candidate; 3]>, // 3 arbitrarily chosen
	canonical_hash: H256,
}
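
The switch to `SmallVec<[Candidate; 3]>` keeps up to three fork candidates inline in the `Entry` itself, only falling back to a heap allocation when a height has more than three competing headers. A minimal sketch of that behaviour (illustrative only; the element type is a stand-in for the `Candidate` struct above):

```rust
// Illustrative sketch, not part of the commit: SmallVec behaves like Vec
// but stores the first N elements (here 3) inline, so the common case of
// one canonical candidate per height needs no heap allocation.
extern crate smallvec;
use smallvec::SmallVec;

fn main() {
	let mut candidates: SmallVec<[u64; 3]> = SmallVec::new();
	candidates.push(1); // canonical candidate
	candidates.push(2); // short-lived fork
	assert_eq!(candidates.len(), 2); // still within the inline capacity of 3
}
```
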
@@ -64,7 +71,7 @@ pub struct HeaderChain {
	genesis_header: Bytes, // special-case the genesis.
	candidates: RwLock<BTreeMap<u64, Entry>>,
	headers: RwLock<HashMap<H256, Bytes>>,
	best_block: RwLock<BestBlock>,
	best_block: RwLock<BlockDescriptor>,
	cht_roots: Mutex<Vec<H256>>,
}
@@ -75,7 +82,7 @@ impl HeaderChain {

		HeaderChain {
			genesis_header: genesis.to_owned(),
			best_block: RwLock::new(BestBlock {
			best_block: RwLock::new(BlockDescriptor {
				hash: g_view.hash(),
				number: 0,
				total_difficulty: g_view.difficulty(),
@@ -114,7 +121,7 @@ impl HeaderChain {

		// insert headers and candidates entries.
		let mut candidates = self.candidates.write();
		candidates.entry(number).or_insert_with(|| Entry { candidates: Vec::new(), canonical_hash: hash})
		candidates.entry(number).or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash})
			.candidates.push(Candidate {
				hash: hash,
				parent_hash: parent_hash,
@@ -137,7 +144,7 @@ impl HeaderChain {
			canon_hash = canon.parent_hash;
		}

		*self.best_block.write() = BestBlock {
		*self.best_block.write() = BlockDescriptor {
			hash: hash,
			number: number,
			total_difficulty: total_difficulty,
@@ -145,13 +152,24 @@

		// produce next CHT root if it's time.
		let earliest_era = *candidates.keys().next().expect("at least one era just created; qed");
		if earliest_era + CHT_DELAY + CHT_SIZE < number {
			let values: Vec<_> = (0..CHT_SIZE).map(|x| x + earliest_era)
				.map(|x| candidates.remove(&x).map(|entry| (x, entry)))
				.map(|x| x.expect("all eras stored are sequential with no gaps; qed"))
				.map(|(x, entry)| (::rlp::encode(&x), ::rlp::encode(&entry.canonical_hash)))
				.map(|(k, v)| (k.to_vec(), v.to_vec()))
				.collect();
		if earliest_era + CHT_DELAY + CHT_SIZE <= number {
			let mut values = Vec::with_capacity(CHT_SIZE as usize);
			{
				let mut headers = self.headers.write();
				for i in (0..CHT_SIZE).map(|x| x + earliest_era) {
					let era_entry = candidates.remove(&i)
						.expect("all eras are sequential with no gaps; qed");

					for ancient in &era_entry.candidates {
						headers.remove(&ancient.hash);
					}

					values.push((
						::rlp::encode(&i).to_vec(),
						::rlp::encode(&era_entry.canonical_hash).to_vec(),
					));
				}
			}

			let cht_root = ::util::triehash::trie_root(values);
			debug!(target: "chain", "Produced CHT {} root: {:?}", (earliest_era - 1) % CHT_SIZE, cht_root);
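
The rewritten loop above both prunes the per-block headers for an era and collects the `(RLP(number), RLP(canonical hash))` pairs that `::util::triehash::trie_root` folds into the CHT root. The trigger condition can be checked in isolation; a sketch assuming only the two constants from this file (`CHT_SIZE = CHT_DELAY = 2048`):

```rust
// Sketch, not part of the commit: mirrors the trigger condition above.
// With the earliest stored era at block 1, the first CHT (covering
// blocks 1..=2048) is produced once the chain reaches block 4097,
// since 1 + CHT_DELAY + CHT_SIZE = 4097.
const CHT_SIZE: u64 = 2048;
const CHT_DELAY: u64 = 2048;

fn cht_ready(earliest_era: u64, new_best_number: u64) -> bool {
	earliest_era + CHT_DELAY + CHT_SIZE <= new_best_number
}

fn main() {
	assert!(!cht_ready(1, 4096));
	assert!(cht_ready(1, 4097));
}
```
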
@@ -165,7 +183,7 @@ impl HeaderChain {

	/// Get a block header. In the case of query by number, only canonical blocks
	/// will be returned.
	pub fn block_header(&self, id: BlockId) -> Option<Bytes> {
	pub fn get_header(&self, id: BlockId) -> Option<Bytes> {
		match id {
			BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.clone()),
			BlockId::Hash(hash) => self.headers.read().get(&hash).map(|x| x.to_vec()),
@@ -182,4 +200,141 @@ impl HeaderChain {
		}
	}

	/// Get the nth CHT root, if it's been computed.
	///
	/// CHT root 0 is from block `1..2048`.
	/// CHT root 1 is from block `2049..4096`
	/// and so on.
	///
	/// This is because it's assumed that the genesis hash is known,
	/// so including it within a CHT would be redundant.
	pub fn cht_root(&self, n: usize) -> Option<H256> {
		self.cht_roots.lock().get(n).map(|h| h.clone())
	}

	/// Get the genesis hash.
	pub fn genesis_hash(&self) -> H256 {
		use util::Hashable;

		self.genesis_header.sha3()
	}

	/// Get the best block's data.
	pub fn best_block(&self) -> BlockDescriptor {
		self.best_block.read().clone()
	}

	/// If there is a gap between the genesis and the rest
	/// of the stored blocks, return the first post-gap block.
	pub fn first_block(&self) -> Option<BlockDescriptor> {
		let candidates = self.candidates.read();
		match candidates.iter().next() {
			None | Some((&1, _)) => None,
			Some((&height, entry)) => Some(BlockDescriptor {
				number: height,
				hash: entry.canonical_hash,
				total_difficulty: entry.candidates.iter().find(|x| x.hash == entry.canonical_hash)
					.expect("entry always stores canonical candidate; qed").total_difficulty,
			})
		}
	}
}

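The numbering in the `cht_root` doc comment maps a block number to its CHT index as `(n - 1) / CHT_SIZE`; a small hypothetical helper (not part of the commit) makes that concrete:

```rust
// Hypothetical helper mirroring the doc comment on `cht_root`:
// blocks 1..=2048 fall into CHT 0, blocks 2049..=4096 into CHT 1, etc.
// The genesis block (0) is excluded because its hash is assumed known.
const CHT_SIZE: u64 = 2048;

fn cht_index(block_number: u64) -> u64 {
	assert!(block_number > 0, "genesis is not covered by any CHT");
	(block_number - 1) / CHT_SIZE
}

fn main() {
	assert_eq!(cht_index(1), 0);
	assert_eq!(cht_index(2048), 0);
	assert_eq!(cht_index(2049), 1);
	assert_eq!(cht_index(4096), 1);
}
```
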
#[cfg(test)]
mod tests {
	use super::HeaderChain;
	use ethcore::ids::BlockId;
	use ethcore::header::Header;
	use ethcore::spec::Spec;

	#[test]
	fn it_works() {
		let spec = Spec::new_test();
		let genesis_header = spec.genesis_header();

		let chain = HeaderChain::new(&::rlp::encode(&genesis_header));

		let mut parent_hash = genesis_header.hash();
		let mut rolling_timestamp = genesis_header.timestamp();
		for i in 1..10000 {
			let mut header = Header::new();
			header.set_parent_hash(parent_hash);
			header.set_number(i);
			header.set_timestamp(rolling_timestamp);
			header.set_difficulty(*genesis_header.difficulty() * i.into());

			chain.insert(::rlp::encode(&header).to_vec());

			parent_hash = header.hash();
			rolling_timestamp += 10;
		}

		assert!(chain.get_header(BlockId::Number(10)).is_none());
		assert!(chain.get_header(BlockId::Number(9000)).is_some());
		assert!(chain.cht_root(2).is_some());
		assert!(chain.cht_root(3).is_none());
	}

	#[test]
	fn reorganize() {
		let spec = Spec::new_test();
		let genesis_header = spec.genesis_header();

		let chain = HeaderChain::new(&::rlp::encode(&genesis_header));

		let mut parent_hash = genesis_header.hash();
		let mut rolling_timestamp = genesis_header.timestamp();
		for i in 1..6 {
			let mut header = Header::new();
			header.set_parent_hash(parent_hash);
			header.set_number(i);
			header.set_timestamp(rolling_timestamp);
			header.set_difficulty(*genesis_header.difficulty() * i.into());

			chain.insert(::rlp::encode(&header).to_vec()).unwrap();

			parent_hash = header.hash();
			rolling_timestamp += 10;
		}

		{
			let mut rolling_timestamp = rolling_timestamp;
			let mut parent_hash = parent_hash;
			for i in 6..16 {
				let mut header = Header::new();
				header.set_parent_hash(parent_hash);
				header.set_number(i);
				header.set_timestamp(rolling_timestamp);
				header.set_difficulty(*genesis_header.difficulty() * i.into());

				chain.insert(::rlp::encode(&header).to_vec()).unwrap();

				parent_hash = header.hash();
				rolling_timestamp += 10;
			}
		}

		assert_eq!(chain.best_block().number, 15);

		{
			let mut rolling_timestamp = rolling_timestamp;
			let mut parent_hash = parent_hash;

			// import a shorter chain which has better TD.
			for i in 6..13 {
				let mut header = Header::new();
				header.set_parent_hash(parent_hash);
				header.set_number(i);
				header.set_timestamp(rolling_timestamp);
				header.set_difficulty(*genesis_header.difficulty() * (i * i).into());

				chain.insert(::rlp::encode(&header).to_vec()).unwrap();

				parent_hash = header.hash();
				rolling_timestamp += 11;
			}
		}

		assert_eq!(chain.best_block().number, 12);
	}
}
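
The `reorganize` test relies on total difficulty rather than chain length: the shorter branch scales per-block difficulty by `i * i` instead of `i`, so it accumulates more work past block 5 even though it is three blocks shorter. A quick check of the sums (in units of the genesis difficulty):

```rust
// Sketch, not part of the commit: compare the work added on top of
// block 5 by the two branches in `reorganize`.
fn main() {
	let long_branch: u64 = (6..16).sum();                 // blocks 6..=15, difficulty ~ i
	let short_branch: u64 = (6..13).map(|i| i * i).sum(); // blocks 6..=12, difficulty ~ i*i
	assert_eq!(long_branch, 105);
	assert_eq!(short_branch, 595);
	assert!(short_branch > long_branch); // hence the best block ends up at number 12
}
```
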
@@ -20,41 +20,53 @@ use std::sync::Arc;

use ethcore::engines::Engine;
use ethcore::ids::BlockId;
use ethcore::service::ClientIoMessage;
use ethcore::block_import_error::BlockImportError;
use ethcore::block_status::BlockStatus;
use ethcore::verification::queue::{HeaderQueue, QueueInfo};
use ethcore::verification::queue::{HeaderQueue, QueueInfo, Config as QueueConfig};
use ethcore::transaction::SignedTransaction;
use ethcore::blockchain_info::BlockChainInfo;

use ethcore::spec::Spec;
use ethcore::service::ClientIoMessage;
use io::IoChannel;

use util::hash::{H256, H256FastMap};
use util::{Bytes, Mutex};

use provider::Provider;
use request;

use self::header_chain::HeaderChain;

mod header_chain;

/// Configuration for the light client.
#[derive(Debug, Default, Clone)]
pub struct Config {
	queue: QueueConfig,
}

/// Light client implementation.
pub struct Client {
	_engine: Arc<Engine>,
	header_queue: HeaderQueue,
	_message_channel: Mutex<IoChannel<ClientIoMessage>>,
	queue: HeaderQueue,
	chain: HeaderChain,
	tx_pool: Mutex<H256FastMap<SignedTransaction>>,
}

impl Client {
	/// Create a new `Client`.
	pub fn new(config: Config, spec: &Spec, io_channel: IoChannel<ClientIoMessage>) -> Self {
		Client {
			queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true),
			chain: HeaderChain::new(&::rlp::encode(&spec.genesis_header())),
			tx_pool: Mutex::new(Default::default()),
		}
	}

	/// Import a header as rlp-encoded bytes.
	pub fn import_header(&self, bytes: Bytes) -> Result<H256, BlockImportError> {
		let header = ::rlp::decode(&bytes);

		self.header_queue.import(header).map_err(Into::into)
	}

	/// Whether the block is already known (but not necessarily part of the canonical chain)
	pub fn is_known(&self, _id: BlockId) -> bool {
		false
		self.queue.import(header).map_err(Into::into)
	}

	/// Import a local transaction.
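
A hedged usage sketch of the new `Client` wiring, written as if it were a test inside this crate; `Spec::new_test` and `IoChannel::disconnected` come from the surrounding codebase and are assumptions here, not part of this diff:

```rust
// Sketch only: construct the light client with a detached IO channel and
// feed it an RLP-encoded header. Headers go through the verification
// queue before they can reach the HeaderChain.
use ethcore::spec::Spec;
use io::IoChannel;
use client::{Client, Config};

fn example() {
	let spec = Spec::new_test();
	let client = Client::new(Config::default(), &spec, IoChannel::disconnected());

	let encoded = ::rlp::encode(&spec.genesis_header()).to_vec();
	let _ = client.import_header(encoded); // queued for verification, not yet in the chain
}
```
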
@@ -68,30 +80,34 @@ impl Client {
	}

	/// Inquire about the status of a given block (or header).
	pub fn status(&self, _id: BlockId) -> BlockStatus {
	pub fn status(&self, id: BlockId) -> BlockStatus {
		BlockStatus::Unknown
	}

	/// Get the header queue info.
	pub fn queue_info(&self) -> QueueInfo {
		self.header_queue.queue_info()
	}

	/// Best block number.
	pub fn best_block_number(&self) -> u64 {
		0
	}

	/// Best block hash.
	pub fn best_block_hash(&self) -> u64 {
		unimplemented!()
		self.queue.queue_info()
	}
}

// dummy implementation -- may draw from canonical cache further on.
impl Provider for Client {
	fn chain_info(&self) -> BlockChainInfo {
		unimplemented!()
		let best_block = self.chain.best_block();
		let first_block = self.chain.first_block();
		let genesis_hash = self.chain.genesis_hash();

		BlockChainInfo {
			total_difficulty: best_block.total_difficulty,
			pending_total_difficulty: best_block.total_difficulty,
			genesis_hash: genesis_hash,
			best_block_hash: best_block.hash,
			best_block_number: best_block.number,
			ancient_block_hash: if first_block.is_some() { Some(genesis_hash) } else { None },
			ancient_block_number: if first_block.is_some() { Some(0) } else { None },
			first_block_hash: first_block.as_ref().map(|first| first.hash),
			first_block_number: first_block.as_ref().map(|first| first.number),
		}
	}

	fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option<u64> {
@@ -60,7 +60,8 @@ extern crate ethcore_util as util;
extern crate ethcore_network as network;
extern crate ethcore_io as io;
extern crate rlp;
extern crate smallvec;
extern crate time;

#[cfg(feature = "ipc")]
extern crate ethcore_ipc as ipc;
@@ -20,7 +20,7 @@ use network::{NetworkContext, PeerId, NodeId};

use super::{Announcement, LightProtocol, ReqId};
use super::error::Error;
use request::Request;
use request::{self, Request};

/// An I/O context which allows sending and receiving packets as well as
/// disconnecting peers. This is used as a generalization of the portions
@@ -93,6 +93,10 @@ pub trait EventContext {
	// TODO: maybe just put this on a timer in LightProtocol?
	fn make_announcement(&self, announcement: Announcement);

	/// Find the maximum number of requests of a specific type which can be made from
	/// supplied peer.
	fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option<usize>;

	/// Disconnect a peer.
	fn disconnect_peer(&self, peer: PeerId);
@@ -128,6 +132,10 @@ impl<'a> EventContext for Ctx<'a> {
		self.proto.make_announcement(self.io, announcement);
	}

	fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option<usize> {
		self.proto.max_requests(peer, kind)
	}

	fn disconnect_peer(&self, peer: PeerId) {
		self.io.disconnect_peer(peer);
	}
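
One intended use of the new `max_requests` hook: a handler can ask how much capacity a peer currently advertises for a given request kind before dispatching anything, and drop peers that can serve nothing. A hypothetical helper using only the two trait methods added in this hunk (`request::Kind::Headers` is assumed from the request module, not shown here):

```rust
// Hypothetical helper, not part of the commit: decide whether to send
// header requests to `peer` based on the capacity reported by the peer's
// flow-control state, or disconnect it otherwise.
fn dispatch_or_disconnect(ctx: &EventContext, peer: PeerId) {
	match ctx.max_requests(peer, request::Kind::Headers) {
		Some(capacity) if capacity > 0 => {
			// up to `capacity` header requests can be served right now;
			// a real handler would build and dispatch them here.
		}
		_ => ctx.disconnect_peer(peer), // nothing can be served at the moment
	}
}
```
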
@@ -157,7 +157,7 @@ impl Peer {

/// An LES event handler.
///
/// Each handler function takes a context which describes the relevant peer
/// and gives references to the IO layer and protocol structure so new messages
/// can be dispatched immediately.
///
@@ -185,10 +185,12 @@ pub trait Handler: Send + Sync {
	fn on_state_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[Vec<Bytes>]) { }
	/// Called when a peer responds with contract code.
	fn on_code(&self, _ctx: &EventContext, _req_id: ReqId, _codes: &[Bytes]) { }
	/// Called when a peer responds with header proofs. Each proof is a block header coupled
	/// with a series of trie nodes is ascending order by distance from the root.
	fn on_header_proofs(&self, _ctx: &EventContext, _req_id: ReqId, _proofs: &[(Bytes, Vec<Bytes>)]) { }
	/// Called on abort.
	/// Called on abort. This signals to handlers that they should clean up
	/// and ignore peers.
	// TODO: coreresponding `on_activate`?
	fn on_abort(&self) { }
}
@@ -215,9 +217,9 @@ pub struct Params {
/// This is simply designed for request-response purposes. Higher level uses
/// of the protocol, such as synchronization, will function as wrappers around
/// this system.
//
// LOCK ORDER:
// Locks must be acquired in the order declared, and when holding a read lock
// on the peers, only one peer may be held at a time.
pub struct LightProtocol {
	provider: Arc<Provider>,
@@ -252,7 +254,7 @@ impl LightProtocol {
		}
	}

	/// Check the maximum amount of requests of a specific type
	/// which a peer would be able to serve.
	pub fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option<usize> {
		self.peers.read().get(&peer).and_then(|peer| {
@@ -267,11 +269,11 @@ impl LightProtocol {
		})
	}

	/// Make a request to a peer.
	///
	/// Fails on: nonexistent peer, network error, peer not server,
	/// insufficient buffer. Does not check capabilities before sending.
	/// On success, returns a request id which can later be coordinated
	/// with an event.
	pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, request: Request) -> Result<ReqId, Error> {
		let peers = self.peers.read();
@@ -325,10 +327,10 @@ impl LightProtocol {

			// TODO: "urgent" announcements like new blocks?
			// the timer approach will skip 1 (possibly 2) in rare occasions.
			if peer_info.sent_head == announcement.head_hash ||
				peer_info.status.head_num >= announcement.head_num ||
				now - peer_info.last_update < Duration::milliseconds(UPDATE_INTERVAL_MS) {
				continue
			}

			peer_info.last_update = now;
@@ -357,7 +359,7 @@ impl LightProtocol {
	/// Add an event handler.
	/// Ownership will be transferred to the protocol structure,
	/// and the handler will be kept alive as long as it is.
	/// These are intended to be added when the protocol structure
	/// is initialized as a means of customizing its behavior.
	pub fn add_handler(&mut self, handler: Box<Handler>) {
		self.handlers.push(handler);
@@ -380,7 +382,7 @@ impl LightProtocol {
		pending_requests.clear();
	}

	// Does the common pre-verification of responses before the response itself
	// is actually decoded:
	// - check whether peer exists
	// - check whether request was made
@@ -406,7 +408,7 @@ impl LightProtocol {
		let mut peer_info = peer_info.lock();
		match peer_info.remote_flow.as_mut() {
			Some(&mut (ref mut buf, ref mut flow)) => {
				let actual_buffer = ::std::cmp::min(cur_buffer, *flow.limit());
				buf.update_to(actual_buffer)
			}
			None => return Err(Error::NotServer), // this really should be impossible.
@@ -488,17 +490,17 @@ impl LightProtocol {
				request::Kind::Receipts => timeout::RECEIPTS,
				request::Kind::StateProofs => timeout::PROOFS,
				request::Kind::Codes => timeout::CONTRACT_CODES,
				request::Kind::HeaderProofs => timeout::HEADER_PROOFS,
			};

			if r.timestamp + Duration::milliseconds(kind_timeout) <= now {
				debug!(target: "les", "Request for {:?} from peer {} timed out",
					r.request.kind(), r.peer_id);

				// keep the request in the `pending` set for now so
				// on_disconnect will pass unfulfilled ReqIds to handlers.
				// in the case that a response is received after this, the
				// disconnect won't be cancelled but the ReqId won't be
				// marked as abandoned.
				io.disconnect_peer(r.peer_id);
			}
@@ -519,7 +521,7 @@ impl LightProtocol {
			punish(*peer, io, Error::UnsupportedProtocolVersion(proto_version));
			return;
		}

		let chain_info = self.provider.chain_info();

		let status = Status {
@@ -540,7 +542,7 @@ impl LightProtocol {
			last_update: SteadyTime::now(),
		});

		io.send(*peer, packet::STATUS, status_packet);
	}

	// called when a peer disconnects.
@@ -569,7 +571,7 @@ impl LightProtocol {
						io: io,
						proto: self,
					}, &unfulfilled)
				}
			}
		}
	}
@@ -608,7 +610,7 @@ impl LightProtocol {
		for handler in &self.handlers {
			handler.on_connect(&Ctx {
				peer: *peer,
				io: io,
				proto: self,
			}, &status, &capabilities)
		}
@@ -662,7 +664,7 @@ impl LightProtocol {
	}

	// Handle a request for block headers.
	fn get_block_headers(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> {
		const MAX_HEADERS: usize = 512;

		let peers = self.peers.read();
@@ -914,7 +916,7 @@ impl LightProtocol {
			.map(|x| x.iter().map(|node| node.as_raw().to_owned()).collect())
			.collect();

		for handler in &self.handlers {
			handler.on_state_proofs(&Ctx {
				peer: *peer,
				io: io,
@@ -983,7 +985,7 @@ impl LightProtocol {

		let raw_code: Vec<Bytes> = try!(try!(raw.at(2)).iter().map(|x| x.as_val()).collect());

		for handler in &self.handlers {
			handler.on_code(&Ctx {
				peer: *peer,
				io: io,
@@ -1055,11 +1057,11 @@ impl LightProtocol {
				try!(raw.at(1)).iter().map(|x| x.as_raw().to_owned()).collect(),
			))
		}

		let req_id = try!(self.pre_verify_response(peer, request::Kind::HeaderProofs, &raw));
		let raw_proofs: Vec<_> = try!(try!(raw.at(2)).iter().map(decode_res).collect());

		for handler in &self.handlers {
			handler.on_header_proofs(&Ctx {
				peer: *peer,
				io: io,
@@ -1082,7 +1084,7 @@ impl LightProtocol {
			handler.on_transactions(&Ctx {
				peer: *peer,
				io: io,
				proto: self,
			}, &txs);
		}
@@ -1136,12 +1138,12 @@ fn encode_request(req: &Request, req_id: usize) -> Vec<u8> {
		Request::Headers(ref headers) => {
			let mut stream = RlpStream::new_list(2);
			stream.append(&req_id).begin_list(4);

			match headers.start {
				HashOrNumber::Hash(ref hash) => stream.append(hash),
				HashOrNumber::Number(ref num) => stream.append(num),
			};

			stream
				.append(&headers.max)
				.append(&headers.skip)
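
On the wire, the `Request::Headers` arm above produces a two-item list, roughly `[req_id, [start, max, skip, reverse]]`, where `start` is either a hash or a number. A sketch of building one such payload; the `reverse` field and the exact shape of the inner list are assumptions inferred from the `begin_list(4)` call, not spelled out in this hunk:

```rust
// Sketch, not part of the commit: encode a headers request starting at a
// block number. Mirrors the structure of the arm above.
extern crate rlp;
use rlp::RlpStream;

fn encode_headers_request(req_id: usize, start: u64, max: u64, skip: u64, reverse: bool) -> Vec<u8> {
	let mut stream = RlpStream::new_list(2);
	stream.append(&req_id).begin_list(4);
	stream.append(&start) // could also be a block hash, per HashOrNumber
		.append(&max)
		.append(&skip)
		.append(&reverse);
	stream.out()
}
```
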
@@ -1214,4 +1216,4 @@ fn encode_request(req: &Request, req_id: usize) -> Vec<u8> {
			stream.out()
		}
	}
}
@@ -562,4 +562,4 @@ mod tests {
		assert_eq!(read_capabilities, capabilities);
		assert!(read_flow.is_none());
	}
}