Remove light client
This commit is contained in:
parent
2ab8c72ce3
commit
194101ed00
69
Cargo.lock
generated
69
Cargo.lock
generated
@ -155,16 +155,6 @@ dependencies = [
|
||||
"byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bincode"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bit-set"
|
||||
version = "0.4.0"
|
||||
@ -984,49 +974,6 @@ dependencies = [
|
||||
"tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ethcore-light"
|
||||
version = "1.12.0"
|
||||
dependencies = [
|
||||
"bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"common-types 0.1.0",
|
||||
"error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ethcore 1.12.0",
|
||||
"ethcore-blockchain 0.1.0",
|
||||
"ethcore-db 0.1.0",
|
||||
"ethcore-io 1.12.0",
|
||||
"ethcore-network 1.12.0",
|
||||
"ethereum-types 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"failsafe 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"fastmap 0.1.0",
|
||||
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"hash-db 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"heapsize 0.4.2 (git+https://github.com/cheme/heapsize.git?branch=ec-macfix)",
|
||||
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"journaldb 0.2.0",
|
||||
"keccak-hash 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"keccak-hasher 0.1.1",
|
||||
"kvdb 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"kvdb-memorydb 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"memory-cache 0.1.0",
|
||||
"memory-db 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"parity-bytes 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"patricia-trie-ethereum 0.1.0",
|
||||
"rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rlp 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rlp_derive 0.1.0",
|
||||
"serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde_derive 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"stats 0.1.0",
|
||||
"tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"trie-db 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"triehash-ethereum 0.2.0",
|
||||
"vm 0.1.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ethcore-logger"
|
||||
version = "1.12.0"
|
||||
@ -1255,7 +1202,6 @@ dependencies = [
|
||||
"env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ethcore 1.12.0",
|
||||
"ethcore-io 1.12.0",
|
||||
"ethcore-light 1.12.0",
|
||||
"ethcore-network 1.12.0",
|
||||
"ethcore-network-devp2p 1.12.0",
|
||||
"ethcore-private-tx 1.0.0",
|
||||
@ -1434,16 +1380,6 @@ dependencies = [
|
||||
"vm 0.1.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "failsafe"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "failure"
|
||||
version = "0.1.6"
|
||||
@ -2675,7 +2611,6 @@ dependencies = [
|
||||
"ethcore-call-contract 0.1.0",
|
||||
"ethcore-db 0.1.0",
|
||||
"ethcore-io 1.12.0",
|
||||
"ethcore-light 1.12.0",
|
||||
"ethcore-logger 1.12.0",
|
||||
"ethcore-miner 1.12.0",
|
||||
"ethcore-network 1.12.0",
|
||||
@ -2826,7 +2761,6 @@ dependencies = [
|
||||
"ethcore 1.12.0",
|
||||
"ethcore-accounts 0.1.0",
|
||||
"ethcore-io 1.12.0",
|
||||
"ethcore-light 1.12.0",
|
||||
"ethcore-logger 1.12.0",
|
||||
"ethcore-miner 1.12.0",
|
||||
"ethcore-network 1.12.0",
|
||||
@ -2837,7 +2771,6 @@ dependencies = [
|
||||
"ethkey 0.3.0",
|
||||
"ethstore 0.2.1",
|
||||
"fake-fetch 0.0.1",
|
||||
"fastmap 0.1.0",
|
||||
"fetch 0.1.0",
|
||||
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -4843,7 +4776,6 @@ dependencies = [
|
||||
"checksum base-x 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1"
|
||||
"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
|
||||
"checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643"
|
||||
"checksum bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e103c8b299b28a9c6990458b7013dc4a8356a9b854c51b9883241f5866fac36e"
|
||||
"checksum bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9bf6104718e80d7b26a68fdbacff3481cfc05df670821affc7e9cbc1884400c"
|
||||
"checksum bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f"
|
||||
"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
|
||||
@ -4905,7 +4837,6 @@ dependencies = [
|
||||
"checksum ethbloom 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1a93a43ce2e9f09071449da36bfa7a1b20b950ee344b6904ff23de493b03b386"
|
||||
"checksum ethereum-types 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e742184dc63a01c8ea0637369f8faa27c40f537949908a237f95c05e68d2c96"
|
||||
"checksum ethereum-types-serialize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1873d77b32bc1891a79dad925f2acbc318ee942b38b9110f9dbc5fbeffcea350"
|
||||
"checksum failsafe 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf04c6ffd217a68c73fdf40eb3331c484fd7a9fa4fd1028fcf74456ef889ca12"
|
||||
"checksum failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9"
|
||||
"checksum failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08"
|
||||
"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
|
||||
|
@ -38,7 +38,6 @@ ethcore-blockchain = { path = "ethcore/blockchain" }
|
||||
ethcore-call-contract = { path = "ethcore/call-contract"}
|
||||
ethcore-db = { path = "ethcore/db" }
|
||||
ethcore-io = { path = "util/io" }
|
||||
ethcore-light = { path = "ethcore/light" }
|
||||
ethcore-logger = { path = "parity/logger" }
|
||||
ethcore-miner = { path = "miner" }
|
||||
ethcore-network = { path = "util/network" }
|
||||
|
@ -37,8 +37,6 @@ pub const COL_TRACE: Option<u32> = Some(4);
|
||||
pub const COL_ACCOUNT_BLOOM: Option<u32> = Some(5);
|
||||
/// Column for general information from the local node which can persist.
|
||||
pub const COL_NODE_INFO: Option<u32> = Some(6);
|
||||
/// Column for the light client chain.
|
||||
pub const COL_LIGHT_CHAIN: Option<u32> = Some(7);
|
||||
/// Number of columns in DB
|
||||
pub const NUM_COLUMNS: Option<u32> = Some(8);
|
||||
|
||||
|
@ -1,52 +0,0 @@
|
||||
[package]
|
||||
description = "Parity Ethereum (EthCore) Light Client Implementation (Block Import IO Service, Blockchain Data Fetching, Light Client Header Chain Storage, Parity Light Protocol (PLP) Provider, Light Transaction Queue, CHT Definitions, Light Client Data Cache), Parity Light Protocol (PLP) Implementation, P2P Network I/O and Event Context Generalization, Peer Error Handling & Punishment, Request Load Timer & Distribution Manager, Pending Request Set Storage, Request Credit Management, Light Client Request Types, Request Chain Builder Utility, On-demand Chain Request Service over LES (for RPCs), ResponseGuard Implementation)"
|
||||
homepage = "http://parity.io"
|
||||
license = "GPL-3.0"
|
||||
name = "ethcore-light"
|
||||
version = "1.12.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
log = "0.4"
|
||||
parity-bytes = "0.1"
|
||||
common-types = { path = "../types" }
|
||||
ethcore = { path = ".."}
|
||||
ethcore-db = { path = "../db" }
|
||||
ethcore-blockchain = { path = "../blockchain" }
|
||||
ethereum-types = "0.4"
|
||||
memory-db = "0.11.0"
|
||||
trie-db = "0.11.0"
|
||||
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
|
||||
ethcore-network = { path = "../../util/network" }
|
||||
ethcore-io = { path = "../../util/io" }
|
||||
hash-db = "0.11.0"
|
||||
heapsize = "0.4"
|
||||
vm = { path = "../vm" }
|
||||
fastmap = { path = "../../util/fastmap" }
|
||||
failsafe = { version = "0.3.0", default-features = false, features = ["parking_lot_mutex"] }
|
||||
rlp = { version = "0.3.0", features = ["ethereum"] }
|
||||
rlp_derive = { path = "../../util/rlp-derive" }
|
||||
smallvec = "0.6"
|
||||
futures = "0.1"
|
||||
rand = "0.4"
|
||||
itertools = "0.5"
|
||||
bincode = "0.8.0"
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
parking_lot = "0.7"
|
||||
stats = { path = "../../util/stats" }
|
||||
keccak-hash = "0.1"
|
||||
keccak-hasher = { path = "../../util/keccak-hasher" }
|
||||
triehash-ethereum = { version = "0.2", path = "../../util/triehash-ethereum" }
|
||||
kvdb = "0.1"
|
||||
memory-cache = { path = "../../util/memory-cache" }
|
||||
error-chain = { version = "0.12", default-features = false }
|
||||
journaldb = { path = "../../util/journaldb" }
|
||||
|
||||
[dev-dependencies]
|
||||
ethcore = { path = "..", features = ["test-helpers"] }
|
||||
kvdb-memorydb = "0.1"
|
||||
tempdir = "0.3"
|
||||
|
||||
[features]
|
||||
default = []
|
@ -1,192 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Cache for data fetched from the network.
|
||||
//!
|
||||
//! Stores ancient block headers, bodies, receipts, and total difficulties.
|
||||
//! Furthermore, stores a "gas price corpus" of relative recency, which is a sorted
|
||||
//! vector of all gas prices from a recent range of blocks.
|
||||
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use common_types::{encoded, receipt::Receipt, BlockNumber};
|
||||
use ethereum_types::{H256, U256};
|
||||
use heapsize::HeapSizeOf;
|
||||
use memory_cache::MemoryLruCache;
|
||||
use stats::Corpus;
|
||||
|
||||
/// Configuration for how much data to cache.
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||
pub struct CacheSizes {
|
||||
/// Maximum size, in bytes, of cached headers.
|
||||
pub headers: usize,
|
||||
/// Maximum size, in bytes, of cached canonical hashes.
|
||||
pub canon_hashes: usize,
|
||||
/// Maximum size, in bytes, of cached block bodies.
|
||||
pub bodies: usize,
|
||||
/// Maximum size, in bytes, of cached block receipts.
|
||||
pub receipts: usize,
|
||||
/// Maximum size, in bytes, of cached chain score for the block.
|
||||
pub chain_score: usize,
|
||||
}
|
||||
|
||||
impl Default for CacheSizes {
|
||||
fn default() -> Self {
|
||||
const MB: usize = 1024 * 1024;
|
||||
CacheSizes {
|
||||
headers: 10 * MB,
|
||||
canon_hashes: 3 * MB,
|
||||
bodies: 20 * MB,
|
||||
receipts: 10 * MB,
|
||||
chain_score: 7 * MB,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The light client data cache.
|
||||
///
|
||||
/// Note that almost all getter methods take `&mut self` due to the necessity to update
|
||||
/// the underlying LRU-caches on read.
|
||||
/// [LRU-cache](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_Recently_Used_.28LRU.29)
|
||||
pub struct Cache {
|
||||
headers: MemoryLruCache<H256, encoded::Header>,
|
||||
canon_hashes: MemoryLruCache<BlockNumber, H256>,
|
||||
bodies: MemoryLruCache<H256, encoded::Body>,
|
||||
receipts: MemoryLruCache<H256, Vec<Receipt>>,
|
||||
chain_score: MemoryLruCache<H256, U256>,
|
||||
corpus: Option<(Corpus<U256>, Instant)>,
|
||||
corpus_expiration: Duration,
|
||||
}
|
||||
|
||||
impl Cache {
|
||||
/// Create a new data cache with the given sizes and gas price corpus expiration time.
|
||||
pub fn new(sizes: CacheSizes, corpus_expiration: Duration) -> Self {
|
||||
Cache {
|
||||
headers: MemoryLruCache::new(sizes.headers),
|
||||
canon_hashes: MemoryLruCache::new(sizes.canon_hashes),
|
||||
bodies: MemoryLruCache::new(sizes.bodies),
|
||||
receipts: MemoryLruCache::new(sizes.receipts),
|
||||
chain_score: MemoryLruCache::new(sizes.chain_score),
|
||||
corpus: None,
|
||||
corpus_expiration,
|
||||
}
|
||||
}
|
||||
|
||||
/// Query header by hash.
|
||||
pub fn block_header(&mut self, hash: &H256) -> Option<encoded::Header> {
|
||||
self.headers.get_mut(hash).cloned()
|
||||
}
|
||||
|
||||
/// Query hash by number.
|
||||
pub fn block_hash(&mut self, num: BlockNumber) -> Option<H256> {
|
||||
self.canon_hashes.get_mut(&num).map(|h| *h)
|
||||
}
|
||||
|
||||
/// Query block body by block hash.
|
||||
pub fn block_body(&mut self, hash: &H256) -> Option<encoded::Body> {
|
||||
self.bodies.get_mut(hash).cloned()
|
||||
}
|
||||
|
||||
/// Query block receipts by block hash.
|
||||
pub fn block_receipts(&mut self, hash: &H256) -> Option<Vec<Receipt>> {
|
||||
self.receipts.get_mut(hash).cloned()
|
||||
}
|
||||
|
||||
/// Query chain score by block hash.
|
||||
pub fn chain_score(&mut self, hash: &H256) -> Option<U256> {
|
||||
self.chain_score.get_mut(hash).map(|h| *h)
|
||||
}
|
||||
|
||||
/// Cache the given header.
|
||||
pub fn insert_block_header(&mut self, hash: H256, hdr: encoded::Header) {
|
||||
self.headers.insert(hash, hdr);
|
||||
}
|
||||
|
||||
/// Cache the given canonical block hash.
|
||||
pub fn insert_block_hash(&mut self, num: BlockNumber, hash: H256) {
|
||||
self.canon_hashes.insert(num, hash);
|
||||
}
|
||||
|
||||
/// Cache the given block body.
|
||||
pub fn insert_block_body(&mut self, hash: H256, body: encoded::Body) {
|
||||
self.bodies.insert(hash, body);
|
||||
}
|
||||
|
||||
/// Cache the given block receipts.
|
||||
pub fn insert_block_receipts(&mut self, hash: H256, receipts: Vec<Receipt>) {
|
||||
self.receipts.insert(hash, receipts);
|
||||
}
|
||||
|
||||
/// Cache the given chain scoring.
|
||||
pub fn insert_chain_score(&mut self, hash: H256, score: U256) {
|
||||
self.chain_score.insert(hash, score);
|
||||
}
|
||||
|
||||
/// Get gas price corpus, if recent enough.
|
||||
pub fn gas_price_corpus(&self) -> Option<Corpus<U256>> {
|
||||
let now = Instant::now();
|
||||
|
||||
self.corpus.as_ref().and_then(|&(ref corpus, ref tm)| {
|
||||
if *tm + self.corpus_expiration >= now {
|
||||
Some(corpus.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Set the cached gas price corpus.
|
||||
pub fn set_gas_price_corpus(&mut self, corpus: Corpus<U256>) {
|
||||
self.corpus = Some((corpus, Instant::now()))
|
||||
}
|
||||
|
||||
/// Get the memory used.
|
||||
pub fn mem_used(&self) -> usize {
|
||||
self.heap_size_of_children()
|
||||
}
|
||||
}
|
||||
|
||||
impl HeapSizeOf for Cache {
|
||||
fn heap_size_of_children(&self) -> usize {
|
||||
self.headers.current_size()
|
||||
+ self.canon_hashes.current_size()
|
||||
+ self.bodies.current_size()
|
||||
+ self.receipts.current_size()
|
||||
+ self.chain_score.current_size()
|
||||
// TODO: + corpus
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Cache;
|
||||
use std::time::Duration;
|
||||
|
||||
#[test]
|
||||
fn corpus_inaccessible() {
|
||||
let duration = Duration::from_secs(20);
|
||||
let mut cache = Cache::new(Default::default(), duration.clone());
|
||||
|
||||
cache.set_gas_price_corpus(vec![].into());
|
||||
assert_eq!(cache.gas_price_corpus(), Some(vec![].into()));
|
||||
|
||||
{
|
||||
let corpus_time = &mut cache.corpus.as_mut().unwrap().1;
|
||||
*corpus_time = *corpus_time - duration;
|
||||
}
|
||||
assert!(cache.gas_price_corpus().is_none());
|
||||
}
|
||||
}
|
@ -1,227 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Canonical hash trie definitions and helper functions.
|
||||
//!
|
||||
//! Each CHT is a trie mapping block numbers to canonical hashes and total difficulty.
|
||||
//! One is generated for every `SIZE` blocks, allowing us to discard those blocks in
|
||||
//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply
|
||||
//! request an inclusion proof of a specific block number against the trie with the
|
||||
//! root has. A correct proof implies that the claimed block is identical to the one
|
||||
//! we discarded.
|
||||
|
||||
use bytes::Bytes;
|
||||
use common_types::ids::BlockId;
|
||||
use ethereum_types::{H256, U256};
|
||||
use ethtrie::{self, TrieDB, TrieDBMut};
|
||||
use hash_db::HashDB;
|
||||
use journaldb::new_memory_db;
|
||||
use keccak_hasher::KeccakHasher;
|
||||
use kvdb::DBValue;
|
||||
use memory_db::MemoryDB;
|
||||
use rlp::{Rlp, RlpStream};
|
||||
use trie::{Recorder, Trie, TrieMut};
|
||||
|
||||
// encode a key.
|
||||
macro_rules! key {
|
||||
($num: expr) => {
|
||||
::rlp::encode(&$num)
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! val {
|
||||
($hash: expr, $td: expr) => {{
|
||||
let mut stream = RlpStream::new_list(2);
|
||||
stream.append(&$hash).append(&$td);
|
||||
stream.drain()
|
||||
}};
|
||||
}
|
||||
|
||||
/// The size of each CHT.
|
||||
pub const SIZE: u64 = 2048;
|
||||
|
||||
/// A canonical hash trie. This is generic over any database it can query.
|
||||
/// See module docs for more details.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CHT<DB: HashDB<KeccakHasher, DBValue>> {
|
||||
db: DB,
|
||||
root: H256, // the root of this CHT.
|
||||
number: u64,
|
||||
}
|
||||
|
||||
impl<DB: HashDB<KeccakHasher, DBValue>> CHT<DB> {
|
||||
/// Query the root of the CHT.
|
||||
pub fn root(&self) -> H256 {
|
||||
self.root
|
||||
}
|
||||
|
||||
/// Query the number of the CHT.
|
||||
pub fn number(&self) -> u64 {
|
||||
self.number
|
||||
}
|
||||
|
||||
/// Generate an inclusion proof for the entry at a specific block.
|
||||
/// Nodes before level `from_level` will be omitted.
|
||||
/// Returns an error on an incomplete trie, and `Ok(None)` on an unprovable request.
|
||||
pub fn prove(&self, num: u64, from_level: u32) -> ethtrie::Result<Option<Vec<Bytes>>> {
|
||||
if block_to_cht_number(num) != Some(self.number) {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let mut recorder = Recorder::with_depth(from_level);
|
||||
let db: &dyn HashDB<_, _> = &self.db;
|
||||
let t = TrieDB::new(&db, &self.root)?;
|
||||
t.get_with(&key!(num), &mut recorder)?;
|
||||
|
||||
Ok(Some(recorder.drain().into_iter().map(|x| x.data).collect()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Block information necessary to build a CHT.
|
||||
pub struct BlockInfo {
|
||||
/// The block's hash.
|
||||
pub hash: H256,
|
||||
/// The block's parent's hash.
|
||||
pub parent_hash: H256,
|
||||
/// The block's total difficulty.
|
||||
pub total_difficulty: U256,
|
||||
}
|
||||
|
||||
/// Build an in-memory CHT from a closure which provides necessary information
|
||||
/// about blocks. If the fetcher ever fails to provide the info, the CHT
|
||||
/// will not be generated.
|
||||
pub fn build<F>(cht_num: u64, mut fetcher: F) -> Option<CHT<MemoryDB<KeccakHasher, DBValue>>>
|
||||
where
|
||||
F: FnMut(BlockId) -> Option<BlockInfo>,
|
||||
{
|
||||
let mut db = new_memory_db();
|
||||
|
||||
// start from the last block by number and work backwards.
|
||||
let last_num = start_number(cht_num + 1) - 1;
|
||||
let mut id = BlockId::Number(last_num);
|
||||
|
||||
let mut root = H256::default();
|
||||
|
||||
{
|
||||
let mut t = TrieDBMut::new(&mut db, &mut root);
|
||||
for blk_num in (0..SIZE).map(|n| last_num - n) {
|
||||
let info = match fetcher(id) {
|
||||
Some(info) => info,
|
||||
None => return None,
|
||||
};
|
||||
|
||||
id = BlockId::Hash(info.parent_hash);
|
||||
t.insert(&key!(blk_num), &val!(info.hash, info.total_difficulty))
|
||||
.expect("fresh in-memory database is infallible; qed");
|
||||
}
|
||||
}
|
||||
|
||||
Some(CHT {
|
||||
db,
|
||||
root,
|
||||
number: cht_num,
|
||||
})
|
||||
}
|
||||
|
||||
/// Compute a CHT root from an iterator of (hash, td) pairs. Fails if shorter than
|
||||
/// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`.
|
||||
/// Discards the trie's nodes.
|
||||
pub fn compute_root<I>(cht_num: u64, iterable: I) -> Option<H256>
|
||||
where
|
||||
I: IntoIterator<Item = (H256, U256)>,
|
||||
{
|
||||
let mut v = Vec::with_capacity(SIZE as usize);
|
||||
let start_num = start_number(cht_num) as usize;
|
||||
|
||||
for (i, (h, td)) in iterable.into_iter().take(SIZE as usize).enumerate() {
|
||||
v.push((key!(i + start_num), val!(h, td)))
|
||||
}
|
||||
|
||||
if v.len() == SIZE as usize {
|
||||
Some(::triehash::trie_root(v))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Check a proof for a CHT.
|
||||
/// Given a set of a trie nodes, a number to query, and a trie root,
|
||||
/// verify the given trie branch and extract the canonical hash and total difficulty.
|
||||
// TODO: better support for partially-checked queries.
|
||||
pub fn check_proof(proof: &[Bytes], num: u64, root: H256) -> Option<(H256, U256)> {
|
||||
let mut db = new_memory_db();
|
||||
|
||||
for node in proof {
|
||||
db.insert(&node[..]);
|
||||
}
|
||||
let res = match TrieDB::new(&db, &root) {
|
||||
Err(_) => return None,
|
||||
Ok(trie) => trie.get_with(&key!(num), |val: &[u8]| {
|
||||
let rlp = Rlp::new(val);
|
||||
rlp.val_at::<H256>(0)
|
||||
.and_then(|h| rlp.val_at::<U256>(1).map(|td| (h, td)))
|
||||
.ok()
|
||||
}),
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(Some(Some((hash, td)))) => Some((hash, td)),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a block number to a CHT number.
|
||||
/// Returns `None` for `block_num` == 0, `Some` otherwise.
|
||||
pub fn block_to_cht_number(block_num: u64) -> Option<u64> {
|
||||
match block_num {
|
||||
0 => None,
|
||||
n => Some((n - 1) / SIZE),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the starting block of a given CHT.
|
||||
/// CHT 0 includes block 1...SIZE,
|
||||
/// CHT 1 includes block SIZE + 1 ... 2*SIZE
|
||||
/// More generally: CHT N includes block (1 + N*SIZE)...((N+1)*SIZE).
|
||||
/// This is because the genesis hash is assumed to be known
|
||||
/// and including it would be redundant.
|
||||
pub fn start_number(cht_num: u64) -> u64 {
|
||||
(cht_num * SIZE) + 1
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#[test]
|
||||
fn size_is_lt_usize() {
|
||||
// to ensure safe casting on the target platform.
|
||||
assert!(::cht::SIZE < usize::max_value() as u64)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn block_to_cht_number() {
|
||||
assert!(::cht::block_to_cht_number(0).is_none());
|
||||
assert_eq!(::cht::block_to_cht_number(1).unwrap(), 0);
|
||||
assert_eq!(::cht::block_to_cht_number(::cht::SIZE + 1).unwrap(), 1);
|
||||
assert_eq!(::cht::block_to_cht_number(::cht::SIZE).unwrap(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn start_number() {
|
||||
assert_eq!(::cht::start_number(0), 1);
|
||||
assert_eq!(::cht::start_number(1), ::cht::SIZE + 1);
|
||||
assert_eq!(::cht::start_number(2), ::cht::SIZE * 2 + 1);
|
||||
}
|
||||
}
|
@ -1,87 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Trait for fetching chain data.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_types::{encoded, header::Header, receipt::Receipt};
|
||||
use ethcore::{
|
||||
engines::{EthEngine, StateDependentProof},
|
||||
machine::EthereumMachine,
|
||||
};
|
||||
use ethereum_types::H256;
|
||||
use futures::future::IntoFuture;
|
||||
|
||||
/// Provides full chain data.
|
||||
pub trait ChainDataFetcher: Send + Sync + 'static {
|
||||
/// Error type when data unavailable.
|
||||
type Error: ::std::fmt::Debug;
|
||||
|
||||
/// Future for fetching block body.
|
||||
type Body: IntoFuture<Item = encoded::Block, Error = Self::Error>;
|
||||
/// Future for fetching block receipts.
|
||||
type Receipts: IntoFuture<Item = Vec<Receipt>, Error = Self::Error>;
|
||||
/// Future for fetching epoch transition
|
||||
type Transition: IntoFuture<Item = Vec<u8>, Error = Self::Error>;
|
||||
|
||||
/// Fetch a block body.
|
||||
fn block_body(&self, header: &Header) -> Self::Body;
|
||||
|
||||
/// Fetch block receipts.
|
||||
fn block_receipts(&self, header: &Header) -> Self::Receipts;
|
||||
|
||||
/// Fetch epoch transition proof at given header.
|
||||
fn epoch_transition(
|
||||
&self,
|
||||
_hash: H256,
|
||||
_engine: Arc<dyn EthEngine>,
|
||||
_checker: Arc<dyn StateDependentProof<EthereumMachine>>,
|
||||
) -> Self::Transition;
|
||||
}
|
||||
|
||||
/// Fetcher implementation which cannot fetch anything.
|
||||
pub struct Unavailable;
|
||||
|
||||
/// Create a fetcher which has all data unavailable.
|
||||
pub fn unavailable() -> Unavailable {
|
||||
Unavailable
|
||||
}
|
||||
|
||||
impl ChainDataFetcher for Unavailable {
|
||||
type Error = &'static str;
|
||||
|
||||
type Body = Result<encoded::Block, &'static str>;
|
||||
type Receipts = Result<Vec<Receipt>, &'static str>;
|
||||
type Transition = Result<Vec<u8>, &'static str>;
|
||||
|
||||
fn block_body(&self, _header: &Header) -> Self::Body {
|
||||
Err("fetching block bodies unavailable")
|
||||
}
|
||||
|
||||
fn block_receipts(&self, _header: &Header) -> Self::Receipts {
|
||||
Err("fetching block receipts unavailable")
|
||||
}
|
||||
|
||||
fn epoch_transition(
|
||||
&self,
|
||||
_hash: H256,
|
||||
_engine: Arc<dyn EthEngine>,
|
||||
_checker: Arc<dyn StateDependentProof<EthereumMachine>>,
|
||||
) -> Self::Transition {
|
||||
Err("fetching epoch transition proofs unavailable")
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -1,688 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Light client implementation. Stores data from light sync
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use common_types::{
|
||||
block_status::BlockStatus, blockchain_info::BlockChainInfo, encoded, header::Header,
|
||||
ids::BlockId, BlockNumber,
|
||||
};
|
||||
use ethcore::{
|
||||
client::{traits::ForceUpdateSealing, ClientIoMessage, ClientReport, EnvInfo},
|
||||
engines::{epoch, EpochChange, EpochTransition, EthEngine, Proof},
|
||||
error::{Error, EthcoreResult},
|
||||
machine::EthereumMachine,
|
||||
spec::{Spec, SpecHardcodedSync},
|
||||
verification::queue::{self, HeaderQueue},
|
||||
};
|
||||
use ethereum_types::{H256, U256};
|
||||
use futures::{Future, IntoFuture};
|
||||
use io::IoChannel;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
|
||||
use kvdb::KeyValueDB;
|
||||
|
||||
use self::{
|
||||
fetch::ChainDataFetcher,
|
||||
header_chain::{AncestryIter, HardcodedSync, HeaderChain},
|
||||
};
|
||||
|
||||
use cache::Cache;
|
||||
|
||||
pub use self::service::Service;
|
||||
|
||||
mod header_chain;
|
||||
mod service;
|
||||
|
||||
pub mod fetch;
|
||||
|
||||
/// Configuration for the light client.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Config {
|
||||
/// Verification queue config.
|
||||
pub queue: queue::Config,
|
||||
/// Chain column in database.
|
||||
pub chain_column: Option<u32>,
|
||||
/// Should it do full verification of blocks?
|
||||
pub verify_full: bool,
|
||||
/// Should it check the seal of blocks?
|
||||
pub check_seal: bool,
|
||||
/// Disable hardcoded sync.
|
||||
pub no_hardcoded_sync: bool,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config {
|
||||
queue: Default::default(),
|
||||
chain_column: None,
|
||||
verify_full: true,
|
||||
check_seal: true,
|
||||
no_hardcoded_sync: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for interacting with the header chain abstractly.
|
||||
pub trait LightChainClient: Send + Sync {
|
||||
/// Adds a new `LightChainNotify` listener.
|
||||
fn add_listener(&self, listener: Weak<dyn LightChainNotify>);
|
||||
|
||||
/// Get chain info.
|
||||
fn chain_info(&self) -> BlockChainInfo;
|
||||
|
||||
/// Queue header to be verified. Required that all headers queued have their
|
||||
/// parent queued prior.
|
||||
fn queue_header(&self, header: Header) -> EthcoreResult<H256>;
|
||||
|
||||
/// Attempt to get a block hash by block id.
|
||||
fn block_hash(&self, id: BlockId) -> Option<H256>;
|
||||
|
||||
/// Attempt to get block header by block id.
|
||||
fn block_header(&self, id: BlockId) -> Option<encoded::Header>;
|
||||
|
||||
/// Get the best block header.
|
||||
fn best_block_header(&self) -> encoded::Header;
|
||||
|
||||
/// Get a block's chain score by ID.
|
||||
fn score(&self, id: BlockId) -> Option<U256>;
|
||||
|
||||
/// Get an iterator over a block and its ancestry.
|
||||
fn ancestry_iter<'a>(
|
||||
&'a self,
|
||||
start: BlockId,
|
||||
) -> Box<dyn Iterator<Item = encoded::Header> + 'a>;
|
||||
|
||||
/// Get the signing chain ID.
|
||||
fn signing_chain_id(&self) -> Option<u64>;
|
||||
|
||||
/// Get environment info for execution at a given block.
|
||||
/// Fails if that block's header is not stored.
|
||||
fn env_info(&self, id: BlockId) -> Option<EnvInfo>;
|
||||
|
||||
/// Get a handle to the consensus engine.
|
||||
fn engine(&self) -> &Arc<dyn EthEngine>;
|
||||
|
||||
/// Query whether a block is known.
|
||||
fn is_known(&self, hash: &H256) -> bool;
|
||||
|
||||
/// Set the chain via a spec name.
|
||||
fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()>;
|
||||
|
||||
/// Clear the queue.
|
||||
fn clear_queue(&self);
|
||||
|
||||
/// Flush the queue.
|
||||
fn flush_queue(&self);
|
||||
|
||||
/// Get queue info.
|
||||
fn queue_info(&self) -> queue::QueueInfo;
|
||||
|
||||
/// Get the `i`th CHT root.
|
||||
fn cht_root(&self, i: usize) -> Option<H256>;
|
||||
|
||||
/// Get a report of import activity since the last call.
|
||||
fn report(&self) -> ClientReport;
|
||||
}
|
||||
|
||||
/// An actor listening to light chain events.
|
||||
pub trait LightChainNotify: Send + Sync {
|
||||
/// Notifies about imported headers.
|
||||
fn new_headers(&self, good: &[H256]);
|
||||
}
|
||||
|
||||
/// Something which can be treated as a `LightChainClient`.
|
||||
pub trait AsLightClient {
|
||||
/// The kind of light client this can be treated as.
|
||||
type Client: LightChainClient;
|
||||
|
||||
/// Access the underlying light client.
|
||||
fn as_light_client(&self) -> &Self::Client;
|
||||
}
|
||||
|
||||
impl<T: LightChainClient> AsLightClient for T {
|
||||
type Client = Self;
|
||||
|
||||
fn as_light_client(&self) -> &Self {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Light client implementation.
|
||||
pub struct Client<T> {
|
||||
queue: HeaderQueue,
|
||||
engine: Arc<dyn EthEngine>,
|
||||
chain: HeaderChain,
|
||||
report: RwLock<ClientReport>,
|
||||
import_lock: Mutex<()>,
|
||||
db: Arc<dyn KeyValueDB>,
|
||||
listeners: RwLock<Vec<Weak<dyn LightChainNotify>>>,
|
||||
fetcher: T,
|
||||
verify_full: bool,
|
||||
/// A closure to call when we want to restart the client
|
||||
exit_handler: Mutex<Option<Box<dyn Fn(String) + 'static + Send>>>,
|
||||
}
|
||||
|
||||
impl<T: ChainDataFetcher> Client<T> {
|
||||
/// Create a new `Client`.
|
||||
pub fn new(
|
||||
config: Config,
|
||||
db: Arc<dyn KeyValueDB>,
|
||||
chain_col: Option<u32>,
|
||||
spec: &Spec,
|
||||
fetcher: T,
|
||||
io_channel: IoChannel<ClientIoMessage>,
|
||||
cache: Arc<Mutex<Cache>>,
|
||||
) -> Result<Self, Error> {
|
||||
Ok(Self {
|
||||
queue: HeaderQueue::new(
|
||||
config.queue,
|
||||
spec.engine.clone(),
|
||||
io_channel,
|
||||
config.check_seal,
|
||||
),
|
||||
engine: spec.engine.clone(),
|
||||
chain: {
|
||||
let hs_cfg = if config.no_hardcoded_sync {
|
||||
HardcodedSync::Deny
|
||||
} else {
|
||||
HardcodedSync::Allow
|
||||
};
|
||||
HeaderChain::new(db.clone(), chain_col, &spec, cache, hs_cfg)?
|
||||
},
|
||||
report: RwLock::new(ClientReport::default()),
|
||||
import_lock: Mutex::new(()),
|
||||
db,
|
||||
listeners: RwLock::new(vec![]),
|
||||
fetcher,
|
||||
verify_full: config.verify_full,
|
||||
exit_handler: Mutex::new(None),
|
||||
})
|
||||
}
|
||||
|
||||
/// Generates the specifications for hardcoded sync. This is typically only called manually
|
||||
/// from time to time by a Parity developer in order to update the chain specifications.
|
||||
///
|
||||
/// Returns `None` if we are at the genesis block.
|
||||
pub fn read_hardcoded_sync(&self) -> Result<Option<SpecHardcodedSync>, Error> {
|
||||
self.chain.read_hardcoded_sync()
|
||||
}
|
||||
|
||||
/// Adds a new `LightChainNotify` listener.
|
||||
pub fn add_listener(&self, listener: Weak<dyn LightChainNotify>) {
|
||||
self.listeners.write().push(listener);
|
||||
}
|
||||
|
||||
/// Import a header to the queue for additional verification.
|
||||
pub fn import_header(&self, header: Header) -> EthcoreResult<H256> {
|
||||
self.queue.import(header).map_err(|(_, e)| e)
|
||||
}
|
||||
|
||||
/// Inquire about the status of a given header.
|
||||
pub fn status(&self, hash: &H256) -> BlockStatus {
|
||||
match self.queue.status(hash) {
|
||||
queue::Status::Unknown => self.chain.status(hash),
|
||||
other => other.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the chain info.
|
||||
pub fn chain_info(&self) -> BlockChainInfo {
|
||||
let best_hdr = self.chain.best_header();
|
||||
let best_td = self.chain.best_block().total_difficulty;
|
||||
|
||||
let first_block = self.chain.first_block();
|
||||
let genesis_hash = self.chain.genesis_hash();
|
||||
|
||||
BlockChainInfo {
|
||||
total_difficulty: best_td,
|
||||
pending_total_difficulty: best_td + self.queue.total_difficulty(),
|
||||
genesis_hash,
|
||||
best_block_hash: best_hdr.hash(),
|
||||
best_block_number: best_hdr.number(),
|
||||
best_block_timestamp: best_hdr.timestamp(),
|
||||
ancient_block_hash: if first_block.is_some() {
|
||||
Some(genesis_hash)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
ancient_block_number: if first_block.is_some() { Some(0) } else { None },
|
||||
first_block_hash: first_block.as_ref().map(|first| first.hash),
|
||||
first_block_number: first_block.as_ref().map(|first| first.number),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the header queue info.
|
||||
pub fn queue_info(&self) -> queue::QueueInfo {
|
||||
self.queue.queue_info()
|
||||
}
|
||||
|
||||
/// Attempt to get a block hash by block id.
|
||||
pub fn block_hash(&self, id: BlockId) -> Option<H256> {
|
||||
self.chain.block_hash(id)
|
||||
}
|
||||
|
||||
/// Get a block header by Id.
|
||||
pub fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
|
||||
self.chain.block_header(id)
|
||||
}
|
||||
|
||||
/// Get the best block header.
|
||||
pub fn best_block_header(&self) -> encoded::Header {
|
||||
self.chain.best_header()
|
||||
}
|
||||
|
||||
/// Get a block's chain score.
|
||||
pub fn score(&self, id: BlockId) -> Option<U256> {
|
||||
self.chain.score(id)
|
||||
}
|
||||
|
||||
/// Get an iterator over a block and its ancestry.
|
||||
pub fn ancestry_iter(&self, start: BlockId) -> AncestryIter {
|
||||
self.chain.ancestry_iter(start)
|
||||
}
|
||||
|
||||
/// Get the signing chain id.
|
||||
pub fn signing_chain_id(&self) -> Option<u64> {
|
||||
self.engine.signing_chain_id(&self.latest_env_info())
|
||||
}
|
||||
|
||||
/// Flush the header queue.
|
||||
pub fn flush_queue(&self) {
|
||||
self.queue.flush()
|
||||
}
|
||||
|
||||
/// Get the `i`th CHT root.
|
||||
pub fn cht_root(&self, i: usize) -> Option<H256> {
|
||||
self.chain.cht_root(i)
|
||||
}
|
||||
|
||||
/// Import a set of pre-verified headers from the queue.
|
||||
pub fn import_verified(&self) {
|
||||
const MAX: usize = 256;
|
||||
|
||||
let _lock = self.import_lock.lock();
|
||||
|
||||
let mut bad = Vec::new();
|
||||
let mut good = Vec::new();
|
||||
for verified_header in self.queue.drain(MAX) {
|
||||
let (num, hash) = (verified_header.number(), verified_header.hash());
|
||||
trace!(target: "client", "importing block {}", num);
|
||||
|
||||
if self.verify_full && !self.check_header(&mut bad, &verified_header) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let write_proof_result = match self.check_epoch_signal(&verified_header) {
|
||||
Ok(Some(proof)) => self.write_pending_proof(&verified_header, proof),
|
||||
Ok(None) => Ok(()),
|
||||
Err(e) => panic!("Unable to fetch epoch transition proof: {:?}", e),
|
||||
};
|
||||
|
||||
if let Err(e) = write_proof_result {
|
||||
warn!(target: "client", "Error writing pending transition proof to DB: {:?} \
|
||||
The node may not be able to synchronize further.", e);
|
||||
}
|
||||
|
||||
let epoch_proof = self.engine.is_epoch_end_light(
|
||||
&verified_header,
|
||||
&|h| {
|
||||
self.chain
|
||||
.block_header(BlockId::Hash(h))
|
||||
.and_then(|hdr| hdr.decode().ok())
|
||||
},
|
||||
&|h| self.chain.pending_transition(h),
|
||||
);
|
||||
|
||||
let mut tx = self.db.transaction();
|
||||
let pending = match self.chain.insert(&mut tx, &verified_header, epoch_proof) {
|
||||
Ok(pending) => {
|
||||
good.push(hash);
|
||||
self.report.write().blocks_imported += 1;
|
||||
pending
|
||||
}
|
||||
Err(e) => {
|
||||
debug!(target: "client", "Error importing header {:?}: {:?}", (num, hash), e);
|
||||
bad.push(hash);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
self.db.write_buffered(tx);
|
||||
self.chain.apply_pending(pending);
|
||||
}
|
||||
|
||||
if let Err(e) = self.db.flush() {
|
||||
panic!("Database flush failed: {}. Check disk health and space.", e);
|
||||
}
|
||||
|
||||
self.queue.mark_as_bad(&bad);
|
||||
self.queue.mark_as_good(&good);
|
||||
|
||||
self.notify(|listener| listener.new_headers(&good));
|
||||
}
|
||||
|
||||
/// Get a report about blocks imported.
|
||||
pub fn report(&self) -> ClientReport {
|
||||
self.report.read().clone()
|
||||
}
|
||||
|
||||
/// Get blockchain mem usage in bytes.
|
||||
pub fn chain_mem_used(&self) -> usize {
|
||||
use heapsize::HeapSizeOf;
|
||||
|
||||
self.chain.heap_size_of_children()
|
||||
}
|
||||
|
||||
/// Set a closure to call when the client wants to be restarted.
|
||||
///
|
||||
/// The parameter passed to the callback is the name of the new chain spec to use after
|
||||
/// the restart.
|
||||
pub fn set_exit_handler<F>(&self, f: F)
|
||||
where
|
||||
F: Fn(String) + 'static + Send,
|
||||
{
|
||||
*self.exit_handler.lock() = Some(Box::new(f));
|
||||
}
|
||||
|
||||
/// Get a handle to the verification engine.
|
||||
pub fn engine(&self) -> &Arc<dyn EthEngine> {
|
||||
&self.engine
|
||||
}
|
||||
|
||||
/// Get the latest environment info.
|
||||
pub fn latest_env_info(&self) -> EnvInfo {
|
||||
self.env_info(BlockId::Latest)
|
||||
.expect("Best block header and recent hashes always stored; qed")
|
||||
}
|
||||
|
||||
/// Get environment info for a given block.
|
||||
pub fn env_info(&self, id: BlockId) -> Option<EnvInfo> {
|
||||
let header = match self.block_header(id) {
|
||||
Some(hdr) => hdr,
|
||||
None => return None,
|
||||
};
|
||||
|
||||
Some(EnvInfo {
|
||||
number: header.number(),
|
||||
author: header.author(),
|
||||
timestamp: header.timestamp(),
|
||||
difficulty: header.difficulty(),
|
||||
last_hashes: self.build_last_hashes(header.parent_hash()),
|
||||
gas_used: Default::default(),
|
||||
gas_limit: header.gas_limit(),
|
||||
})
|
||||
}
|
||||
|
||||
fn build_last_hashes(&self, mut parent_hash: H256) -> Arc<Vec<H256>> {
|
||||
let mut v = Vec::with_capacity(256);
|
||||
for _ in 0..255 {
|
||||
v.push(parent_hash);
|
||||
match self.block_header(BlockId::Hash(parent_hash)) {
|
||||
Some(header) => parent_hash = header.hash(),
|
||||
None => break,
|
||||
}
|
||||
}
|
||||
|
||||
Arc::new(v)
|
||||
}
|
||||
|
||||
fn notify<F: Fn(&dyn LightChainNotify)>(&self, f: F) {
|
||||
for listener in &*self.listeners.read() {
|
||||
if let Some(listener) = listener.upgrade() {
|
||||
f(&*listener)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// return false if should skip, true otherwise. may push onto bad if
|
||||
// should skip.
|
||||
fn check_header(&self, bad: &mut Vec<H256>, verified_header: &Header) -> bool {
|
||||
let hash = verified_header.hash();
|
||||
let parent_header = match self
|
||||
.chain
|
||||
.block_header(BlockId::Hash(*verified_header.parent_hash()))
|
||||
{
|
||||
Some(header) => header,
|
||||
None => {
|
||||
trace!(target: "client", "No parent for block ({}, {})",
|
||||
verified_header.number(), hash);
|
||||
return false; // skip import of block with missing parent.
|
||||
}
|
||||
};
|
||||
|
||||
// Verify Block Family
|
||||
|
||||
let verify_family_result = {
|
||||
parent_header
|
||||
.decode()
|
||||
.map_err(|dec_err| dec_err.into())
|
||||
.and_then(|decoded| self.engine.verify_block_family(&verified_header, &decoded))
|
||||
};
|
||||
if let Err(e) = verify_family_result {
|
||||
warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}",
|
||||
verified_header.number(), verified_header.hash(), e);
|
||||
bad.push(hash);
|
||||
return false;
|
||||
};
|
||||
|
||||
// "external" verification.
|
||||
let verify_external_result = self.engine.verify_block_external(&verified_header);
|
||||
if let Err(e) = verify_external_result {
|
||||
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}",
|
||||
verified_header.number(), verified_header.hash(), e);
|
||||
|
||||
bad.push(hash);
|
||||
return false;
|
||||
};
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
fn check_epoch_signal(
|
||||
&self,
|
||||
verified_header: &Header,
|
||||
) -> Result<Option<Proof<EthereumMachine>>, T::Error> {
|
||||
use ethcore::machine::{AuxiliaryData, AuxiliaryRequest};
|
||||
|
||||
let mut block: Option<Vec<u8>> = None;
|
||||
let mut receipts: Option<Vec<_>> = None;
|
||||
|
||||
loop {
|
||||
let is_signal = {
|
||||
let auxiliary = AuxiliaryData {
|
||||
bytes: block.as_ref().map(|x| &x[..]),
|
||||
receipts: receipts.as_ref().map(|x| &x[..]),
|
||||
};
|
||||
|
||||
self.engine.signals_epoch_end(verified_header, auxiliary)
|
||||
};
|
||||
|
||||
// check with any auxiliary data fetched so far
|
||||
match is_signal {
|
||||
EpochChange::No => return Ok(None),
|
||||
EpochChange::Yes(proof) => return Ok(Some(proof)),
|
||||
EpochChange::Unsure(unsure) => {
|
||||
let (b, r) = match unsure {
|
||||
AuxiliaryRequest::Body => {
|
||||
(Some(self.fetcher.block_body(verified_header)), None)
|
||||
}
|
||||
AuxiliaryRequest::Receipts => {
|
||||
(None, Some(self.fetcher.block_receipts(verified_header)))
|
||||
}
|
||||
AuxiliaryRequest::Both => (
|
||||
Some(self.fetcher.block_body(verified_header)),
|
||||
Some(self.fetcher.block_receipts(verified_header)),
|
||||
),
|
||||
};
|
||||
|
||||
if let Some(b) = b {
|
||||
block = Some(b.into_future().wait()?.into_inner());
|
||||
}
|
||||
|
||||
if let Some(r) = r {
|
||||
receipts = Some(r.into_future().wait()?);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// attempts to fetch the epoch proof from the network until successful.
|
||||
fn write_pending_proof(
|
||||
&self,
|
||||
header: &Header,
|
||||
proof: Proof<EthereumMachine>,
|
||||
) -> Result<(), T::Error> {
|
||||
let proof = match proof {
|
||||
Proof::Known(known) => known,
|
||||
Proof::WithState(state_dependent) => self
|
||||
.fetcher
|
||||
.epoch_transition(header.hash(), self.engine.clone(), state_dependent)
|
||||
.into_future()
|
||||
.wait()?,
|
||||
};
|
||||
|
||||
let mut batch = self.db.transaction();
|
||||
self.chain.insert_pending_transition(
|
||||
&mut batch,
|
||||
header.hash(),
|
||||
&epoch::PendingTransition { proof },
|
||||
);
|
||||
self.db.write_buffered(batch);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ChainDataFetcher> LightChainClient for Client<T> {
|
||||
fn add_listener(&self, listener: Weak<dyn LightChainNotify>) {
|
||||
Client::add_listener(self, listener)
|
||||
}
|
||||
|
||||
fn chain_info(&self) -> BlockChainInfo {
|
||||
Client::chain_info(self)
|
||||
}
|
||||
|
||||
fn queue_header(&self, header: Header) -> EthcoreResult<H256> {
|
||||
self.import_header(header)
|
||||
}
|
||||
|
||||
fn block_hash(&self, id: BlockId) -> Option<H256> {
|
||||
Client::block_hash(self, id)
|
||||
}
|
||||
|
||||
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
|
||||
Client::block_header(self, id)
|
||||
}
|
||||
|
||||
fn best_block_header(&self) -> encoded::Header {
|
||||
Client::best_block_header(self)
|
||||
}
|
||||
|
||||
fn score(&self, id: BlockId) -> Option<U256> {
|
||||
Client::score(self, id)
|
||||
}
|
||||
|
||||
fn ancestry_iter<'a>(
|
||||
&'a self,
|
||||
start: BlockId,
|
||||
) -> Box<dyn Iterator<Item = encoded::Header> + 'a> {
|
||||
Box::new(Client::ancestry_iter(self, start))
|
||||
}
|
||||
|
||||
fn signing_chain_id(&self) -> Option<u64> {
|
||||
Client::signing_chain_id(self)
|
||||
}
|
||||
|
||||
fn env_info(&self, id: BlockId) -> Option<EnvInfo> {
|
||||
Client::env_info(self, id)
|
||||
}
|
||||
|
||||
fn engine(&self) -> &Arc<dyn EthEngine> {
|
||||
Client::engine(self)
|
||||
}
|
||||
|
||||
fn set_spec_name(&self, new_spec_name: String) -> Result<(), ()> {
|
||||
trace!(target: "mode", "Client::set_spec_name({:?})", new_spec_name);
|
||||
if let Some(ref h) = *self.exit_handler.lock() {
|
||||
(*h)(new_spec_name);
|
||||
Ok(())
|
||||
} else {
|
||||
warn!("Not hypervised; cannot change chain.");
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
|
||||
fn is_known(&self, hash: &H256) -> bool {
|
||||
self.status(hash) == BlockStatus::InChain
|
||||
}
|
||||
|
||||
fn clear_queue(&self) {
|
||||
self.queue.clear()
|
||||
}
|
||||
|
||||
fn flush_queue(&self) {
|
||||
Client::flush_queue(self);
|
||||
}
|
||||
|
||||
fn queue_info(&self) -> queue::QueueInfo {
|
||||
self.queue.queue_info()
|
||||
}
|
||||
|
||||
fn cht_root(&self, i: usize) -> Option<H256> {
|
||||
Client::cht_root(self, i)
|
||||
}
|
||||
|
||||
fn report(&self) -> ClientReport {
|
||||
Client::report(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ChainDataFetcher> ::ethcore::client::ChainInfo for Client<T> {
|
||||
fn chain_info(&self) -> BlockChainInfo {
|
||||
Client::chain_info(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ChainDataFetcher> ::ethcore::client::EngineClient for Client<T> {
|
||||
fn update_sealing(&self, _force: ForceUpdateSealing) {}
|
||||
fn submit_seal(&self, _block_hash: H256, _seal: Vec<Vec<u8>>) {}
|
||||
fn broadcast_consensus_message(&self, _message: Vec<u8>) {}
|
||||
|
||||
fn epoch_transition_for(&self, parent_hash: H256) -> Option<EpochTransition> {
|
||||
self.chain
|
||||
.epoch_transition_for(parent_hash)
|
||||
.map(|(hdr, proof)| EpochTransition {
|
||||
block_hash: hdr.hash(),
|
||||
block_number: hdr.number(),
|
||||
proof,
|
||||
})
|
||||
}
|
||||
|
||||
fn as_full_client(&self) -> Option<&dyn crate::ethcore::client::BlockChainClient> {
|
||||
None
|
||||
}
|
||||
|
||||
fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
|
||||
self.block_header(id).map(|hdr| hdr.number())
|
||||
}
|
||||
|
||||
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
|
||||
Client::block_header(self, id)
|
||||
}
|
||||
}
|
@ -1,142 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Minimal IO service for light client.
|
||||
//! Just handles block import messages and passes them to the client.
|
||||
|
||||
use std::{fmt, sync::Arc};
|
||||
|
||||
use ethcore::{client::ClientIoMessage, error::Error as CoreError, spec::Spec};
|
||||
use ethcore_blockchain::BlockChainDB;
|
||||
use ethcore_db as db;
|
||||
use io::{IoContext, IoError, IoHandler, IoService};
|
||||
|
||||
use cache::Cache;
|
||||
use parking_lot::Mutex;
|
||||
|
||||
use super::{ChainDataFetcher, Client, Config as ClientConfig, LightChainNotify};
|
||||
|
||||
/// Errors on service initialization.
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
/// Core error.
|
||||
Core(CoreError),
|
||||
/// I/O service error.
|
||||
Io(IoError),
|
||||
}
|
||||
|
||||
impl From<CoreError> for Error {
|
||||
#[inline]
|
||||
fn from(err: CoreError) -> Error {
|
||||
Error::Core(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
Error::Core(ref msg) => write!(f, "Core error: {}", msg),
|
||||
Error::Io(ref err) => write!(f, "I/O service error: {}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Light client service.
|
||||
pub struct Service<T> {
|
||||
client: Arc<Client<T>>,
|
||||
io_service: IoService<ClientIoMessage>,
|
||||
}
|
||||
|
||||
impl<T: ChainDataFetcher> Service<T> {
|
||||
/// Start the service: initialize I/O workers and client itself.
|
||||
pub fn start(
|
||||
config: ClientConfig,
|
||||
spec: &Spec,
|
||||
fetcher: T,
|
||||
db: Arc<dyn BlockChainDB>,
|
||||
cache: Arc<Mutex<Cache>>,
|
||||
) -> Result<Self, Error> {
|
||||
let io_service = IoService::<ClientIoMessage>::start().map_err(Error::Io)?;
|
||||
let client = Arc::new(Client::new(
|
||||
config,
|
||||
db.key_value().clone(),
|
||||
db::COL_LIGHT_CHAIN,
|
||||
spec,
|
||||
fetcher,
|
||||
io_service.channel(),
|
||||
cache,
|
||||
)?);
|
||||
|
||||
io_service
|
||||
.register_handler(Arc::new(ImportBlocks(client.clone())))
|
||||
.map_err(Error::Io)?;
|
||||
spec.engine.register_client(Arc::downgrade(&client) as _);
|
||||
|
||||
Ok(Service { client, io_service })
|
||||
}
|
||||
|
||||
/// Set the actor to be notified on certain chain events
|
||||
pub fn add_notify(&self, notify: Arc<dyn LightChainNotify>) {
|
||||
self.client.add_listener(Arc::downgrade(¬ify));
|
||||
}
|
||||
|
||||
/// Register an I/O handler on the service.
|
||||
pub fn register_handler(
|
||||
&self,
|
||||
handler: Arc<dyn IoHandler<ClientIoMessage> + Send>,
|
||||
) -> Result<(), IoError> {
|
||||
self.io_service.register_handler(handler)
|
||||
}
|
||||
|
||||
/// Get a handle to the client.
|
||||
pub fn client(&self) -> &Arc<Client<T>> {
|
||||
&self.client
|
||||
}
|
||||
}
|
||||
|
||||
struct ImportBlocks<T>(Arc<Client<T>>);
|
||||
|
||||
impl<T: ChainDataFetcher> IoHandler<ClientIoMessage> for ImportBlocks<T> {
|
||||
fn message(&self, _io: &IoContext<ClientIoMessage>, message: &ClientIoMessage) {
|
||||
if let ClientIoMessage::BlockVerified = *message {
|
||||
self.0.import_verified();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Service;
|
||||
use ethcore::spec::Spec;
|
||||
|
||||
use cache::Cache;
|
||||
use client::fetch;
|
||||
use ethcore::test_helpers;
|
||||
use parking_lot::Mutex;
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
#[test]
|
||||
fn it_works() {
|
||||
let db = test_helpers::new_db();
|
||||
let spec = Spec::new_test();
|
||||
let cache = Arc::new(Mutex::new(Cache::new(
|
||||
Default::default(),
|
||||
Duration::from_secs(6 * 3600),
|
||||
)));
|
||||
|
||||
Service::start(Default::default(), &spec, fetch::unavailable(), db, cache).unwrap();
|
||||
}
|
||||
}
|
@ -1,97 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Light client logic and implementation.
|
||||
//!
|
||||
//! A "light" client stores very little chain-related data locally
|
||||
//! unlike a full node, which stores all blocks, headers, receipts, and more.
|
||||
//!
|
||||
//! This enables the client to have a much lower resource footprint in
|
||||
//! exchange for the cost of having to ask the network for state data
|
||||
//! while responding to queries. This makes a light client unsuitable for
|
||||
//! low-latency applications, but perfectly suitable for simple everyday
|
||||
//! use-cases like sending transactions from a personal account.
|
||||
//!
|
||||
//! The light client performs a header-only sync, doing verification and pruning
|
||||
//! historical blocks. Upon pruning, batches of 2048 blocks have a number => (hash, TD)
|
||||
//! mapping sealed into "canonical hash tries" which can later be used to verify
|
||||
//! historical block queries from peers.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
pub mod cache;
|
||||
pub mod cht;
|
||||
pub mod client;
|
||||
pub mod net;
|
||||
pub mod on_demand;
|
||||
pub mod provider;
|
||||
pub mod transaction_queue;
|
||||
|
||||
mod types;
|
||||
|
||||
pub use self::{
|
||||
cache::Cache,
|
||||
provider::{Provider, MAX_HEADERS_PER_REQUEST},
|
||||
transaction_queue::TransactionQueue,
|
||||
};
|
||||
pub use types::request;
|
||||
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
extern crate bincode;
|
||||
extern crate common_types;
|
||||
extern crate ethcore;
|
||||
extern crate ethcore_blockchain;
|
||||
extern crate ethcore_db;
|
||||
extern crate ethcore_io as io;
|
||||
extern crate ethcore_network as network;
|
||||
extern crate ethereum_types;
|
||||
extern crate failsafe;
|
||||
extern crate fastmap;
|
||||
extern crate futures;
|
||||
extern crate hash_db;
|
||||
extern crate heapsize;
|
||||
extern crate itertools;
|
||||
extern crate keccak_hasher;
|
||||
extern crate memory_db;
|
||||
extern crate parity_bytes as bytes;
|
||||
extern crate parking_lot;
|
||||
extern crate patricia_trie_ethereum as ethtrie;
|
||||
extern crate rand;
|
||||
extern crate rlp;
|
||||
extern crate trie_db as trie;
|
||||
#[macro_use]
|
||||
extern crate rlp_derive;
|
||||
extern crate keccak_hash as hash;
|
||||
extern crate kvdb;
|
||||
extern crate memory_cache;
|
||||
extern crate serde;
|
||||
extern crate smallvec;
|
||||
extern crate stats;
|
||||
extern crate triehash_ethereum as triehash;
|
||||
extern crate vm;
|
||||
#[macro_use]
|
||||
extern crate error_chain;
|
||||
|
||||
extern crate journaldb;
|
||||
#[cfg(test)]
|
||||
extern crate kvdb_memorydb;
|
||||
#[cfg(test)]
|
||||
extern crate tempdir;
|
@ -1,195 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! I/O and event context generalizations.
|
||||
|
||||
use network::{NetworkContext, NodeId, PeerId};
|
||||
|
||||
use super::{error::Error, Announcement, LightProtocol, ReqId};
|
||||
use request::NetworkRequests as Requests;
|
||||
|
||||
/// An I/O context which allows sending and receiving packets as well as
|
||||
/// disconnecting peers. This is used as a generalization of the portions
|
||||
/// of a p2p network which the light protocol structure makes use of.
|
||||
pub trait IoContext {
|
||||
/// Send a packet to a specific peer.
|
||||
fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec<u8>);
|
||||
|
||||
/// Respond to a peer's message. Only works if this context is a byproduct
|
||||
/// of a packet handler.
|
||||
fn respond(&self, packet_id: u8, packet_body: Vec<u8>);
|
||||
|
||||
/// Disconnect a peer.
|
||||
fn disconnect_peer(&self, peer: PeerId);
|
||||
|
||||
/// Disable a peer -- this is a disconnect + a time-out.
|
||||
fn disable_peer(&self, peer: PeerId);
|
||||
|
||||
/// Get a peer's protocol version.
|
||||
fn protocol_version(&self, peer: PeerId) -> Option<u8>;
|
||||
|
||||
/// Persistent peer id
|
||||
fn persistent_peer_id(&self, peer: PeerId) -> Option<NodeId>;
|
||||
|
||||
/// Whether given peer id is reserved peer
|
||||
fn is_reserved_peer(&self, peer: PeerId) -> bool;
|
||||
}
|
||||
|
||||
impl<T> IoContext for T
|
||||
where
|
||||
T: ?Sized + NetworkContext,
|
||||
{
|
||||
fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec<u8>) {
|
||||
if let Err(e) = self.send(peer, packet_id, packet_body) {
|
||||
debug!(target: "pip", "Error sending packet to peer {}: {}", peer, e);
|
||||
}
|
||||
}
|
||||
|
||||
fn respond(&self, packet_id: u8, packet_body: Vec<u8>) {
|
||||
if let Err(e) = self.respond(packet_id, packet_body) {
|
||||
debug!(target: "pip", "Error responding to peer message: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
fn disconnect_peer(&self, peer: PeerId) {
|
||||
trace!(target: "pip", "Initiating disconnect of peer {}", peer);
|
||||
NetworkContext::disconnect_peer(self, peer);
|
||||
}
|
||||
|
||||
fn disable_peer(&self, peer: PeerId) {
|
||||
trace!(target: "pip", "Initiating disable of peer {}", peer);
|
||||
NetworkContext::disable_peer(self, peer);
|
||||
}
|
||||
|
||||
fn protocol_version(&self, peer: PeerId) -> Option<u8> {
|
||||
self.protocol_version(self.subprotocol_name(), peer)
|
||||
}
|
||||
|
||||
fn persistent_peer_id(&self, peer: PeerId) -> Option<NodeId> {
|
||||
self.session_info(peer).and_then(|info| info.id)
|
||||
}
|
||||
|
||||
fn is_reserved_peer(&self, peer: PeerId) -> bool {
|
||||
NetworkContext::is_reserved_peer(self, peer)
|
||||
}
|
||||
}
|
||||
|
||||
/// Basic context for the protocol.
pub trait BasicContext {
    /// Returns the relevant peer's persistent Id (aka NodeId), if known.
    fn persistent_peer_id(&self, peer: PeerId) -> Option<NodeId>;

    /// Make a request from a peer.
    ///
    /// Fails on: nonexistent peer, network error, peer not server,
    /// insufficient credits. Does not check capabilities before sending.
    /// On success, returns a request id which can later be coordinated
    /// with an event.
    fn request_from(&self, peer: PeerId, request: Requests) -> Result<ReqId, Error>;

    /// Make an announcement of new capabilities to the rest of the peers.
    // TODO: maybe just put this on a timer in LightProtocol?
    fn make_announcement(&self, announcement: Announcement);

    /// Disconnect a peer, allowing it to reconnect later.
    fn disconnect_peer(&self, peer: PeerId);

    /// Disable a peer, preventing it from reconnecting.
    fn disable_peer(&self, peer: PeerId);
}
|
||||
|
||||
/// Context for a protocol event which has a peer ID attached.
pub trait EventContext: BasicContext {
    /// Get the peer relevant to the event e.g. message sender,
    /// disconnected/connected peer.
    fn peer(&self) -> PeerId;

    /// Treat the event context as a basic context (erasing the peer).
    fn as_basic(&self) -> &dyn BasicContext;
}
|
||||
|
||||
/// Basic context.
/// Unlike `Ctx`, carries no peer — presumably used for periodic work
/// where no single peer is relevant (confirm against callers).
pub struct TickCtx<'a> {
    /// Io context to enable dispatch.
    pub io: &'a dyn IoContext,
    /// Protocol implementation.
    pub proto: &'a LightProtocol,
}
|
||||
|
||||
// Pure delegation: peer queries and disconnect/disable go to the io
// context; request dispatch and announcements go to the protocol,
// which needs the io context to do its work.
impl<'a> BasicContext for TickCtx<'a> {
    fn persistent_peer_id(&self, id: PeerId) -> Option<NodeId> {
        self.io.persistent_peer_id(id)
    }

    fn request_from(&self, peer: PeerId, requests: Requests) -> Result<ReqId, Error> {
        self.proto.request_from(self.io, peer, requests)
    }

    fn make_announcement(&self, announcement: Announcement) {
        self.proto.make_announcement(self.io, announcement);
    }

    fn disconnect_peer(&self, peer: PeerId) {
        self.io.disconnect_peer(peer);
    }

    fn disable_peer(&self, peer: PeerId) {
        self.io.disable_peer(peer);
    }
}
|
||||
|
||||
/// Concrete implementation of `EventContext` over the light protocol struct and
/// an io context.
pub struct Ctx<'a> {
    /// Io context to enable immediate response to events.
    pub io: &'a dyn IoContext,
    /// Protocol implementation.
    pub proto: &'a LightProtocol,
    /// Relevant peer for event.
    pub peer: PeerId,
}
|
||||
|
||||
// Mirrors the `TickCtx` implementation exactly; the stored `peer` field
// is only exposed through the `EventContext` impl.
impl<'a> BasicContext for Ctx<'a> {
    fn persistent_peer_id(&self, id: PeerId) -> Option<NodeId> {
        self.io.persistent_peer_id(id)
    }

    fn request_from(&self, peer: PeerId, requests: Requests) -> Result<ReqId, Error> {
        self.proto.request_from(self.io, peer, requests)
    }

    fn make_announcement(&self, announcement: Announcement) {
        self.proto.make_announcement(self.io, announcement);
    }

    fn disconnect_peer(&self, peer: PeerId) {
        self.io.disconnect_peer(peer);
    }

    fn disable_peer(&self, peer: PeerId) {
        self.io.disable_peer(peer);
    }
}
|
||||
|
||||
impl<'a> EventContext for Ctx<'a> {
    fn peer(&self) -> PeerId {
        self.peer
    }

    // Reborrow and let unsized coercion produce the trait object.
    fn as_basic(&self) -> &dyn BasicContext {
        &*self
    }
}
|
@ -1,127 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Defines error types and levels of punishment to use upon
|
||||
//! encountering.
|
||||
|
||||
use network;
|
||||
use rlp;
|
||||
use std::fmt;
|
||||
|
||||
/// Levels of punishment.
///
/// Currently just encompasses two different kinds of disconnect and
/// no punishment, but this is where reputation systems might come into play.
// In ascending order of severity.
#[derive(Debug, PartialEq, Eq)]
pub enum Punishment {
    /// Perform no punishment.
    None,
    /// Disconnect the peer, but don't prevent them from reconnecting.
    Disconnect,
    /// Disconnect the peer and prevent them from reconnecting.
    Disable,
}
|
||||
|
||||
/// Kinds of errors which can be encountered in the course of LES.
/// See `punishment` for the severity associated with each variant.
#[derive(Debug)]
pub enum Error {
    /// An RLP decoding error.
    Rlp(rlp::DecoderError),
    /// A network error.
    Network(network::Error),
    /// Out of credits.
    NoCredits,
    /// Unrecognized packet code.
    UnrecognizedPacket(u8),
    /// Unexpected handshake.
    UnexpectedHandshake,
    /// Peer on wrong network (wrong NetworkId or genesis hash)
    WrongNetwork,
    /// Unknown peer.
    UnknownPeer,
    /// Unsolicited response.
    UnsolicitedResponse,
    /// Bad back-reference in request.
    BadBackReference,
    /// Not a server.
    NotServer,
    /// Unsupported protocol version.
    UnsupportedProtocolVersion(u8),
    /// Bad protocol version.
    BadProtocolVersion,
    /// Peer is overburdened.
    Overburdened,
    /// No handler kept the peer.
    RejectedByHandlers,
}
|
||||
|
||||
impl Error {
|
||||
/// What level of punishment does this error warrant?
|
||||
pub fn punishment(&self) -> Punishment {
|
||||
match *self {
|
||||
Error::Rlp(_) => Punishment::Disable,
|
||||
Error::Network(_) => Punishment::None,
|
||||
Error::NoCredits => Punishment::Disable,
|
||||
Error::UnrecognizedPacket(_) => Punishment::Disconnect,
|
||||
Error::UnexpectedHandshake => Punishment::Disconnect,
|
||||
Error::WrongNetwork => Punishment::Disable,
|
||||
Error::UnknownPeer => Punishment::Disconnect,
|
||||
Error::UnsolicitedResponse => Punishment::Disable,
|
||||
Error::BadBackReference => Punishment::Disable,
|
||||
Error::NotServer => Punishment::Disable,
|
||||
Error::UnsupportedProtocolVersion(_) => Punishment::Disable,
|
||||
Error::BadProtocolVersion => Punishment::Disable,
|
||||
Error::Overburdened => Punishment::None,
|
||||
Error::RejectedByHandlers => Punishment::Disconnect,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enables `?` on RLP decoding results inside the protocol handlers.
impl From<rlp::DecoderError> for Error {
    fn from(err: rlp::DecoderError) -> Self {
        Error::Rlp(err)
    }
}
|
||||
|
||||
// Enables `?` on network operations inside the protocol handlers.
impl From<network::Error> for Error {
    fn from(err: network::Error) -> Self {
        Error::Network(err)
    }
}
|
||||
|
||||
impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Wrapped errors defer to their own Display; the rest get fixed
        // human-readable descriptions.
        match *self {
            Error::Rlp(ref err) => err.fmt(f),
            Error::Network(ref err) => err.fmt(f),
            Error::NoCredits => write!(f, "Out of request credits"),
            Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code),
            Error::UnexpectedHandshake => write!(f, "Unexpected handshake"),
            Error::WrongNetwork => write!(f, "Wrong network"),
            Error::UnknownPeer => write!(f, "Unknown peer"),
            Error::UnsolicitedResponse => write!(f, "Peer provided unsolicited data"),
            Error::BadBackReference => write!(f, "Bad back-reference in request."),
            Error::NotServer => write!(f, "Peer not a server."),
            Error::UnsupportedProtocolVersion(pv) => {
                write!(f, "Unsupported protocol version: {}", pv)
            }
            Error::BadProtocolVersion => write!(f, "Bad protocol version in handshake"),
            Error::Overburdened => write!(f, "Peer overburdened"),
            Error::RejectedByHandlers => write!(f, "No handler kept this peer"),
        }
    }
}
|
@ -1,305 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Request load timer and distribution manager.
|
||||
//!
|
||||
//! This uses empirical samples of the length of time taken to respond
|
||||
//! to requests in order to inform request credit costs.
|
||||
//!
|
||||
//! The average request time is determined by an exponential moving average
|
||||
//! of the mean request times during the last `MOVING_SAMPLE_SIZE` time periods of
|
||||
//! length `TIME_PERIOD_MS`, with the exception that time periods where no data is
|
||||
//! gathered are excluded.
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
fs::File,
|
||||
path::PathBuf,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use request::{CompleteRequest, Kind};
|
||||
|
||||
use bincode;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
|
||||
/// Number of time periods samples should be kept for.
/// Used as the fixed window length of the per-kind sample deques.
pub const MOVING_SAMPLE_SIZE: usize = 256;
|
||||
|
||||
/// Stores rolling load timer samples.
/// Samples are keyed by request kind; each deque holds per-unit serve
/// times in nanoseconds (see `LoadDistribution::end_period`).
// TODO: switch to bigint if possible (FP casts aren't available)
pub trait SampleStore: Send + Sync {
    /// Load samples.
    fn load(&self) -> HashMap<Kind, VecDeque<u64>>;

    /// Store all samples.
    fn store(&self, samples: &HashMap<Kind, VecDeque<u64>>);
}
|
||||
|
||||
// get a hardcoded, arbitrarily determined (but intended overestimate)
|
||||
// of the time it takes to serve a request of the given kind.
|
||||
//
|
||||
// TODO: seed this with empirical data.
|
||||
fn hardcoded_serve_time(kind: Kind) -> Duration {
|
||||
Duration::new(
|
||||
0,
|
||||
match kind {
|
||||
Kind::Headers => 500_000,
|
||||
Kind::HeaderProof => 500_000,
|
||||
Kind::TransactionIndex => 500_000,
|
||||
Kind::Receipts => 1_000_000,
|
||||
Kind::Body => 1_000_000,
|
||||
Kind::Account => 1_500_000,
|
||||
Kind::Storage => 2_000_000,
|
||||
Kind::Code => 1_500_000,
|
||||
Kind::Execution => 250, // per gas.
|
||||
Kind::Signal => 500_000,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
/// A no-op store. Loads an empty history and discards everything stored.
pub struct NullStore;
|
||||
|
||||
impl SampleStore for NullStore {
|
||||
fn load(&self) -> HashMap<Kind, VecDeque<u64>> {
|
||||
HashMap::new()
|
||||
}
|
||||
fn store(&self, _samples: &HashMap<Kind, VecDeque<u64>>) {}
|
||||
}
|
||||
|
||||
/// Request load distributions.
pub struct LoadDistribution {
    // Per-kind (elapsed_nanos, unit_count) accumulators for the period
    // currently being measured.
    active_period: RwLock<HashMap<Kind, Mutex<(u64, u64)>>>,
    // Rolling window (up to MOVING_SAMPLE_SIZE entries) of per-unit
    // serve times, in nanoseconds, per request kind.
    samples: RwLock<HashMap<Kind, VecDeque<u64>>>,
}
|
||||
|
||||
impl LoadDistribution {
    /// Load rolling samples from the given store.
    pub fn load(store: &dyn SampleStore) -> Self {
        let mut samples = store.load();

        // Clamp any oversized persisted history down to the window size,
        // dropping the oldest entries first.
        for kind_samples in samples.values_mut() {
            while kind_samples.len() > MOVING_SAMPLE_SIZE {
                kind_samples.pop_front();
            }
        }

        LoadDistribution {
            active_period: RwLock::new(HashMap::new()),
            samples: RwLock::new(samples),
        }
    }

    /// Begin a timer.
    pub fn begin_timer<'a>(&'a self, req: &CompleteRequest) -> LoadTimer<'a> {
        let kind = req.kind();
        // `n` is the per-request work unit: header count for header
        // requests, gas for executions, 1 otherwise.
        let n = match *req {
            CompleteRequest::Headers(ref req) => req.max,
            CompleteRequest::Execution(ref req) => req.gas.low_u64(),
            _ => 1,
        };

        LoadTimer {
            start: Instant::now(),
            n,
            dist: self,
            kind,
        }
    }

    /// Calculate EMA of load for a specific request kind.
    /// If there is no data for the given request kind, no EMA will be calculated,
    /// but a hardcoded time will be returned.
    pub fn expected_time(&self, kind: Kind) -> Duration {
        let samples = self.samples.read();
        samples
            .get(&kind)
            .and_then(|s| {
                if s.is_empty() {
                    return None;
                }

                // Exponential moving average with alpha = 1/len, seeded
                // with the oldest sample and folded over the rest.
                let alpha: f64 = 1_f64 / s.len() as f64;
                let start = *s.front().expect("length known to be non-zero; qed") as f64;
                let ema = s
                    .iter()
                    .skip(1)
                    .fold(start, |a, &c| (alpha * c as f64) + ((1.0 - alpha) * a));

                Some(Duration::from_nanos(ema as u64))
            })
            .unwrap_or_else(move || hardcoded_serve_time(kind))
    }

    /// End the current time period. Provide a store to
    /// persist the updated rolling samples to.
    pub fn end_period(&self, store: &dyn SampleStore) {
        let active_period = self.active_period.read();
        let mut samples = self.samples.write();

        for (&kind, set) in active_period.iter() {
            // Take and zero the (elapsed_nanos, unit_count) counters.
            let (elapsed, n) = ::std::mem::replace(&mut *set.lock(), (0, 0));
            if n == 0 {
                continue;
            }

            let kind_samples = samples
                .entry(kind)
                .or_insert_with(|| VecDeque::with_capacity(MOVING_SAMPLE_SIZE));

            // Fixed-size window: evict the oldest sample before pushing
            // the mean per-unit time for this period.
            if kind_samples.len() == MOVING_SAMPLE_SIZE {
                kind_samples.pop_front();
            }
            kind_samples.push_back(elapsed / n);
        }

        store.store(&*samples);
    }

    // Fold one timed request (elapsed wall time over `n` work units)
    // into the counters of the currently active period.
    fn update(&self, kind: Kind, elapsed: Duration, n: u64) {
        macro_rules! update_counters {
            ($counters: expr) => {
                // Accumulate elapsed nanoseconds and unit count,
                // saturating rather than wrapping on overflow.
                $counters.0 = $counters.0.saturating_add({
                    elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64
                });
                $counters.1 = $counters.1.saturating_add(n);
            };
        };

        {
            // Fast path: counters for this kind already exist, so a read
            // lock on the map plus the per-kind mutex suffices.
            let set = self.active_period.read();
            if let Some(counters) = set.get(&kind) {
                let mut counters = counters.lock();
                update_counters!(counters);
                return;
            }
        }

        // Slow path: take the write lock to insert fresh counters.
        let mut set = self.active_period.write();
        let counters = set.entry(kind).or_insert_with(|| Mutex::new((0, 0)));

        update_counters!(counters.get_mut());
    }
}
|
||||
|
||||
/// A timer for a single request.
/// On drop, this will update the distribution.
pub struct LoadTimer<'a> {
    // When the timed request began.
    start: Instant,
    // Number of work units covered (headers / gas / 1, per `begin_timer`).
    n: u64,
    // Distribution to report into on drop.
    dist: &'a LoadDistribution,
    // Request kind being timed.
    kind: Kind,
}
|
||||
|
||||
impl<'a> Drop for LoadTimer<'a> {
|
||||
fn drop(&mut self) {
|
||||
let elapsed = self.start.elapsed();
|
||||
self.dist.update(self.kind, elapsed, self.n);
|
||||
}
|
||||
}
|
||||
|
||||
/// A store which writes directly to a file.
/// The tuple field is the path of the sample file.
pub struct FileStore(pub PathBuf);
|
||||
|
||||
impl SampleStore for FileStore {
    fn load(&self) -> HashMap<Kind, VecDeque<u64>> {
        // Best-effort: any failure (missing file, deserialization error)
        // silently yields an empty history.
        File::open(&self.0)
            .map_err(|e| Box::new(bincode::ErrorKind::IoError(e)))
            .and_then(|mut file| bincode::deserialize_from(&mut file, bincode::Infinite))
            .unwrap_or_else(|_| HashMap::new())
    }

    fn store(&self, samples: &HashMap<Kind, VecDeque<u64>>) {
        // Truncates and rewrites the whole file each time.
        let res = File::create(&self.0)
            .map_err(|e| Box::new(bincode::ErrorKind::IoError(e)))
            .and_then(|mut file| bincode::serialize_into(&mut file, samples, bincode::Infinite));

        // Persisting is best-effort: warn and continue on failure.
        if let Err(e) = res {
            warn!(target: "pip", "Error writing light request timing samples to file: {}", e);
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use request::Kind;

    #[test]
    fn hardcoded_before_data() {
        let dist = LoadDistribution::load(&NullStore);
        // With no samples yet, the hardcoded fallback is used.
        assert_eq!(
            dist.expected_time(Kind::Headers),
            hardcoded_serve_time(Kind::Headers)
        );

        dist.update(Kind::Headers, Duration::new(0, 100_000), 100);
        dist.end_period(&NullStore);

        // 100_000 ns over 100 units -> 1_000 ns per unit.
        assert_eq!(dist.expected_time(Kind::Headers), Duration::new(0, 1000));
    }

    #[test]
    fn moving_average() {
        let dist = LoadDistribution::load(&NullStore);

        let mut sum = 0;

        // Feed strictly increasing samples, one per period.
        for (i, x) in (0..10).map(|x| x * 10_000).enumerate() {
            dist.update(Kind::Headers, Duration::new(0, x), 1);
            dist.end_period(&NullStore);

            sum += x;
            if i == 0 {
                continue;
            }

            let moving_average = dist.expected_time(Kind::Headers);

            // should be weighted below the maximum entry.
            let arith_average = (sum as f64 / (i + 1) as f64) as u32;
            assert!(moving_average < Duration::new(0, x));

            // when there are only 2 entries, they should be equal due to choice of
            // ALPHA = 1/N.
            // otherwise, the weight should be below the arithmetic mean because the much
            // smaller previous values are discounted less.
            if i == 1 {
                assert_eq!(moving_average, Duration::new(0, arith_average));
            } else {
                assert!(moving_average < Duration::new(0, arith_average))
            }
        }
    }

    #[test]
    fn file_store() {
        let tempdir = ::tempdir::TempDir::new("").unwrap();
        let path = tempdir.path().join("file");
        let store = FileStore(path);

        // Loading from a nonexistent file yields an empty map.
        let mut samples = store.load();
        assert!(samples.is_empty());
        samples.insert(Kind::Headers, vec![5, 2, 7, 2, 2, 4].into());
        samples.insert(Kind::Execution, vec![1, 1, 100, 250].into());

        store.store(&samples);

        // Round-trip through the file must preserve the samples exactly.
        let dup = store.load();

        assert_eq!(samples, dup);
    }
}
|
File diff suppressed because it is too large
Load Diff
@ -1,473 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Request credit management.
|
||||
//!
|
||||
//! Every request in the light protocol leads to a reduction
|
||||
//! of the requester's amount of credits as a rate-limiting mechanism.
|
||||
//! The amount of credits will recharge at a set rate.
|
||||
//!
|
||||
//! This module provides an interface for configuration of
|
||||
//! costs and recharge rates of request credits.
|
||||
//!
|
||||
//! Current default costs are picked completely arbitrarily, not based
|
||||
//! on any empirical timings or mathematical models.
|
||||
|
||||
use super::error::Error;
|
||||
use request::{self, Request};
|
||||
|
||||
use ethereum_types::U256;
|
||||
use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// Credits value.
///
/// Produced and recharged using `FlowParams`.
/// Definitive updates can be made as well -- these will reset the recharge
/// point to the time of the update.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Credits {
    // Current (under)estimate of available credits.
    estimate: U256,
    // Instant from which passive recharging is computed.
    recharge_point: Instant,
}
|
||||
|
||||
impl Credits {
    /// Get the current amount of credits.
    pub fn current(&self) -> U256 {
        self.estimate
    }

    /// Make a definitive update.
    /// This will be the value obtained after receiving
    /// a response to a request.
    pub fn update_to(&mut self, value: U256) {
        self.estimate = value;
        // Definitive values reset the recharge clock.
        self.recharge_point = Instant::now();
    }

    /// Maintain ratio to current limit against an old limit.
    pub fn maintain_ratio(&mut self, old_limit: U256, new_limit: U256) {
        self.estimate = (new_limit * self.estimate) / old_limit;
    }

    /// Attempt to apply the given cost to the amount of credits.
    ///
    /// If successful, the cost will be deducted successfully.
    ///
    /// If unsuccessful, the structure will be unaltered and an
    /// error will be produced.
    pub fn deduct_cost(&mut self, cost: U256) -> Result<(), Error> {
        if cost > self.estimate {
            Err(Error::NoCredits)
        } else {
            self.estimate = self.estimate - cost;
            Ok(())
        }
    }
}
|
||||
|
||||
/// A cost table, mapping requests to base and per-request costs.
/// Costs themselves may be missing (`None` = that request kind has no
/// advertised cost).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CostTable {
    base: U256, // cost per packet.
    headers: Option<U256>, // cost per header
    transaction_index: Option<U256>,
    body: Option<U256>,
    receipts: Option<U256>,
    account: Option<U256>,
    storage: Option<U256>,
    code: Option<U256>,
    header_proof: Option<U256>,
    transaction_proof: Option<U256>, // cost per gas.
    epoch_signal: Option<U256>,
}
|
||||
|
||||
impl CostTable {
|
||||
fn costs_set(&self) -> usize {
|
||||
let mut num_set = 0;
|
||||
|
||||
{
|
||||
let mut incr_if_set = |cost: &Option<_>| {
|
||||
if cost.is_some() {
|
||||
num_set += 1
|
||||
}
|
||||
};
|
||||
incr_if_set(&self.headers);
|
||||
incr_if_set(&self.transaction_index);
|
||||
incr_if_set(&self.body);
|
||||
incr_if_set(&self.receipts);
|
||||
incr_if_set(&self.account);
|
||||
incr_if_set(&self.storage);
|
||||
incr_if_set(&self.code);
|
||||
incr_if_set(&self.header_proof);
|
||||
incr_if_set(&self.transaction_proof);
|
||||
incr_if_set(&self.epoch_signal);
|
||||
}
|
||||
|
||||
num_set
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CostTable {
    fn default() -> Self {
        // arbitrarily chosen constants.
        CostTable {
            base: 100_000.into(),
            headers: Some(10000.into()),
            transaction_index: Some(10000.into()),
            body: Some(15000.into()),
            receipts: Some(5000.into()),
            account: Some(25000.into()),
            storage: Some(25000.into()),
            code: Some(20000.into()),
            header_proof: Some(15000.into()),
            // Per unit of gas (see field comment on the struct).
            transaction_proof: Some(2.into()),
            epoch_signal: Some(10000.into()),
        }
    }
}
|
||||
|
||||
impl Encodable for CostTable {
    fn rlp_append(&self, s: &mut RlpStream) {
        // Each set cost is encoded as a two-item list `[kind, cost]`;
        // unset costs are simply omitted from the stream.
        fn append_cost(s: &mut RlpStream, cost: &Option<U256>, kind: request::Kind) {
            if let Some(ref cost) = *cost {
                s.begin_list(2);
                // hack around https://github.com/paritytech/parity-ethereum/issues/4356
                Encodable::rlp_append(&kind, s);
                s.append(cost);
            }
        }

        // Outer list: base cost followed by one pair per set cost.
        s.begin_list(1 + self.costs_set()).append(&self.base);
        append_cost(s, &self.headers, request::Kind::Headers);
        append_cost(s, &self.transaction_index, request::Kind::TransactionIndex);
        append_cost(s, &self.body, request::Kind::Body);
        append_cost(s, &self.receipts, request::Kind::Receipts);
        append_cost(s, &self.account, request::Kind::Account);
        append_cost(s, &self.storage, request::Kind::Storage);
        append_cost(s, &self.code, request::Kind::Code);
        append_cost(s, &self.header_proof, request::Kind::HeaderProof);
        append_cost(s, &self.transaction_proof, request::Kind::Execution);
        append_cost(s, &self.epoch_signal, request::Kind::Signal);
    }
}
|
||||
|
||||
impl Decodable for CostTable {
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        // First item is the mandatory base (per-packet) cost.
        let base = rlp.val_at(0)?;

        let mut headers = None;
        let mut transaction_index = None;
        let mut body = None;
        let mut receipts = None;
        let mut account = None;
        let mut storage = None;
        let mut code = None;
        let mut header_proof = None;
        let mut transaction_proof = None;
        let mut epoch_signal = None;

        // Remaining items are `[kind, cost]` pairs, mirroring the
        // Encodable implementation above.
        for cost_list in rlp.iter().skip(1) {
            let cost = cost_list.val_at(1)?;
            match cost_list.val_at(0)? {
                request::Kind::Headers => headers = Some(cost),
                request::Kind::TransactionIndex => transaction_index = Some(cost),
                request::Kind::Body => body = Some(cost),
                request::Kind::Receipts => receipts = Some(cost),
                request::Kind::Account => account = Some(cost),
                request::Kind::Storage => storage = Some(cost),
                request::Kind::Code => code = Some(cost),
                request::Kind::HeaderProof => header_proof = Some(cost),
                request::Kind::Execution => transaction_proof = Some(cost),
                request::Kind::Signal => epoch_signal = Some(cost),
            }
        }

        let table = CostTable {
            base,
            headers,
            transaction_index,
            body,
            receipts,
            account,
            storage,
            code,
            header_proof,
            transaction_proof,
            epoch_signal,
        };

        // A table with no costs at all can serve nothing; reject it.
        if table.costs_set() == 0 {
            Err(DecoderError::Custom("no cost types set."))
        } else {
            Ok(table)
        }
    }
}
|
||||
|
||||
/// Handles costs, recharge, limits of request credits.
#[derive(Debug, Clone, PartialEq)]
pub struct FlowParams {
    // Per-request cost table.
    costs: CostTable,
    // Maximum amount of credits a peer may accumulate.
    limit: U256,
    // Credits regained per second (see `recharge`).
    recharge: U256,
}
|
||||
|
||||
impl FlowParams {
    /// Create new flow parameters from a request cost table,
    /// credit limit, and (minimum) rate of recharge.
    pub fn new(limit: U256, costs: CostTable, recharge: U256) -> Self {
        FlowParams {
            costs,
            limit,
            recharge,
        }
    }

    /// Create new flow parameters from a function mapping each request
    /// kind to its expected serve time, the proportion of total capacity
    /// which should be given to a peer, and the stored capacity (as a
    /// duration of recharge) a peer can accumulate.
    pub fn from_request_times<F: Fn(::request::Kind) -> Duration>(
        request_time: F,
        load_share: f64,
        max_stored: Duration,
    ) -> Self {
        use request::Kind;

        let load_share = load_share.abs();

        // Fixed recharge rate: 100M credit units per second.
        let recharge: u64 = 100_000_000;
        // Credit limit = recharge accumulated over `max_stored`.
        let max = {
            let sec = max_stored.as_secs().saturating_mul(recharge);
            let nanos = (max_stored.subsec_nanos() as u64).saturating_mul(recharge) / 1_000_000_000;
            sec + nanos
        };

        let cost_for_kind = |kind| {
            // how many requests we can handle per second
            let rq_dur = request_time(kind);
            let second_duration = {
                let as_ns =
                    rq_dur.as_secs() as f64 * 1_000_000_000f64 + rq_dur.subsec_nanos() as f64;
                1_000_000_000f64 / as_ns
            };

            // scale by share of the load given to this peer.
            let serve_per_second = second_duration * load_share;
            // floor of one request per 10_000 seconds to avoid a zero rate.
            let serve_per_second = serve_per_second.max(1.0 / 10_000.0);

            // as a percentage of the recharge per second.
            Some(U256::from((recharge as f64 / serve_per_second) as u64))
        };

        let costs = CostTable {
            base: 0.into(),
            headers: cost_for_kind(Kind::Headers),
            transaction_index: cost_for_kind(Kind::TransactionIndex),
            body: cost_for_kind(Kind::Body),
            receipts: cost_for_kind(Kind::Receipts),
            account: cost_for_kind(Kind::Account),
            storage: cost_for_kind(Kind::Storage),
            code: cost_for_kind(Kind::Code),
            header_proof: cost_for_kind(Kind::HeaderProof),
            transaction_proof: cost_for_kind(Kind::Execution),
            epoch_signal: cost_for_kind(Kind::Signal),
        };

        FlowParams {
            costs,
            limit: max.into(),
            recharge: recharge.into(),
        }
    }

    /// Create effectively infinite flow params: maximum limit, minimal
    /// recharge, and all costs zero.
    pub fn free() -> Self {
        let free_cost: Option<U256> = Some(0.into());
        FlowParams {
            limit: (!0_u64).into(),
            recharge: 1.into(),
            costs: CostTable {
                base: 0.into(),
                headers: free_cost,
                transaction_index: free_cost,
                body: free_cost,
                receipts: free_cost,
                account: free_cost,
                storage: free_cost,
                code: free_cost,
                header_proof: free_cost,
                transaction_proof: free_cost,
                epoch_signal: free_cost,
            },
        }
    }

    /// Get a reference to the credit limit.
    pub fn limit(&self) -> &U256 {
        &self.limit
    }

    /// Get a reference to the cost table.
    pub fn cost_table(&self) -> &CostTable {
        &self.costs
    }

    /// Get the base cost of a request.
    pub fn base_cost(&self) -> U256 {
        self.costs.base
    }

    /// Get a reference to the recharge rate.
    pub fn recharge_rate(&self) -> &U256 {
        &self.recharge
    }

    /// Compute the actual cost of a request, given the kind of request
    /// and number of requests made.
    /// Returns `None` when the cost table has no entry for that kind.
    pub fn compute_cost(&self, request: &Request) -> Option<U256> {
        match *request {
            // Headers scale by requested count; executions by gas.
            Request::Headers(ref req) => self.costs.headers.map(|c| c * U256::from(req.max)),
            Request::HeaderProof(_) => self.costs.header_proof,
            Request::TransactionIndex(_) => self.costs.transaction_index,
            Request::Body(_) => self.costs.body,
            Request::Receipts(_) => self.costs.receipts,
            Request::Account(_) => self.costs.account,
            Request::Storage(_) => self.costs.storage,
            Request::Code(_) => self.costs.code,
            Request::Execution(ref req) => self.costs.transaction_proof.map(|c| c * req.gas),
            Request::Signal(_) => self.costs.epoch_signal,
        }
    }

    /// Compute the cost of a set of requests.
    /// This is the base cost plus the cost of each individual request.
    /// `None` if any individual request has no cost entry.
    pub fn compute_cost_multi(&self, requests: &[Request]) -> Option<U256> {
        let mut cost = self.costs.base;
        for request in requests {
            match self.compute_cost(request) {
                Some(c) => cost = cost + c,
                None => return None,
            }
        }

        Some(cost)
    }

    /// Create initial credits, starting at the full limit.
    pub fn create_credits(&self) -> Credits {
        Credits {
            estimate: self.limit,
            recharge_point: Instant::now(),
        }
    }

    /// Recharge the given credits based on time passed since last
    /// update.
    pub fn recharge(&self, credits: &mut Credits) {
        let now = Instant::now();

        // recompute and update only in terms of full seconds elapsed
        // in order to keep the estimate as an underestimate.
        let elapsed = (now - credits.recharge_point).as_secs();
        credits.recharge_point += Duration::from_secs(elapsed);

        let elapsed: U256 = elapsed.into();

        // Credits accrue at `recharge` per second, capped at the limit.
        credits.estimate =
            ::std::cmp::min(self.limit, credits.estimate + (elapsed * self.recharge));
    }

    /// Refund some credits which were previously deducted.
    /// Does not update the recharge timestamp.
    pub fn refund(&self, credits: &mut Credits, refund_amount: U256) {
        credits.estimate = credits.estimate + refund_amount;

        // Never exceed the configured limit.
        if credits.estimate > self.limit {
            credits.estimate = self.limit
        }
    }
}
|
||||
|
||||
impl Default for FlowParams {
    // Arbitrary default limit/recharge paired with the default cost table.
    fn default() -> Self {
        FlowParams {
            limit: 50_000_000.into(),
            costs: CostTable::default(),
            recharge: 100_000.into(),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn should_serialize_cost_table() {
        let costs = CostTable::default();
        let serialized = ::rlp::encode(&costs);

        let new_costs: CostTable = ::rlp::decode(&*serialized).unwrap();

        // RLP round-trip must be lossless.
        assert_eq!(costs, new_costs);
    }

    #[test]
    fn credits_mechanism() {
        use std::{thread, time::Duration};

        let flow_params = FlowParams::new(100.into(), Default::default(), 20.into());
        let mut credits = flow_params.create_credits();

        // Deducting beyond the current estimate fails; within it succeeds.
        assert!(credits.deduct_cost(101.into()).is_err());
        assert!(credits.deduct_cost(10.into()).is_ok());

        thread::sleep(Duration::from_secs(1));

        flow_params.recharge(&mut credits);

        // One second at recharge rate 20 restores the deducted 10,
        // capped back at the limit of 100.
        assert_eq!(credits.estimate, 100.into());
    }

    #[test]
    fn scale_by_load_share_and_time() {
        let flow_params = FlowParams::from_request_times(
            |_| Duration::new(0, 10_000),
            0.05,
            Duration::from_secs(60),
        );

        let flow_params2 = FlowParams::from_request_times(
            |_| Duration::new(0, 10_000),
            0.1,
            Duration::from_secs(60),
        );

        let flow_params3 = FlowParams::from_request_times(
            |_| Duration::new(0, 5_000),
            0.05,
            Duration::from_secs(60),
        );

        // Halving serve time is equivalent to doubling the load share.
        assert_eq!(flow_params2.costs, flow_params3.costs);
        // Doubling the load share halves the per-request cost.
        assert_eq!(
            flow_params.costs.headers.unwrap(),
            flow_params2.costs.headers.unwrap() * 2u32
        );
    }
}
|
@ -1,211 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Pending request set.
|
||||
//!
|
||||
//! Stores pending requests and does timeout computation according to the rule
|
||||
//! that only the earliest submitted request within the structure may time out.
|
||||
//!
|
||||
//! Whenever a request becomes the earliest, its timeout period begins at that moment.
|
||||
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
iter::FromIterator,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use ethereum_types::U256;
|
||||
use net::{timeout, ReqId};
|
||||
use request::{NetworkRequests as Requests, Request};
|
||||
|
||||
// Request set entry: requests + cost.
// the cost is remembered so it can be subtracted from the
// cumulative total when the entry is removed.
#[derive(Debug)]
struct Entry(Requests, U256);
|
||||
|
||||
/// Request set.
///
/// Tracks in-flight requests by insertion order so that only the
/// earliest-submitted request is subject to timeout at any moment.
#[derive(Debug)]
pub struct RequestSet {
	// monotonically increasing insertion counter; keys `reqs`.
	counter: u64,
	// running sum of the costs of all entries.
	cumulative_cost: U256,
	// instant at which the current earliest entry's timeout began;
	// `None` when the set is empty.
	base: Option<Instant>,
	// maps external request ids to insertion-order counters.
	ids: HashMap<ReqId, u64>,
	// entries in insertion order (BTreeMap gives ordered iteration).
	reqs: BTreeMap<u64, Entry>,
}
|
||||
|
||||
impl Default for RequestSet {
|
||||
fn default() -> Self {
|
||||
RequestSet {
|
||||
counter: 0,
|
||||
cumulative_cost: 0.into(),
|
||||
base: None,
|
||||
ids: HashMap::new(),
|
||||
reqs: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestSet {
|
||||
/// Push requests onto the stack.
|
||||
pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: Instant) {
|
||||
let counter = self.counter;
|
||||
self.cumulative_cost = self.cumulative_cost + cost;
|
||||
|
||||
self.ids.insert(req_id, counter);
|
||||
self.reqs.insert(counter, Entry(req, cost));
|
||||
|
||||
if self.reqs.keys().next().map_or(true, |x| *x == counter) {
|
||||
self.base = Some(now);
|
||||
}
|
||||
|
||||
self.counter += 1;
|
||||
}
|
||||
|
||||
/// Remove a set of requests from the stack.
|
||||
pub fn remove(&mut self, req_id: ReqId, now: Instant) -> Option<Requests> {
|
||||
let id = match self.ids.remove(&req_id) {
|
||||
Some(id) => id,
|
||||
None => return None,
|
||||
};
|
||||
|
||||
let Entry(req, cost) = self
|
||||
.reqs
|
||||
.remove(&id)
|
||||
.expect("entry in `ids` implies entry in `reqs`; qed");
|
||||
|
||||
match self.reqs.keys().next() {
|
||||
Some(k) if *k > id => self.base = Some(now),
|
||||
None => self.base = None,
|
||||
_ => {}
|
||||
}
|
||||
|
||||
self.cumulative_cost = self.cumulative_cost - cost;
|
||||
Some(req)
|
||||
}
|
||||
|
||||
/// Check for timeout against the given time. Returns true if
|
||||
/// has timed out, false otherwise.
|
||||
pub fn check_timeout(&self, now: Instant) -> bool {
|
||||
let base = match self.base.as_ref().cloned() {
|
||||
Some(base) => base,
|
||||
None => return false,
|
||||
};
|
||||
|
||||
let first_req = self
|
||||
.reqs
|
||||
.values()
|
||||
.next()
|
||||
.expect("base existing implies `reqs` non-empty; qed");
|
||||
|
||||
base + compute_timeout(&first_req.0) <= now
|
||||
}
|
||||
|
||||
/// Collect all pending request ids.
|
||||
pub fn collect_ids<F>(&self) -> F
|
||||
where
|
||||
F: FromIterator<ReqId>,
|
||||
{
|
||||
self.ids.keys().cloned().collect()
|
||||
}
|
||||
|
||||
/// Number of requests in the set.
|
||||
pub fn len(&self) -> usize {
|
||||
self.ids.len()
|
||||
}
|
||||
|
||||
/// Whether the set is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
|
||||
/// The cumulative cost of all requests in the set.
|
||||
// this may be useful later for load balancing.
|
||||
#[allow(dead_code)]
|
||||
pub fn cumulative_cost(&self) -> U256 {
|
||||
self.cumulative_cost
|
||||
}
|
||||
}
|
||||
|
||||
// helper to calculate timeout for a specific set of requests.
|
||||
// it's a base amount + some amount per request.
|
||||
fn compute_timeout(reqs: &Requests) -> Duration {
|
||||
Duration::from_millis(reqs.requests().iter().fold(timeout::BASE, |tm, req| {
|
||||
tm + match *req {
|
||||
Request::Headers(_) => timeout::HEADERS,
|
||||
Request::HeaderProof(_) => timeout::HEADER_PROOF,
|
||||
Request::TransactionIndex(_) => timeout::TRANSACTION_INDEX,
|
||||
Request::Receipts(_) => timeout::RECEIPT,
|
||||
Request::Body(_) => timeout::BODY,
|
||||
Request::Account(_) => timeout::PROOF,
|
||||
Request::Storage(_) => timeout::PROOF,
|
||||
Request::Code(_) => timeout::CONTRACT_CODE,
|
||||
Request::Execution(_) => timeout::TRANSACTION_PROOF,
|
||||
Request::Signal(_) => timeout::EPOCH_SIGNAL,
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use super::{compute_timeout, RequestSet};
	use net::ReqId;
	use request::Builder;
	use std::time::{Duration, Instant};

	// the timeout period is measured from when a request becomes the
	// earliest in the set, not from when it was inserted.
	#[test]
	fn multi_timeout() {
		let test_begin = Instant::now();
		let mut req_set = RequestSet::default();

		let the_req = Builder::default().build();
		let req_time = compute_timeout(&the_req);
		req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin);
		req_set.insert(
			ReqId(1),
			the_req,
			0.into(),
			test_begin + Duration::from_secs(1),
		);

		assert_eq!(req_set.base, Some(test_begin));

		let test_end = test_begin + req_time;
		assert!(req_set.check_timeout(test_end));

		// removing the earliest restarts the clock for the next one.
		req_set
			.remove(ReqId(0), test_begin + Duration::from_secs(1))
			.unwrap();
		assert!(!req_set.check_timeout(test_end));
		assert!(req_set.check_timeout(test_end + Duration::from_secs(1)));
	}

	// cumulative cost rises and falls exactly with inserts/removals.
	#[test]
	fn cumulative_cost() {
		let the_req = Builder::default().build();
		let test_begin = Instant::now();
		let test_end = test_begin + Duration::from_secs(1);
		let mut req_set = RequestSet::default();

		for i in 0..5 {
			req_set.insert(ReqId(i), the_req.clone(), 1.into(), test_begin);
			assert_eq!(req_set.cumulative_cost, (i + 1).into());
		}

		for i in (0..5).rev() {
			assert!(req_set.remove(ReqId(i), test_end).is_some());
			assert_eq!(req_set.cumulative_cost, i.into());
		}
	}
}
|
@ -1,569 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Peer status and capabilities.
|
||||
|
||||
use ethereum_types::{H256, U256};
|
||||
use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream};
|
||||
|
||||
use super::request_credits::FlowParams;
|
||||
|
||||
// recognized handshake/announcement keys.
// unknown keys are to be skipped, known keys have a defined order.
// their string values are defined in the LES spec.
//
// NOTE: the declaration order of the variants defines the canonical
// key ordering that `parse_announcement` enforces via the derived
// `Ord` implementation — do not reorder variants casually.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
enum Key {
	ProtocolVersion,
	NetworkId,
	HeadTD,
	HeadHash,
	HeadNum,
	GenesisHash,
	ServeHeaders,
	ServeChainSince,
	ServeStateSince,
	TxRelay,
	BufferLimit,
	BufferCostTable,
	BufferRechargeRate,
}
|
||||
|
||||
impl Key {
	// get the string value of this key, as defined in the LES spec.
	fn as_str(self) -> &'static str {
		match self {
			Key::ProtocolVersion => "protocolVersion",
			Key::NetworkId => "networkId",
			Key::HeadTD => "headTd",
			Key::HeadHash => "headHash",
			Key::HeadNum => "headNum",
			Key::GenesisHash => "genesisHash",
			Key::ServeHeaders => "serveHeaders",
			Key::ServeChainSince => "serveChainSince",
			Key::ServeStateSince => "serveStateSince",
			Key::TxRelay => "txRelay",
			Key::BufferLimit => "flowControl/BL",
			Key::BufferCostTable => "flowControl/MRC",
			Key::BufferRechargeRate => "flowControl/MRR",
		}
	}

	// try to parse the key value from a string.
	// returns `None` for unrecognized keys, which callers skip.
	fn from_str(s: &str) -> Option<Self> {
		match s {
			"protocolVersion" => Some(Key::ProtocolVersion),
			"networkId" => Some(Key::NetworkId),
			"headTd" => Some(Key::HeadTD),
			"headHash" => Some(Key::HeadHash),
			"headNum" => Some(Key::HeadNum),
			"genesisHash" => Some(Key::GenesisHash),
			"serveHeaders" => Some(Key::ServeHeaders),
			"serveChainSince" => Some(Key::ServeChainSince),
			"serveStateSince" => Some(Key::ServeStateSince),
			"txRelay" => Some(Key::TxRelay),
			"flowControl/BL" => Some(Key::BufferLimit),
			"flowControl/MRC" => Some(Key::BufferCostTable),
			"flowControl/MRR" => Some(Key::BufferRechargeRate),
			_ => None,
		}
	}
}
|
||||
|
||||
// helper for decoding key-value pairs in the handshake or an announcement.
struct Parser<'a> {
	// index of the next list item to inspect.
	pos: usize,
	// the RLP list being parsed.
	rlp: &'a Rlp<'a>,
}
|
||||
|
||||
impl<'a> Parser<'a> {
	// expect a specific next key, and decode the value.
	// error on unexpected key or invalid value.
	fn expect<T: Decodable>(&mut self, key: Key) -> Result<T, DecoderError> {
		self.expect_raw(key).and_then(|item| item.as_val())
	}

	// expect a specific next key, and get the value's RLP.
	// if the key isn't found, the position isn't advanced.
	fn expect_raw(&mut self, key: Key) -> Result<Rlp<'a>, DecoderError> {
		trace!(target: "les", "Expecting key {}", key.as_str());
		// remember the position so it can be rewound on a miss; this
		// lets callers probe for optional keys without losing state.
		let pre_pos = self.pos;
		if let Some((k, val)) = self.get_next()? {
			if k == key {
				return Ok(val);
			}
		}

		self.pos = pre_pos;
		Err(DecoderError::Custom("Missing expected key"))
	}

	// get the next key and value RLP.
	// unrecognized keys are skipped silently (per the LES spec).
	fn get_next(&mut self) -> Result<Option<(Key, Rlp<'a>)>, DecoderError> {
		while self.pos < self.rlp.item_count()? {
			let pair = self.rlp.at(self.pos)?;
			let k: String = pair.val_at(0)?;

			self.pos += 1;
			match Key::from_str(&k) {
				Some(key) => return Ok(Some((key, pair.at(1)?))),
				None => continue,
			}
		}

		Ok(None)
	}
}
|
||||
|
||||
// Helper for encoding a key-value pair
|
||||
fn encode_pair<T: Encodable>(key: Key, val: &T) -> Vec<u8> {
|
||||
let mut s = RlpStream::new_list(2);
|
||||
s.append(&key.as_str()).append(val);
|
||||
s.out()
|
||||
}
|
||||
|
||||
// Helper for encoding a flag.
|
||||
fn encode_flag(key: Key) -> Vec<u8> {
|
||||
let mut s = RlpStream::new_list(2);
|
||||
s.append(&key.as_str()).append_empty_data();
|
||||
s.out()
|
||||
}
|
||||
|
||||
/// A peer status message.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Status {
	/// Protocol version.
	pub protocol_version: u32,
	/// Network id of this peer.
	pub network_id: u64,
	/// Total difficulty of the head of the chain.
	pub head_td: U256,
	/// Hash of the best block.
	pub head_hash: H256,
	/// Number of the best block.
	pub head_num: u64,
	/// Genesis hash
	pub genesis_hash: H256,
	/// Last announced chain head and reorg depth to common ancestor.
	/// `None` until the first announcement is applied via `update_from`.
	pub last_head: Option<(H256, u64)>,
}
|
||||
|
||||
impl Status {
|
||||
/// Update the status from an announcement.
|
||||
pub fn update_from(&mut self, announcement: &Announcement) {
|
||||
self.last_head = Some((self.head_hash, announcement.reorg_depth));
|
||||
self.head_td = announcement.head_td;
|
||||
self.head_hash = announcement.head_hash;
|
||||
self.head_num = announcement.head_num;
|
||||
}
|
||||
}
|
||||
|
||||
/// Peer capabilities.
///
/// NOTE: `update_from` only ever widens capabilities; announcements
/// cannot revoke a capability once granted.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Capabilities {
	/// Whether this peer can serve headers
	pub serve_headers: bool,
	/// Earliest block number it can serve block/receipt requests for.
	/// `None` means no requests will be servable.
	pub serve_chain_since: Option<u64>,
	/// Earliest block number it can serve state requests for.
	/// `None` means no requests will be servable.
	pub serve_state_since: Option<u64>,
	/// Whether it can relay transactions to the eth network.
	pub tx_relay: bool,
}
|
||||
|
||||
impl Default for Capabilities {
|
||||
fn default() -> Self {
|
||||
Capabilities {
|
||||
serve_headers: true,
|
||||
serve_chain_since: None,
|
||||
serve_state_since: None,
|
||||
tx_relay: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Capabilities {
|
||||
/// Update the capabilities from an announcement.
|
||||
pub fn update_from(&mut self, announcement: &Announcement) {
|
||||
self.serve_headers = self.serve_headers || announcement.serve_headers;
|
||||
self.serve_state_since = self.serve_state_since.or(announcement.serve_state_since);
|
||||
self.serve_chain_since = self.serve_chain_since.or(announcement.serve_chain_since);
|
||||
self.tx_relay = self.tx_relay || announcement.tx_relay;
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to parse a handshake message into its three parts:
///   - chain status
///   - serving capabilities
///   - request credit parameters
pub fn parse_handshake(
	rlp: &Rlp,
) -> Result<(Status, Capabilities, Option<FlowParams>), DecoderError> {
	let mut parser = Parser { pos: 0, rlp };

	// the six status keys are mandatory and must appear in canonical order.
	let status = Status {
		protocol_version: parser.expect(Key::ProtocolVersion)?,
		network_id: parser.expect(Key::NetworkId)?,
		head_td: parser.expect(Key::HeadTD)?,
		head_hash: parser.expect(Key::HeadHash)?,
		head_num: parser.expect(Key::HeadNum)?,
		genesis_hash: parser.expect(Key::GenesisHash)?,
		last_head: None,
	};

	// capability keys are optional; a failed `expect` leaves the parser
	// position unchanged (see `Parser::expect_raw`), so each probe is safe.
	let capabilities = Capabilities {
		serve_headers: parser.expect_raw(Key::ServeHeaders).is_ok(),
		serve_chain_since: parser.expect(Key::ServeChainSince).ok(),
		serve_state_since: parser.expect(Key::ServeStateSince).ok(),
		tx_relay: parser.expect_raw(Key::TxRelay).is_ok(),
	};

	// flow parameters are all-or-nothing: any missing buffer key means
	// the peer advertised no flow control.
	let flow_params = match (
		parser.expect(Key::BufferLimit),
		parser.expect(Key::BufferCostTable),
		parser.expect(Key::BufferRechargeRate),
	) {
		(Ok(bl), Ok(bct), Ok(brr)) => Some(FlowParams::new(bl, bct, brr)),
		_ => None,
	};

	Ok((status, capabilities, flow_params))
}
|
||||
|
||||
/// Write a handshake, given status, capabilities, and flow parameters.
|
||||
pub fn write_handshake(
|
||||
status: &Status,
|
||||
capabilities: &Capabilities,
|
||||
flow_params: Option<&FlowParams>,
|
||||
) -> Vec<u8> {
|
||||
let mut pairs = Vec::new();
|
||||
pairs.push(encode_pair(Key::ProtocolVersion, &status.protocol_version));
|
||||
pairs.push(encode_pair(Key::NetworkId, &(status.network_id as u64)));
|
||||
pairs.push(encode_pair(Key::HeadTD, &status.head_td));
|
||||
pairs.push(encode_pair(Key::HeadHash, &status.head_hash));
|
||||
pairs.push(encode_pair(Key::HeadNum, &status.head_num));
|
||||
pairs.push(encode_pair(Key::GenesisHash, &status.genesis_hash));
|
||||
|
||||
if capabilities.serve_headers {
|
||||
pairs.push(encode_flag(Key::ServeHeaders));
|
||||
}
|
||||
if let Some(ref serve_chain_since) = capabilities.serve_chain_since {
|
||||
pairs.push(encode_pair(Key::ServeChainSince, serve_chain_since));
|
||||
}
|
||||
if let Some(ref serve_state_since) = capabilities.serve_state_since {
|
||||
pairs.push(encode_pair(Key::ServeStateSince, serve_state_since));
|
||||
}
|
||||
if capabilities.tx_relay {
|
||||
pairs.push(encode_flag(Key::TxRelay));
|
||||
}
|
||||
|
||||
if let Some(flow_params) = flow_params {
|
||||
pairs.push(encode_pair(Key::BufferLimit, flow_params.limit()));
|
||||
pairs.push(encode_pair(Key::BufferCostTable, flow_params.cost_table()));
|
||||
pairs.push(encode_pair(
|
||||
Key::BufferRechargeRate,
|
||||
flow_params.recharge_rate(),
|
||||
));
|
||||
}
|
||||
|
||||
let mut stream = RlpStream::new_list(pairs.len());
|
||||
|
||||
for pair in pairs {
|
||||
stream.append_raw(&pair, 1);
|
||||
}
|
||||
|
||||
stream.out()
|
||||
}
|
||||
|
||||
/// An announcement of new chain head or capabilities made by a peer.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Announcement {
	/// Hash of the best block.
	pub head_hash: H256,
	/// Number of the best block.
	pub head_num: u64,
	/// Head total difficulty
	pub head_td: U256,
	/// reorg depth to common ancestor of last announced head.
	pub reorg_depth: u64,
	/// optional new header-serving capability. false means "no change"
	pub serve_headers: bool,
	/// optional new state-serving capability
	pub serve_state_since: Option<u64>,
	/// optional new chain-serving capability
	pub serve_chain_since: Option<u64>,
	/// optional new transaction-relay capability. false means "no change"
	pub tx_relay: bool,
	// TODO: changes in request credits.
}
|
||||
|
||||
/// Parse an announcement.
pub fn parse_announcement(rlp: &Rlp) -> Result<Announcement, DecoderError> {
	let mut last_key = None;

	// the first four list items are mandatory positional fields.
	let mut announcement = Announcement {
		head_hash: rlp.val_at(0)?,
		head_num: rlp.val_at(1)?,
		head_td: rlp.val_at(2)?,
		reorg_depth: rlp.val_at(3)?,
		serve_headers: false,
		serve_state_since: None,
		serve_chain_since: None,
		tx_relay: false,
	};

	// remaining items are key/value pairs; known keys must appear in
	// strictly increasing canonical (`Key` ordering) order.
	let mut parser = Parser { pos: 4, rlp };

	while let Some((key, item)) = parser.get_next()? {
		// `None < Some(_)` for any key, so the first key always passes.
		if Some(key) <= last_key {
			return Err(DecoderError::Custom("Invalid announcement key ordering"));
		}
		last_key = Some(key);

		match key {
			Key::ServeHeaders => announcement.serve_headers = true,
			Key::ServeStateSince => announcement.serve_state_since = Some(item.as_val()?),
			Key::ServeChainSince => announcement.serve_chain_since = Some(item.as_val()?),
			Key::TxRelay => announcement.tx_relay = true,
			// handshake-only keys are invalid inside an announcement.
			_ => return Err(DecoderError::Custom("Nonsensical key in announcement")),
		}
	}

	Ok(announcement)
}
|
||||
|
||||
/// Write an announcement out.
|
||||
pub fn write_announcement(announcement: &Announcement) -> Vec<u8> {
|
||||
let mut pairs = Vec::new();
|
||||
if announcement.serve_headers {
|
||||
pairs.push(encode_flag(Key::ServeHeaders));
|
||||
}
|
||||
if let Some(ref serve_chain_since) = announcement.serve_chain_since {
|
||||
pairs.push(encode_pair(Key::ServeChainSince, serve_chain_since));
|
||||
}
|
||||
if let Some(ref serve_state_since) = announcement.serve_state_since {
|
||||
pairs.push(encode_pair(Key::ServeStateSince, serve_state_since));
|
||||
}
|
||||
if announcement.tx_relay {
|
||||
pairs.push(encode_flag(Key::TxRelay));
|
||||
}
|
||||
|
||||
let mut stream = RlpStream::new_list(4 + pairs.len());
|
||||
stream
|
||||
.append(&announcement.head_hash)
|
||||
.append(&announcement.head_num)
|
||||
.append(&announcement.head_td)
|
||||
.append(&announcement.reorg_depth);
|
||||
|
||||
for item in pairs {
|
||||
stream.append_raw(&item, 1);
|
||||
}
|
||||
|
||||
stream.out()
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use super::{super::request_credits::FlowParams, *};
	use ethereum_types::{H256, U256};
	use rlp::{Rlp, RlpStream};

	// a handshake with every capability and flow params round-trips.
	#[test]
	fn full_handshake() {
		let status = Status {
			protocol_version: 1,
			network_id: 1,
			head_td: U256::default(),
			head_hash: H256::default(),
			head_num: 10,
			genesis_hash: H256::zero(),
			last_head: None,
		};

		let capabilities = Capabilities {
			serve_headers: true,
			serve_chain_since: Some(5),
			serve_state_since: Some(8),
			tx_relay: true,
		};

		let flow_params = FlowParams::new(1_000_000.into(), Default::default(), 1000.into());

		let handshake = write_handshake(&status, &capabilities, Some(&flow_params));

		let (read_status, read_capabilities, read_flow) =
			parse_handshake(&Rlp::new(&handshake)).unwrap();

		assert_eq!(read_status, status);
		assert_eq!(read_capabilities, capabilities);
		assert_eq!(read_flow.unwrap(), flow_params);
	}

	// a handshake with only some capabilities set also round-trips.
	#[test]
	fn partial_handshake() {
		let status = Status {
			protocol_version: 1,
			network_id: 1,
			head_td: U256::default(),
			head_hash: H256::default(),
			head_num: 10,
			genesis_hash: H256::zero(),
			last_head: None,
		};

		let capabilities = Capabilities {
			serve_headers: false,
			serve_chain_since: Some(5),
			serve_state_since: None,
			tx_relay: true,
		};

		let flow_params = FlowParams::new(1_000_000.into(), Default::default(), 1000.into());

		let handshake = write_handshake(&status, &capabilities, Some(&flow_params));

		let (read_status, read_capabilities, read_flow) =
			parse_handshake(&Rlp::new(&handshake)).unwrap();

		assert_eq!(read_status, status);
		assert_eq!(read_capabilities, capabilities);
		assert_eq!(read_flow.unwrap(), flow_params);
	}

	// unrecognized keys interleaved between known ones are ignored,
	// as required by the LES spec.
	#[test]
	fn skip_unknown_keys() {
		let status = Status {
			protocol_version: 1,
			network_id: 1,
			head_td: U256::default(),
			head_hash: H256::default(),
			head_num: 10,
			genesis_hash: H256::zero(),
			last_head: None,
		};

		let capabilities = Capabilities {
			serve_headers: false,
			serve_chain_since: Some(5),
			serve_state_since: None,
			tx_relay: true,
		};

		let flow_params = FlowParams::new(1_000_000.into(), Default::default(), 1000.into());

		let handshake = write_handshake(&status, &capabilities, Some(&flow_params));
		// rebuild the handshake with two junk pairs after every real item.
		let interleaved = {
			let handshake = Rlp::new(&handshake);
			let mut stream = RlpStream::new_list(handshake.item_count().unwrap_or(0) * 3);

			for item in handshake.iter() {
				stream.append_raw(item.as_raw(), 1);
				let (mut s1, mut s2) = (RlpStream::new_list(2), RlpStream::new_list(2));
				s1.append(&"foo").append_empty_data();
				s2.append(&"bar").append_empty_data();
				stream.append_raw(&s1.out(), 1);
				stream.append_raw(&s2.out(), 1);
			}

			stream.out()
		};

		let (read_status, read_capabilities, read_flow) =
			parse_handshake(&Rlp::new(&interleaved)).unwrap();

		assert_eq!(read_status, status);
		assert_eq!(read_capabilities, capabilities);
		assert_eq!(read_flow.unwrap(), flow_params);
	}

	// announcements survive a write/parse round trip.
	#[test]
	fn announcement_roundtrip() {
		let announcement = Announcement {
			head_hash: H256::random(),
			head_num: 100_000,
			head_td: 1_000_000.into(),
			reorg_depth: 4,
			serve_headers: false,
			serve_state_since: Some(99_000),
			serve_chain_since: Some(1),
			tx_relay: true,
		};

		let serialized = write_announcement(&announcement);
		let read = parse_announcement(&Rlp::new(&serialized)).unwrap();

		assert_eq!(read, announcement);
	}

	// keys out of canonical order are rejected; in-order keys parse.
	#[test]
	fn keys_out_of_order() {
		use super::{encode_flag, encode_pair, Key};

		// ServeStateSince before ServeHeaders violates the ordering.
		let mut stream = RlpStream::new_list(6);
		stream
			.append(&H256::zero())
			.append(&10_u64)
			.append(&100_000_u64)
			.append(&2_u64)
			.append_raw(&encode_pair(Key::ServeStateSince, &44_u64), 1)
			.append_raw(&encode_flag(Key::ServeHeaders), 1);

		let out = stream.drain();
		assert!(parse_announcement(&Rlp::new(&out)).is_err());

		let mut stream = RlpStream::new_list(6);
		stream
			.append(&H256::zero())
			.append(&10_u64)
			.append(&100_000_u64)
			.append(&2_u64)
			.append_raw(&encode_flag(Key::ServeHeaders), 1)
			.append_raw(&encode_pair(Key::ServeStateSince, &44_u64), 1);

		let out = stream.drain();
		assert!(parse_announcement(&Rlp::new(&out)).is_ok());
	}

	// a handshake without flow parameters parses to `None`.
	#[test]
	fn optional_flow() {
		let status = Status {
			protocol_version: 1,
			network_id: 1,
			head_td: U256::default(),
			head_hash: H256::default(),
			head_num: 10,
			genesis_hash: H256::zero(),
			last_head: None,
		};

		let capabilities = Capabilities {
			serve_headers: true,
			serve_chain_since: Some(5),
			serve_state_since: Some(8),
			tx_relay: true,
		};

		let handshake = write_handshake(&status, &capabilities, None);

		let (read_status, read_capabilities, read_flow) =
			parse_handshake(&Rlp::new(&handshake)).unwrap();

		assert_eq!(read_status, status);
		assert_eq!(read_capabilities, capabilities);
		assert!(read_flow.is_none());
	}
}
|
@ -1,927 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Tests for the `LightProtocol` implementation.
|
||||
//! These don't test of the higher level logic on top of
|
||||
|
||||
use common_types::{
|
||||
blockchain_info::BlockChainInfo,
|
||||
encoded,
|
||||
ids::BlockId,
|
||||
transaction::{Action, PendingTransaction},
|
||||
};
|
||||
use ethcore::client::{EachBlockWith, TestBlockChainClient};
|
||||
use ethereum_types::{Address, H256, U256};
|
||||
use net::{
|
||||
context::IoContext,
|
||||
load_timer::MOVING_SAMPLE_SIZE,
|
||||
packet,
|
||||
status::{Capabilities, Status},
|
||||
LightProtocol, Params, Peer, Statistics,
|
||||
};
|
||||
use network::{NodeId, PeerId};
|
||||
use provider::Provider;
|
||||
use request::{self, *};
|
||||
use rlp::{Rlp, RlpStream};
|
||||
|
||||
use std::{sync::Arc, time::Instant};
|
||||
|
||||
// helper for encoding a single request into a packet.
|
||||
// panics on bad backreference.
|
||||
fn encode_single(request: Request) -> NetworkRequests {
|
||||
let mut builder = Builder::default();
|
||||
builder.push(request).unwrap();
|
||||
builder.build()
|
||||
}
|
||||
|
||||
// helper for making a packet out of `Requests`.
|
||||
fn make_packet(req_id: usize, requests: &NetworkRequests) -> Vec<u8> {
|
||||
let mut stream = RlpStream::new_list(2);
|
||||
stream.append(&req_id).append_list(&requests.requests());
|
||||
stream.out()
|
||||
}
|
||||
|
||||
// expected result from a call.
// used as a one-shot mock `IoContext`: each variant describes the
// single interaction the protocol is expected to perform.
#[derive(Debug, PartialEq, Eq)]
enum Expect {
	/// Expect to have message sent to peer.
	Send(PeerId, u8, Vec<u8>),
	/// Expect this response.
	Respond(u8, Vec<u8>),
	/// Expect a punishment (disconnect/disable)
	Punish(PeerId),
	/// Expect nothing.
	Nothing,
}
|
||||
|
||||
// each callback asserts that the call matches the single expected
// interaction; any other call fails the test via the `assert_eq!`.
impl IoContext for Expect {
	fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec<u8>) {
		assert_eq!(self, &Expect::Send(peer, packet_id, packet_body));
	}

	fn respond(&self, packet_id: u8, packet_body: Vec<u8>) {
		assert_eq!(self, &Expect::Respond(packet_id, packet_body));
	}

	fn disconnect_peer(&self, peer: PeerId) {
		assert_eq!(self, &Expect::Punish(peer));
	}

	fn disable_peer(&self, peer: PeerId) {
		assert_eq!(self, &Expect::Punish(peer));
	}

	fn protocol_version(&self, _peer: PeerId) -> Option<u8> {
		Some(super::MAX_PROTOCOL_VERSION)
	}

	fn persistent_peer_id(&self, _peer: PeerId) -> Option<NodeId> {
		None
	}

	// peer 0xff is treated as the sole reserved peer in these tests.
	fn is_reserved_peer(&self, peer: PeerId) -> bool {
		peer == 0xff
	}
}
|
||||
|
||||
// can't implement directly for Arc due to cross-crate orphan rules.
struct TestProvider(Arc<TestProviderInner>);

// shared inner state: the mock chain client the provider serves from.
struct TestProviderInner {
	client: TestBlockChainClient,
}
|
||||
|
||||
impl Provider for TestProvider {
|
||||
fn chain_info(&self) -> BlockChainInfo {
|
||||
self.0.client.chain_info()
|
||||
}
|
||||
|
||||
fn reorg_depth(&self, a: &H256, b: &H256) -> Option<u64> {
|
||||
self.0.client.reorg_depth(a, b)
|
||||
}
|
||||
|
||||
fn earliest_state(&self) -> Option<u64> {
|
||||
None
|
||||
}
|
||||
|
||||
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
|
||||
self.0.client.block_header(id)
|
||||
}
|
||||
|
||||
fn transaction_index(
|
||||
&self,
|
||||
req: request::CompleteTransactionIndexRequest,
|
||||
) -> Option<request::TransactionIndexResponse> {
|
||||
Some(request::TransactionIndexResponse {
|
||||
num: 100,
|
||||
hash: req.hash,
|
||||
index: 55,
|
||||
})
|
||||
}
|
||||
|
||||
fn block_body(&self, req: request::CompleteBodyRequest) -> Option<request::BodyResponse> {
|
||||
self.0.client.block_body(req)
|
||||
}
|
||||
|
||||
fn block_receipts(
|
||||
&self,
|
||||
req: request::CompleteReceiptsRequest,
|
||||
) -> Option<request::ReceiptsResponse> {
|
||||
self.0.client.block_receipts(req)
|
||||
}
|
||||
|
||||
fn account_proof(
|
||||
&self,
|
||||
req: request::CompleteAccountRequest,
|
||||
) -> Option<request::AccountResponse> {
|
||||
// sort of a leaf node
|
||||
let mut stream = RlpStream::new_list(2);
|
||||
stream.append(&req.address_hash).append_empty_data();
|
||||
Some(AccountResponse {
|
||||
proof: vec![stream.out()],
|
||||
balance: 10.into(),
|
||||
nonce: 100.into(),
|
||||
code_hash: Default::default(),
|
||||
storage_root: Default::default(),
|
||||
})
|
||||
}
|
||||
|
||||
fn storage_proof(
|
||||
&self,
|
||||
req: request::CompleteStorageRequest,
|
||||
) -> Option<request::StorageResponse> {
|
||||
Some(StorageResponse {
|
||||
proof: vec![::rlp::encode(&req.key_hash)],
|
||||
value: req.key_hash | req.address_hash,
|
||||
})
|
||||
}
|
||||
|
||||
fn contract_code(&self, req: request::CompleteCodeRequest) -> Option<request::CodeResponse> {
|
||||
Some(CodeResponse {
|
||||
code: req
|
||||
.block_hash
|
||||
.iter()
|
||||
.chain(req.code_hash.iter())
|
||||
.cloned()
|
||||
.collect(),
|
||||
})
|
||||
}
|
||||
|
||||
fn header_proof(
|
||||
&self,
|
||||
_req: request::CompleteHeaderProofRequest,
|
||||
) -> Option<request::HeaderProofResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn transaction_proof(
|
||||
&self,
|
||||
_req: request::CompleteExecutionRequest,
|
||||
) -> Option<request::ExecutionResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn epoch_signal(
|
||||
&self,
|
||||
_req: request::CompleteSignalRequest,
|
||||
) -> Option<request::SignalResponse> {
|
||||
Some(request::SignalResponse {
|
||||
signal: vec![1, 2, 3, 4],
|
||||
})
|
||||
}
|
||||
|
||||
fn transactions_to_propagate(&self) -> Vec<PendingTransaction> {
|
||||
self.0.client.transactions_to_propagate()
|
||||
}
|
||||
}
|
||||
|
||||
fn capabilities() -> Capabilities {
|
||||
Capabilities {
|
||||
serve_headers: true,
|
||||
serve_chain_since: Some(1),
|
||||
serve_state_since: Some(1),
|
||||
tx_relay: true,
|
||||
}
|
||||
}
|
||||
|
||||
fn write_handshake(status: &Status, capabilities: &Capabilities, proto: &LightProtocol) -> Vec<u8> {
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
::net::status::write_handshake(status, capabilities, Some(&*flow_params))
|
||||
}
|
||||
|
||||
fn write_free_handshake(
|
||||
status: &Status,
|
||||
capabilities: &Capabilities,
|
||||
proto: &LightProtocol,
|
||||
) -> Vec<u8> {
|
||||
::net::status::write_handshake(status, capabilities, Some(&proto.free_flow_params))
|
||||
}
|
||||
|
||||
// helper for setting up the protocol handler and provider.
|
||||
fn setup(capabilities: Capabilities) -> (Arc<TestProviderInner>, LightProtocol) {
|
||||
let provider = Arc::new(TestProviderInner {
|
||||
client: TestBlockChainClient::new(),
|
||||
});
|
||||
|
||||
let proto = LightProtocol::new(
|
||||
Arc::new(TestProvider(provider.clone())),
|
||||
Params {
|
||||
network_id: 2,
|
||||
config: Default::default(),
|
||||
capabilities: capabilities,
|
||||
sample_store: None,
|
||||
},
|
||||
);
|
||||
|
||||
(provider, proto)
|
||||
}
|
||||
|
||||
fn status(chain_info: BlockChainInfo) -> Status {
|
||||
Status {
|
||||
protocol_version: 1,
|
||||
network_id: 2,
|
||||
head_td: chain_info.total_difficulty,
|
||||
head_hash: chain_info.best_block_hash,
|
||||
head_num: chain_info.best_block_number,
|
||||
genesis_hash: chain_info.genesis_hash,
|
||||
last_head: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn handshake_expected() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
|
||||
let status = status(provider.client.chain_info());
|
||||
|
||||
let packet_body = write_handshake(&status, &capabilities, &proto);
|
||||
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reserved_handshake_expected() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
|
||||
let status = status(provider.client.chain_info());
|
||||
|
||||
let packet_body = write_free_handshake(&status, &capabilities, &proto);
|
||||
|
||||
proto.on_connect(0xff, &Expect::Send(0xff, packet::STATUS, packet_body));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn genesis_mismatch() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
|
||||
let mut status = status(provider.client.chain_info());
|
||||
status.genesis_hash = H256::default();
|
||||
|
||||
let packet_body = write_handshake(&status, &capabilities, &proto);
|
||||
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn credit_overflow() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
|
||||
let status = status(provider.client.chain_info());
|
||||
|
||||
{
|
||||
let packet_body = write_handshake(&status, &capabilities, &proto);
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body));
|
||||
}
|
||||
|
||||
{
|
||||
let my_status = write_handshake(&status, &capabilities, &proto);
|
||||
proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status);
|
||||
}
|
||||
|
||||
// 1 billion requests is far too many for the default flow params.
|
||||
let requests = encode_single(Request::Headers(IncompleteHeadersRequest {
|
||||
start: HashOrNumber::Number(1).into(),
|
||||
max: 1_000_000_000,
|
||||
skip: 0,
|
||||
reverse: false,
|
||||
}));
|
||||
let request = make_packet(111, &requests);
|
||||
|
||||
proto.handle_packet(&Expect::Punish(1), 1, packet::REQUEST, &request);
|
||||
}
|
||||
|
||||
// test the basic request types -- these just make sure that requests are parsed
|
||||
// and sent to the provider correctly as well as testing response formatting.
|
||||
|
||||
#[test]
|
||||
fn get_block_headers() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
let my_status = write_handshake(&cur_status, &capabilities, &proto);
|
||||
|
||||
provider.client.add_blocks(100, EachBlockWith::Nothing);
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
|
||||
{
|
||||
let packet_body = write_handshake(&cur_status, &capabilities, &proto);
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body));
|
||||
proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status);
|
||||
}
|
||||
|
||||
let request = Request::Headers(IncompleteHeadersRequest {
|
||||
start: HashOrNumber::Number(1).into(),
|
||||
max: 10,
|
||||
skip: 0,
|
||||
reverse: false,
|
||||
});
|
||||
|
||||
let req_id = 111;
|
||||
|
||||
let requests = encode_single(request.clone());
|
||||
let request_body = make_packet(req_id, &requests);
|
||||
|
||||
let response = {
|
||||
let headers: Vec<_> = (0..10)
|
||||
.map(|i| {
|
||||
provider
|
||||
.client
|
||||
.block_header(BlockId::Number(i + 1))
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(headers.len(), 10);
|
||||
|
||||
let new_creds =
|
||||
*flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap();
|
||||
|
||||
let response = vec![Response::Headers(HeadersResponse { headers })];
|
||||
|
||||
let mut stream = RlpStream::new_list(3);
|
||||
stream
|
||||
.append(&req_id)
|
||||
.append(&new_creds)
|
||||
.append_list(&response);
|
||||
|
||||
stream.out()
|
||||
};
|
||||
|
||||
let expected = Expect::Respond(packet::RESPONSE, response);
|
||||
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_block_bodies() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
let my_status = write_handshake(&cur_status, &capabilities, &proto);
|
||||
|
||||
provider.client.add_blocks(100, EachBlockWith::Nothing);
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
|
||||
{
|
||||
let packet_body = write_handshake(&cur_status, &capabilities, &proto);
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body));
|
||||
proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status);
|
||||
}
|
||||
|
||||
let mut builder = Builder::default();
|
||||
let mut bodies = Vec::new();
|
||||
|
||||
for i in 0..10 {
|
||||
let hash = provider
|
||||
.client
|
||||
.block_header(BlockId::Number(i))
|
||||
.unwrap()
|
||||
.hash();
|
||||
builder
|
||||
.push(Request::Body(IncompleteBodyRequest { hash: hash.into() }))
|
||||
.unwrap();
|
||||
bodies.push(Response::Body(
|
||||
provider
|
||||
.client
|
||||
.block_body(CompleteBodyRequest { hash: hash })
|
||||
.unwrap(),
|
||||
));
|
||||
}
|
||||
let req_id = 111;
|
||||
let requests = builder.build();
|
||||
let request_body = make_packet(req_id, &requests);
|
||||
|
||||
let response = {
|
||||
let new_creds =
|
||||
*flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap();
|
||||
|
||||
let mut response_stream = RlpStream::new_list(3);
|
||||
response_stream
|
||||
.append(&req_id)
|
||||
.append(&new_creds)
|
||||
.append_list(&bodies);
|
||||
response_stream.out()
|
||||
};
|
||||
|
||||
let expected = Expect::Respond(packet::RESPONSE, response);
|
||||
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_block_receipts() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
let my_status = write_handshake(&cur_status, &capabilities, &proto);
|
||||
|
||||
provider.client.add_blocks(1000, EachBlockWith::Nothing);
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
|
||||
{
|
||||
let packet_body = write_handshake(&cur_status, &capabilities, &proto);
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body));
|
||||
proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &my_status);
|
||||
}
|
||||
|
||||
// find the first 10 block hashes starting with `f` because receipts are only provided
|
||||
// by the test client in that case.
|
||||
let block_hashes: Vec<H256> = (0..1000)
|
||||
.map(|i| {
|
||||
provider
|
||||
.client
|
||||
.block_header(BlockId::Number(i))
|
||||
.unwrap()
|
||||
.hash()
|
||||
})
|
||||
.filter(|hash| format!("{}", hash).starts_with("0xf"))
|
||||
.take(10)
|
||||
.collect();
|
||||
|
||||
let mut builder = Builder::default();
|
||||
let mut receipts = Vec::new();
|
||||
for hash in block_hashes.iter().cloned() {
|
||||
builder
|
||||
.push(Request::Receipts(IncompleteReceiptsRequest {
|
||||
hash: hash.into(),
|
||||
}))
|
||||
.unwrap();
|
||||
receipts.push(Response::Receipts(
|
||||
provider
|
||||
.client
|
||||
.block_receipts(CompleteReceiptsRequest { hash: hash })
|
||||
.unwrap(),
|
||||
));
|
||||
}
|
||||
|
||||
let req_id = 111;
|
||||
let requests = builder.build();
|
||||
let request_body = make_packet(req_id, &requests);
|
||||
|
||||
let response = {
|
||||
assert_eq!(receipts.len(), 10);
|
||||
|
||||
let new_creds =
|
||||
*flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap();
|
||||
|
||||
let mut response_stream = RlpStream::new_list(3);
|
||||
response_stream
|
||||
.append(&req_id)
|
||||
.append(&new_creds)
|
||||
.append_list(&receipts);
|
||||
response_stream.out()
|
||||
};
|
||||
|
||||
let expected = Expect::Respond(packet::RESPONSE, response);
|
||||
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_state_proofs() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
|
||||
let provider = TestProvider(provider);
|
||||
|
||||
let cur_status = status(provider.0.client.chain_info());
|
||||
|
||||
{
|
||||
let packet_body = write_handshake(&cur_status, &capabilities, &proto);
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
|
||||
proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body);
|
||||
}
|
||||
|
||||
let req_id = 112;
|
||||
let key1: H256 = U256::from(11223344).into();
|
||||
let key2: H256 = U256::from(99988887).into();
|
||||
|
||||
let mut builder = Builder::default();
|
||||
builder
|
||||
.push(Request::Account(IncompleteAccountRequest {
|
||||
block_hash: H256::default().into(),
|
||||
address_hash: key1.into(),
|
||||
}))
|
||||
.unwrap();
|
||||
builder
|
||||
.push(Request::Storage(IncompleteStorageRequest {
|
||||
block_hash: H256::default().into(),
|
||||
address_hash: key1.into(),
|
||||
key_hash: key2.into(),
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let requests = builder.build();
|
||||
|
||||
let request_body = make_packet(req_id, &requests);
|
||||
let response = {
|
||||
let responses = vec![
|
||||
Response::Account(
|
||||
provider
|
||||
.account_proof(CompleteAccountRequest {
|
||||
block_hash: H256::default(),
|
||||
address_hash: key1,
|
||||
})
|
||||
.unwrap(),
|
||||
),
|
||||
Response::Storage(
|
||||
provider
|
||||
.storage_proof(CompleteStorageRequest {
|
||||
block_hash: H256::default(),
|
||||
address_hash: key1,
|
||||
key_hash: key2,
|
||||
})
|
||||
.unwrap(),
|
||||
),
|
||||
];
|
||||
|
||||
let new_creds =
|
||||
*flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap();
|
||||
|
||||
let mut response_stream = RlpStream::new_list(3);
|
||||
response_stream
|
||||
.append(&req_id)
|
||||
.append(&new_creds)
|
||||
.append_list(&responses);
|
||||
response_stream.out()
|
||||
};
|
||||
|
||||
let expected = Expect::Respond(packet::RESPONSE, response);
|
||||
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_contract_code() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
|
||||
{
|
||||
let packet_body = write_handshake(&cur_status, &capabilities, &proto);
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
|
||||
proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body);
|
||||
}
|
||||
|
||||
let req_id = 112;
|
||||
let key1: H256 = U256::from(11223344).into();
|
||||
let key2: H256 = U256::from(99988887).into();
|
||||
|
||||
let request = Request::Code(IncompleteCodeRequest {
|
||||
block_hash: key1.into(),
|
||||
code_hash: key2.into(),
|
||||
});
|
||||
|
||||
let requests = encode_single(request.clone());
|
||||
let request_body = make_packet(req_id, &requests);
|
||||
let response = {
|
||||
let response = vec![Response::Code(CodeResponse {
|
||||
code: key1.iter().chain(key2.iter()).cloned().collect(),
|
||||
})];
|
||||
|
||||
let new_creds =
|
||||
*flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap();
|
||||
|
||||
let mut response_stream = RlpStream::new_list(3);
|
||||
|
||||
response_stream
|
||||
.append(&req_id)
|
||||
.append(&new_creds)
|
||||
.append_list(&response);
|
||||
response_stream.out()
|
||||
};
|
||||
|
||||
let expected = Expect::Respond(packet::RESPONSE, response);
|
||||
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn epoch_signal() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
|
||||
{
|
||||
let packet_body = write_handshake(&cur_status, &capabilities, &proto);
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
|
||||
proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body);
|
||||
}
|
||||
|
||||
let req_id = 112;
|
||||
let request = Request::Signal(request::IncompleteSignalRequest {
|
||||
block_hash: H256([1; 32]).into(),
|
||||
});
|
||||
|
||||
let requests = encode_single(request.clone());
|
||||
let request_body = make_packet(req_id, &requests);
|
||||
|
||||
let response = {
|
||||
let response = vec![Response::Signal(SignalResponse {
|
||||
signal: vec![1, 2, 3, 4],
|
||||
})];
|
||||
|
||||
let limit = *flow_params.limit();
|
||||
let cost = flow_params.compute_cost_multi(requests.requests()).unwrap();
|
||||
|
||||
let new_creds = limit - cost;
|
||||
|
||||
let mut response_stream = RlpStream::new_list(3);
|
||||
response_stream
|
||||
.append(&req_id)
|
||||
.append(&new_creds)
|
||||
.append_list(&response);
|
||||
|
||||
response_stream.out()
|
||||
};
|
||||
|
||||
let expected = Expect::Respond(packet::RESPONSE, response);
|
||||
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn proof_of_execution() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
|
||||
{
|
||||
let packet_body = write_handshake(&cur_status, &capabilities, &proto);
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
|
||||
proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body);
|
||||
}
|
||||
|
||||
let req_id = 112;
|
||||
let mut request = Request::Execution(request::IncompleteExecutionRequest {
|
||||
block_hash: H256::default().into(),
|
||||
from: Address::default(),
|
||||
action: Action::Call(Address::default()),
|
||||
gas: 100.into(),
|
||||
gas_price: 0.into(),
|
||||
value: 0.into(),
|
||||
data: Vec::new(),
|
||||
});
|
||||
|
||||
// first: a valid amount to request execution of.
|
||||
let requests = encode_single(request.clone());
|
||||
let request_body = make_packet(req_id, &requests);
|
||||
|
||||
let response = {
|
||||
let limit = *flow_params.limit();
|
||||
let cost = flow_params.compute_cost_multi(requests.requests()).unwrap();
|
||||
|
||||
let new_creds = limit - cost;
|
||||
|
||||
let mut response_stream = RlpStream::new_list(3);
|
||||
response_stream
|
||||
.append(&req_id)
|
||||
.append(&new_creds)
|
||||
.begin_list(0);
|
||||
|
||||
response_stream.out()
|
||||
};
|
||||
|
||||
let expected = Expect::Respond(packet::RESPONSE, response);
|
||||
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
|
||||
|
||||
// next: way too much requested gas.
|
||||
if let Request::Execution(ref mut req) = request {
|
||||
req.gas = 100_000_000.into();
|
||||
}
|
||||
let req_id = 113;
|
||||
let requests = encode_single(request.clone());
|
||||
let request_body = make_packet(req_id, &requests);
|
||||
|
||||
let expected = Expect::Punish(1);
|
||||
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn id_guard() {
|
||||
use super::{request_set::RequestSet, ReqId};
|
||||
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
|
||||
let req_id_1 = ReqId(5143);
|
||||
let req_id_2 = ReqId(1111);
|
||||
|
||||
let req = encode_single(Request::Headers(IncompleteHeadersRequest {
|
||||
start: HashOrNumber::Number(5u64).into(),
|
||||
max: 100,
|
||||
skip: 0,
|
||||
reverse: false,
|
||||
}));
|
||||
|
||||
let peer_id = 9876;
|
||||
|
||||
let mut pending_requests = RequestSet::default();
|
||||
|
||||
pending_requests.insert(req_id_1, req.clone(), 0.into(), Instant::now());
|
||||
pending_requests.insert(req_id_2, req, 1.into(), Instant::now());
|
||||
|
||||
proto.peers.write().insert(
|
||||
peer_id,
|
||||
::parking_lot::Mutex::new(Peer {
|
||||
local_credits: flow_params.create_credits(),
|
||||
status: status(provider.client.chain_info()),
|
||||
capabilities,
|
||||
remote_flow: Some((flow_params.create_credits(), (&*flow_params).clone())),
|
||||
sent_head: provider.client.chain_info().best_block_hash,
|
||||
last_update: Instant::now(),
|
||||
pending_requests: pending_requests,
|
||||
failed_requests: Vec::new(),
|
||||
propagated_transactions: Default::default(),
|
||||
skip_update: false,
|
||||
local_flow: flow_params,
|
||||
awaiting_acknowledge: None,
|
||||
}),
|
||||
);
|
||||
|
||||
// first, malformed responses.
|
||||
{
|
||||
let mut stream = RlpStream::new_list(3);
|
||||
stream.append(&req_id_1.0);
|
||||
stream.append(&4_000_000_usize);
|
||||
stream.begin_list(2).append(&125_usize).append(&3_usize);
|
||||
|
||||
let packet = stream.out();
|
||||
assert!(proto
|
||||
.response(peer_id, &Expect::Nothing, &Rlp::new(&packet))
|
||||
.is_err());
|
||||
}
|
||||
|
||||
// next, do an unexpected response.
|
||||
{
|
||||
let mut stream = RlpStream::new_list(3);
|
||||
stream.append(&10000_usize);
|
||||
stream.append(&3_000_000_usize);
|
||||
stream.begin_list(0);
|
||||
|
||||
let packet = stream.out();
|
||||
assert!(proto
|
||||
.response(peer_id, &Expect::Nothing, &Rlp::new(&packet))
|
||||
.is_err());
|
||||
}
|
||||
|
||||
// lastly, do a valid (but empty) response.
|
||||
{
|
||||
let mut stream = RlpStream::new_list(3);
|
||||
stream.append(&req_id_2.0);
|
||||
stream.append(&3_000_000_usize);
|
||||
stream.begin_list(0);
|
||||
|
||||
let packet = stream.out();
|
||||
assert!(proto
|
||||
.response(peer_id, &Expect::Nothing, &Rlp::new(&packet))
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
let peers = proto.peers.read();
|
||||
if let Some(ref peer_info) = peers.get(&peer_id) {
|
||||
let peer_info = peer_info.lock();
|
||||
assert!(peer_info
|
||||
.pending_requests
|
||||
.collect_ids::<Vec<_>>()
|
||||
.is_empty());
|
||||
assert_eq!(peer_info.failed_requests, &[req_id_1]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_transaction_index() {
|
||||
let capabilities = capabilities();
|
||||
|
||||
let (provider, proto) = setup(capabilities);
|
||||
let flow_params = proto.flow_params.read().clone();
|
||||
|
||||
let cur_status = status(provider.client.chain_info());
|
||||
|
||||
{
|
||||
let packet_body = write_handshake(&cur_status, &capabilities, &proto);
|
||||
proto.on_connect(1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
|
||||
proto.handle_packet(&Expect::Nothing, 1, packet::STATUS, &packet_body);
|
||||
}
|
||||
|
||||
let req_id = 112;
|
||||
let key1: H256 = U256::from(11223344).into();
|
||||
|
||||
let request =
|
||||
Request::TransactionIndex(IncompleteTransactionIndexRequest { hash: key1.into() });
|
||||
|
||||
let requests = encode_single(request.clone());
|
||||
let request_body = make_packet(req_id, &requests);
|
||||
let response = {
|
||||
let response = vec![Response::TransactionIndex(TransactionIndexResponse {
|
||||
num: 100,
|
||||
hash: key1,
|
||||
index: 55,
|
||||
})];
|
||||
|
||||
let new_creds =
|
||||
*flow_params.limit() - flow_params.compute_cost_multi(requests.requests()).unwrap();
|
||||
|
||||
let mut response_stream = RlpStream::new_list(3);
|
||||
|
||||
response_stream
|
||||
.append(&req_id)
|
||||
.append(&new_creds)
|
||||
.append_list(&response);
|
||||
response_stream.out()
|
||||
};
|
||||
|
||||
let expected = Expect::Respond(packet::RESPONSE, response);
|
||||
proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sync_statistics() {
|
||||
let mut stats = Statistics::new();
|
||||
|
||||
// Empty set should return 1.0
|
||||
assert_eq!(stats.avg_peer_count(), 1.0);
|
||||
|
||||
// Average < 1.0 should return 1.0
|
||||
stats.add_peer_count(0);
|
||||
assert_eq!(stats.avg_peer_count(), 1.0);
|
||||
|
||||
stats = Statistics::new();
|
||||
|
||||
const N: f64 = 50.0;
|
||||
|
||||
for i in 1..(N as usize + 1) {
|
||||
stats.add_peer_count(i);
|
||||
}
|
||||
|
||||
// Compute the average for the sum 1..N
|
||||
assert_eq!(stats.avg_peer_count(), N * (N + 1.0) / 2.0 / N);
|
||||
|
||||
for _ in 1..(MOVING_SAMPLE_SIZE + 1) {
|
||||
stats.add_peer_count(40);
|
||||
}
|
||||
|
||||
// Test that it returns the average of the last
|
||||
// `MOVING_SAMPLE_SIZE` values
|
||||
assert_eq!(stats.avg_peer_count(), 40.0);
|
||||
}
|
@ -1,719 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! On-demand chain requests over LES. This is a major building block for RPCs.
|
||||
//! The request service is implemented using Futures. Higher level request handlers
|
||||
//! will take the raw data received here and extract meaningful results from it.
|
||||
|
||||
use std::{cmp, collections::HashMap, marker::PhantomData, sync::Arc, time::Duration};
|
||||
|
||||
use futures::{
|
||||
sync::oneshot::{self, Receiver},
|
||||
Async, Future, Poll,
|
||||
};
|
||||
use network::PeerId;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use rand::{self, Rng};
|
||||
|
||||
use net::{
|
||||
Announcement, BasicContext, Capabilities, EventContext, Handler, PeerStatus, ReqId, Status,
|
||||
};
|
||||
|
||||
use self::request::CheckedRequest;
|
||||
use cache::Cache;
|
||||
use request::{self as basic_request, Request as NetworkRequest};
|
||||
|
||||
pub use self::{
|
||||
request::{Error as ValidityError, HeaderRef, Request, Response},
|
||||
request_guard::{Error as RequestError, RequestGuard},
|
||||
response_guard::{Error as ResponseGuardError, Inner as ResponseGuardInner, ResponseGuard},
|
||||
};
|
||||
pub use ethcore::executed::ExecutionResult;
|
||||
pub use types::request::ResponseError;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub mod request;
|
||||
mod request_guard;
|
||||
mod response_guard;
|
||||
|
||||
/// The initial backoff interval for OnDemand queries
|
||||
pub const DEFAULT_REQUEST_MIN_BACKOFF_DURATION: Duration = Duration::from_secs(10);
|
||||
/// The maximum request interval for OnDemand queries
|
||||
pub const DEFAULT_REQUEST_MAX_BACKOFF_DURATION: Duration = Duration::from_secs(100);
|
||||
/// The default window length a response is evaluated
|
||||
pub const DEFAULT_RESPONSE_TIME_TO_LIVE: Duration = Duration::from_secs(10);
|
||||
/// The default number of maximum backoff iterations
|
||||
pub const DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS: usize = 10;
|
||||
/// The default number failed request to be regarded as failure
|
||||
pub const DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS: usize = 1;
|
||||
|
||||
/// OnDemand related errors
|
||||
pub mod error {
|
||||
// Silence: `use of deprecated item 'std::error::Error::cause': replaced by Error::source, which can support downcasting`
|
||||
// https://github.com/paritytech/parity-ethereum/issues/10302
|
||||
#![allow(deprecated)]
|
||||
|
||||
use futures::sync::oneshot::Canceled;
|
||||
|
||||
error_chain! {
|
||||
|
||||
foreign_links {
|
||||
ChannelCanceled(Canceled) #[doc = "Canceled oneshot channel"];
|
||||
}
|
||||
|
||||
errors {
|
||||
#[doc = "Timeout bad response"]
|
||||
BadResponse(err: String) {
|
||||
description("Max response evaluation time exceeded")
|
||||
display("{}", err)
|
||||
}
|
||||
|
||||
#[doc = "OnDemand requests limit exceeded"]
|
||||
RequestLimit {
|
||||
description("OnDemand request maximum backoff iterations exceeded")
|
||||
display("OnDemand request maximum backoff iterations exceeded")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Public interface for performing network requests `OnDemand`
|
||||
pub trait OnDemandRequester: Send + Sync {
|
||||
/// Submit a strongly-typed batch of requests.
|
||||
///
|
||||
/// Fails if back-reference are not coherent.
|
||||
fn request<T>(
|
||||
&self,
|
||||
ctx: &dyn BasicContext,
|
||||
requests: T,
|
||||
) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
|
||||
where
|
||||
T: request::RequestAdapter;
|
||||
|
||||
/// Submit a vector of requests to be processed together.
|
||||
///
|
||||
/// Fails if back-references are not coherent.
|
||||
/// The returned vector of responses will correspond to the requests exactly.
|
||||
fn request_raw(
|
||||
&self,
|
||||
ctx: &dyn BasicContext,
|
||||
requests: Vec<Request>,
|
||||
) -> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput>;
|
||||
}
|
||||
|
||||
// relevant peer info.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
struct Peer {
|
||||
status: Status,
|
||||
capabilities: Capabilities,
|
||||
}
|
||||
|
||||
impl Peer {
|
||||
// whether this peer can fulfill the necessary capabilities for the given
|
||||
// request.
|
||||
fn can_fulfill(&self, request: &Capabilities) -> bool {
|
||||
let local_caps = &self.capabilities;
|
||||
let can_serve_since = |req, local| match (req, local) {
|
||||
(Some(request_block), Some(serve_since)) => request_block >= serve_since,
|
||||
(Some(_), None) => false,
|
||||
(None, _) => true,
|
||||
};
|
||||
|
||||
local_caps.serve_headers >= request.serve_headers
|
||||
&& can_serve_since(request.serve_chain_since, local_caps.serve_chain_since)
|
||||
&& can_serve_since(request.serve_state_since, local_caps.serve_state_since)
|
||||
}
|
||||
}
|
||||
|
||||
/// Either an array of responses or a single error.
|
||||
type PendingResponse = self::error::Result<Vec<Response>>;
|
||||
|
||||
// Attempted request info and sender to put received value.
|
||||
struct Pending {
|
||||
requests: basic_request::Batch<CheckedRequest>,
|
||||
net_requests: basic_request::Batch<NetworkRequest>,
|
||||
required_capabilities: Capabilities,
|
||||
responses: Vec<Response>,
|
||||
sender: oneshot::Sender<PendingResponse>,
|
||||
request_guard: RequestGuard,
|
||||
response_guard: ResponseGuard,
|
||||
}
|
||||
|
||||
impl Pending {
    // answer as many of the given requests from the supplied cache as possible.
    // TODO: support re-shuffling.
    fn answer_from_cache(&mut self, cache: &Mutex<Cache>) {
        while !self.requests.is_complete() {
            // the next unanswered request always sits at index `num_answered`.
            let idx = self.requests.num_answered();
            match self.requests[idx].respond_local(cache) {
                Some(response) => {
                    self.requests.supply_response_unchecked(&response);

                    // update header and back-references after each from-cache
                    // response to ensure that the requests are left in a consistent
                    // state and increase the likelihood of being able to answer
                    // the next request from cache.
                    self.update_header_refs(idx, &response);
                    self.fill_unanswered();

                    self.responses.push(response);
                }
                // stop at the first request the cache cannot answer.
                None => break,
            }
        }
    }

    // update header refs if the given response contains a header future requests require for
    // verification.
    // `idx` is the index of the request the response corresponds to.
    fn update_header_refs(&mut self, idx: usize, response: &Response) {
        if let Response::HeaderByHash(ref hdr) = *response {
            // fill the header for all requests waiting on this one.
            // TODO: could be faster if we stored a map usize => Vec<usize>
            // but typical use just has one header request that others
            // depend on.
            for r in self.requests.iter_mut().skip(idx + 1) {
                if r.needs_header().map_or(false, |(i, _)| i == idx) {
                    r.provide_header(hdr.clone())
                }
            }
        }
    }

    // supply a response. on success, the extracted response is stored and any
    // requests back-referencing it get their header filled in.
    fn supply_response(
        &mut self,
        cache: &Mutex<Cache>,
        response: &basic_request::Response,
    ) -> Result<(), basic_request::ResponseError<self::request::Error>> {
        match self.requests.supply_response(&cache, response) {
            Ok(response) => {
                // the response answers the request at the position it is
                // pushed to, so back-references resolve against that index.
                let idx = self.responses.len();
                self.update_header_refs(idx, &response);
                self.responses.push(response);
                Ok(())
            }
            Err(e) => Err(e),
        }
    }

    // if the requests are complete, send the result and consume self.
    fn try_complete(self) -> Option<Self> {
        if self.requests.is_complete() {
            // a failed send only means the caller stopped waiting; log and drop.
            if self.sender.send(Ok(self.responses)).is_err() {
                debug!(target: "on_demand", "Dropped oneshot channel receiver on request");
            }
            None
        } else {
            Some(self)
        }
    }

    // propagate already-known outputs into requests that reference them.
    fn fill_unanswered(&mut self) {
        self.requests.fill_unanswered();
    }

    // update the cached network requests.
    fn update_net_requests(&mut self) {
        use request::IncompleteRequest;

        let mut builder = basic_request::Builder::default();
        let num_answered = self.requests.num_answered();
        // remaining requests are re-indexed to start from zero.
        let mut mapping = move |idx| idx - num_answered;

        for request in self.requests.iter().skip(num_answered) {
            let mut net_req = request.clone().into_net_request();

            // all back-references with request index less than `num_answered` have
            // been filled by now. all remaining requests point to nothing earlier
            // than the next unanswered request.
            net_req.adjust_refs(&mut mapping);
            builder
                .push(net_req)
                .expect("all back-references to answered requests have been filled; qed");
        }

        // update pending fields.
        let capabilities = guess_capabilities(&self.requests[num_answered..]);
        self.net_requests = builder.build();
        self.required_capabilities = capabilities;
    }

    // received too many empty responses; may be a way to indicate a faulty request.
    // reports the failure (with the names of the batched requests) to the caller
    // and consumes self.
    fn bad_response(self, response_err: ResponseGuardError) {
        let reqs: Vec<&str> = self
            .requests
            .requests()
            .iter()
            .map(|req| match req {
                CheckedRequest::HeaderProof(_, _) => "HeaderProof",
                CheckedRequest::HeaderByHash(_, _) => "HeaderByHash",
                CheckedRequest::HeaderWithAncestors(_, _) => "HeaderWithAncestors",
                CheckedRequest::TransactionIndex(_, _) => "TransactionIndex",
                CheckedRequest::Receipts(_, _) => "Receipts",
                CheckedRequest::Body(_, _) => "Body",
                CheckedRequest::Account(_, _) => "Account",
                CheckedRequest::Code(_, _) => "Code",
                CheckedRequest::Execution(_, _) => "Execution",
                CheckedRequest::Signal(_, _) => "Signal",
            })
            .collect();

        let err = format!(
            "Bad response on {}: [ {} ]. {}",
            if reqs.len() > 1 {
                "requests"
            } else {
                "request"
            },
            reqs.join(", "),
            response_err
        );

        let err = self::error::ErrorKind::BadResponse(err);
        if self.sender.send(Err(err.into())).is_err() {
            debug!(target: "on_demand", "Dropped oneshot channel receiver on no response");
        }
    }

    // returning a peer discovery timeout during query attempts: report
    // `RequestLimit` to the caller and consume self.
    fn request_limit_reached(self) {
        let err = self::error::ErrorKind::RequestLimit;
        if self.sender.send(Err(err.into())).is_err() {
            debug!(target: "on_demand", "Dropped oneshot channel receiver on time out");
        }
    }
}
|
||||
|
||||
// helper to guess capabilities required for a given batch of network requests.
|
||||
fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities {
|
||||
let mut caps = Capabilities {
|
||||
serve_headers: false,
|
||||
serve_chain_since: None,
|
||||
serve_state_since: None,
|
||||
tx_relay: false,
|
||||
};
|
||||
|
||||
let update_since = |current: &mut Option<u64>, new| {
|
||||
*current = match *current {
|
||||
Some(x) => Some(::std::cmp::min(x, new)),
|
||||
None => Some(new),
|
||||
}
|
||||
};
|
||||
|
||||
for request in requests {
|
||||
match *request {
|
||||
// TODO: might be worth returning a required block number for this also.
|
||||
CheckedRequest::HeaderProof(_, _) => caps.serve_headers = true,
|
||||
CheckedRequest::HeaderByHash(_, _) => caps.serve_headers = true,
|
||||
CheckedRequest::HeaderWithAncestors(_, _) => caps.serve_headers = true,
|
||||
CheckedRequest::TransactionIndex(_, _) => {} // hashes yield no info.
|
||||
CheckedRequest::Signal(_, _) => caps.serve_headers = true,
|
||||
CheckedRequest::Body(ref req, _) => {
|
||||
if let Ok(ref hdr) = req.0.as_ref() {
|
||||
update_since(&mut caps.serve_chain_since, hdr.number());
|
||||
}
|
||||
}
|
||||
CheckedRequest::Receipts(ref req, _) => {
|
||||
if let Ok(ref hdr) = req.0.as_ref() {
|
||||
update_since(&mut caps.serve_chain_since, hdr.number());
|
||||
}
|
||||
}
|
||||
CheckedRequest::Account(ref req, _) => {
|
||||
if let Ok(ref hdr) = req.header.as_ref() {
|
||||
update_since(&mut caps.serve_state_since, hdr.number());
|
||||
}
|
||||
}
|
||||
CheckedRequest::Code(ref req, _) => {
|
||||
if let Ok(ref hdr) = req.header.as_ref() {
|
||||
update_since(&mut caps.serve_state_since, hdr.number());
|
||||
}
|
||||
}
|
||||
CheckedRequest::Execution(ref req, _) => {
|
||||
if let Ok(ref hdr) = req.header.as_ref() {
|
||||
update_since(&mut caps.serve_state_since, hdr.number());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
caps
|
||||
}
|
||||
|
||||
/// A future extracting the concrete output type of the generic adapter
/// from a vector of responses.
pub struct OnResponses<T: request::RequestAdapter> {
    // receiving end of the oneshot channel the raw responses arrive on.
    receiver: Receiver<PendingResponse>,
    // binds the adapter type without storing a value of it.
    _marker: PhantomData<T>,
}
|
||||
|
||||
impl<T: request::RequestAdapter> Future for OnResponses<T> {
|
||||
type Item = T::Out;
|
||||
type Error = self::error::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.receiver.poll() {
|
||||
Ok(Async::Ready(Ok(v))) => Ok(Async::Ready(T::extract_from(v))),
|
||||
Ok(Async::Ready(Err(e))) => Err(e),
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Err(e) => Err(e.into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// On demand request service. See module docs for more details.
/// Accumulates info about all peers' capabilities and dispatches
/// requests to them accordingly.
// lock in declaration order.
pub struct OnDemand {
    // request sets not yet handed to any peer.
    pending: RwLock<Vec<Pending>>,
    // all connected peers with their latest status and capabilities.
    peers: RwLock<HashMap<PeerId, Peer>>,
    // request sets currently dispatched to a peer, keyed by request id.
    in_transit: RwLock<HashMap<ReqId, Pending>>,
    // shared cache used to answer requests locally when possible.
    cache: Arc<Mutex<Cache>>,
    // when true (test builds), dispatch only happens when triggered manually.
    no_immediate_dispatch: bool,
    // time window the response guard waits for a successful response.
    response_time_window: Duration,
    // initial backoff of the per-request circuit breaker.
    request_backoff_start: Duration,
    // upper bound on the circuit-breaker backoff.
    request_backoff_max: Duration,
    // maximum number of backoff rounds before a request is abandoned.
    request_backoff_rounds_max: usize,
    // consecutive errors tolerated before the breaker opens.
    request_number_of_consecutive_errors: usize,
}
|
||||
|
||||
impl OnDemandRequester for OnDemand {
    // build a checked batch from the raw requests, validate header
    // back-references, and submit it; returns the receiver the final
    // result will arrive on.
    fn request_raw(
        &self,
        ctx: &dyn BasicContext,
        requests: Vec<Request>,
    ) -> Result<Receiver<PendingResponse>, basic_request::NoSuchOutput> {
        let (sender, receiver) = oneshot::channel();
        // nothing requested: answer immediately with an empty response set.
        if requests.is_empty() {
            assert!(
                sender.send(Ok(Vec::new())).is_ok(),
                "receiver still in scope; qed"
            );
            return Ok(receiver);
        }

        let mut builder = basic_request::Builder::default();

        let responses = Vec::with_capacity(requests.len());

        // maps request index => header field of the `HeaderByHash` request at
        // that index, used to validate back-references below.
        let mut header_producers = HashMap::new();
        for (i, request) in requests.into_iter().enumerate() {
            let request = CheckedRequest::from(request);

            // ensure that all requests needing headers will get them.
            if let Some((idx, field)) = request.needs_header() {
                // a request chain with a header back-reference is valid only if it both
                // points to a request that returns a header and has the same back-reference
                // for the block hash.
                match header_producers.get(&idx) {
                    Some(ref f) if &field == *f => {}
                    _ => return Err(basic_request::NoSuchOutput),
                }
            }
            if let CheckedRequest::HeaderByHash(ref req, _) = request {
                header_producers.insert(i, req.0);
            }

            builder.push(request)?;
        }

        let requests = builder.build();
        let net_requests = requests.clone().map_requests(|req| req.into_net_request());
        let capabilities = guess_capabilities(requests.requests());

        self.submit_pending(
            ctx,
            Pending {
                requests,
                net_requests,
                required_capabilities: capabilities,
                responses,
                sender,
                request_guard: RequestGuard::new(
                    self.request_number_of_consecutive_errors as u32,
                    self.request_backoff_rounds_max,
                    self.request_backoff_start,
                    self.request_backoff_max,
                ),
                response_guard: ResponseGuard::new(self.response_time_window),
            },
        );

        Ok(receiver)
    }

    // typed counterpart of `request_raw`: wraps the receiver in a future
    // that extracts the adapter's concrete output type.
    fn request<T>(
        &self,
        ctx: &dyn BasicContext,
        requests: T,
    ) -> Result<OnResponses<T>, basic_request::NoSuchOutput>
    where
        T: request::RequestAdapter,
    {
        self.request_raw(ctx, requests.make_requests())
            .map(|recv| OnResponses {
                receiver: recv,
                _marker: PhantomData,
            })
    }
}
|
||||
|
||||
impl OnDemand {
    /// Create a new `OnDemand` service with the given cache.
    ///
    /// Sub-second circuit-breaker durations are clamped to one second
    /// (see `sanitize_circuit_breaker_input`).
    pub fn new(
        cache: Arc<Mutex<Cache>>,
        response_time_window: Duration,
        request_backoff_start: Duration,
        request_backoff_max: Duration,
        request_backoff_rounds_max: usize,
        request_number_of_consecutive_errors: usize,
    ) -> Self {
        Self {
            pending: RwLock::new(Vec::new()),
            peers: RwLock::new(HashMap::new()),
            in_transit: RwLock::new(HashMap::new()),
            cache,
            no_immediate_dispatch: false,
            response_time_window: Self::sanitize_circuit_breaker_input(
                response_time_window,
                "Response time window",
            ),
            request_backoff_start: Self::sanitize_circuit_breaker_input(
                request_backoff_start,
                "Request initial backoff time window",
            ),
            request_backoff_max: Self::sanitize_circuit_breaker_input(
                request_backoff_max,
                "Request maximum backoff time window",
            ),
            request_backoff_rounds_max,
            request_number_of_consecutive_errors,
        }
    }

    // clamp a configured duration to at least one second, warning when it was shorter.
    fn sanitize_circuit_breaker_input(dur: Duration, name: &'static str) -> Duration {
        if dur.as_secs() < 1 {
            warn!(target: "on_demand",
                "{} is too short must be at least 1 second, configuring it to 1 second", name);
            Duration::from_secs(1)
        } else {
            dur
        }
    }

    // make a test version: this doesn't dispatch pending requests
    // until you trigger it manually.
    #[cfg(test)]
    fn new_test(
        cache: Arc<Mutex<Cache>>,
        request_ttl: Duration,
        request_backoff_start: Duration,
        request_backoff_max: Duration,
        request_backoff_rounds_max: usize,
        request_number_of_consecutive_errors: usize,
    ) -> Self {
        let mut me = OnDemand::new(
            cache,
            request_ttl,
            request_backoff_start,
            request_backoff_max,
            request_backoff_rounds_max,
            request_number_of_consecutive_errors,
        );
        me.no_immediate_dispatch = true;

        me
    }

    // maybe dispatch pending requests.
    // no-op when immediate dispatch is disabled (test mode).
    fn attempt_dispatch(&self, ctx: &dyn BasicContext) {
        if !self.no_immediate_dispatch {
            self.dispatch_pending(ctx)
        }
    }

    // dispatch pending requests, and discard those for which the corresponding
    // receiver has been dropped.
    fn dispatch_pending(&self, ctx: &dyn BasicContext) {
        // cheap early-out before taking the write lock.
        if self.pending.read().is_empty() {
            return;
        }

        let mut pending = self.pending.write();

        // iterate over all pending requests, and check them for hang-up.
        // then, try and find a peer who can serve it.
        let peers = self.peers.read();

        *pending = ::std::mem::replace(&mut *pending, Vec::new())
            .into_iter()
            .filter(|pending| !pending.sender.is_canceled())
            .filter_map(|mut pending| {
                let num_peers = peers.len();
                // The first peer to dispatch the request is chosen at random
                let rand = rand::thread_rng().gen_range(0, cmp::max(1, num_peers));

                for (peer_id, peer) in peers
                    .iter()
                    .cycle()
                    .skip(rand)
                    .take(num_peers)
                {
                    if !peer.can_fulfill(&pending.required_capabilities) {
                        trace!(target: "on_demand", "Peer {} without required capabilities, skipping", peer_id);
                        continue
                    }

                    if pending.request_guard.is_call_permitted() {
                        if let Ok(req_id) = ctx.request_from(*peer_id, pending.net_requests.clone()) {
                            // dispatched: track it until responses arrive.
                            self.in_transit.write().insert(req_id, pending);
                            return None;
                        }
                    }
                }

                // Register that the request round failed
                if let RequestError::ReachedLimit = pending.request_guard.register_error() {
                    // too many failed rounds: report failure and drop.
                    pending.request_limit_reached();
                    None
                } else {
                    Some(pending)
                }
            })
            .collect(); // `pending` now contains all requests we couldn't dispatch

        trace!(target: "on_demand", "Was unable to dispatch {} requests.", pending.len());
    }

    // submit a pending request set. attempts to answer from cache before
    // going to the network. if complete, sends response and consumes the struct.
    fn submit_pending(&self, ctx: &dyn BasicContext, mut pending: Pending) {
        // answer as many requests from cache as we can, and schedule for dispatch
        // if incomplete.
        pending.answer_from_cache(&*self.cache);
        if let Some(mut pending) = pending.try_complete() {
            // update cached requests
            pending.update_net_requests();
            // push into `pending` buffer
            self.pending.write().push(pending);
            // try to dispatch
            self.attempt_dispatch(ctx);
        }
    }
}
|
||||
|
||||
impl Handler for OnDemand {
    // record the new peer, then see whether any pending request can now be served.
    fn on_connect(
        &self,
        ctx: &dyn EventContext,
        status: &Status,
        capabilities: &Capabilities,
    ) -> PeerStatus {
        self.peers.write().insert(
            ctx.peer(),
            Peer {
                status: status.clone(),
                capabilities: *capabilities,
            },
        );
        self.attempt_dispatch(ctx.as_basic());
        PeerStatus::Kept
    }

    // forget the peer and re-queue any requests it never answered so another
    // peer can pick them up.
    fn on_disconnect(&self, ctx: &dyn EventContext, unfulfilled: &[ReqId]) {
        self.peers.write().remove(&ctx.peer());
        let ctx = ctx.as_basic();

        {
            let mut pending = self.pending.write();
            for unfulfilled in unfulfilled {
                if let Some(unfulfilled) = self.in_transit.write().remove(unfulfilled) {
                    trace!(target: "on_demand", "Attempting to reassign dropped request");
                    pending.push(unfulfilled);
                }
            }
        }

        self.attempt_dispatch(ctx);
    }

    // refresh the peer's recorded status/capabilities, then retry dispatch.
    fn on_announcement(&self, ctx: &dyn EventContext, announcement: &Announcement) {
        {
            let mut peers = self.peers.write();
            if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) {
                peer.status.update_from(&announcement);
                peer.capabilities.update_from(&announcement);
            }
        }

        self.attempt_dispatch(ctx.as_basic());
    }

    // feed a batch of responses into the matching in-transit request set.
    fn on_responses(
        &self,
        ctx: &dyn EventContext,
        req_id: ReqId,
        responses: &[basic_request::Response],
    ) {
        // unknown request id: responses for something we no longer track.
        let mut pending = match self.in_transit.write().remove(&req_id) {
            Some(req) => req,
            None => return,
        };

        if responses.is_empty() {
            // Max number of `bad` responses reached, drop the request
            if let Err(e) = pending
                .response_guard
                .register_error(&ResponseError::Validity(ValidityError::Empty))
            {
                pending.bad_response(e);
                return;
            }
        }

        // for each incoming response
        // 1. ensure verification data filled.
        // 2. pending.requests.supply_response
        // 3. if extracted on-demand response, keep it for later.
        for response in responses {
            if let Err(e) = pending.supply_response(&*self.cache, response) {
                let peer = ctx.peer();
                debug!(target: "on_demand", "Peer {} gave bad response: {:?}", peer, e);
                ctx.disable_peer(peer);

                // Max number of `bad` responses reached, drop the request
                if let Err(err) = pending.response_guard.register_error(&e) {
                    pending.bad_response(err);
                    return;
                }
            }
        }

        pending.fill_unanswered();
        // answer what we can from cache and re-queue the remainder.
        self.submit_pending(ctx.as_basic(), pending);
    }

    // periodic retry of anything still pending.
    fn tick(&self, ctx: &dyn BasicContext) {
        self.attempt_dispatch(ctx)
    }
}
|
File diff suppressed because it is too large
Load Diff
@ -1,133 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use failsafe;
|
||||
use std::time::Duration;
|
||||
|
||||
// failure policy used by `RequestGuard`: trips after a number of consecutive
// failures, with exponentially growing backoff between attempts.
type RequestPolicy = failsafe::failure_policy::ConsecutiveFailures<failsafe::backoff::Exponential>;
|
||||
|
||||
/// Error wrapped on-top of `FailsafeError`
#[derive(Debug, PartialEq)]
pub enum Error {
    /// The call is let through
    LetThrough,
    /// The call was rejected by the guard
    Rejected,
    /// The request reached the maximum of backoff iterations
    ReachedLimit,
}
|
||||
|
||||
/// Handle and register requests that can fail
#[derive(Debug)]
pub struct RequestGuard {
    // number of completed backoff rounds so far.
    backoff_round: usize,
    // rounds allowed before `register_error` reports `ReachedLimit`.
    max_backoff_rounds: usize,
    // underlying circuit-breaker state machine.
    state: failsafe::StateMachine<RequestPolicy, ()>,
}
|
||||
|
||||
impl RequestGuard {
|
||||
/// Constructor
|
||||
pub fn new(
|
||||
consecutive_failures: u32,
|
||||
max_backoff_rounds: usize,
|
||||
start_backoff: Duration,
|
||||
max_backoff: Duration,
|
||||
) -> Self {
|
||||
let backoff = failsafe::backoff::exponential(start_backoff, max_backoff);
|
||||
// success_rate not used because only errors are registered
|
||||
let policy =
|
||||
failsafe::failure_policy::consecutive_failures(consecutive_failures as u32, backoff);
|
||||
|
||||
Self {
|
||||
backoff_round: 0,
|
||||
max_backoff_rounds,
|
||||
state: failsafe::StateMachine::new(policy, ()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the state after a `faulty` call
|
||||
pub fn register_error(&mut self) -> Error {
|
||||
trace!(target: "circuit_breaker", "RequestGuard; backoff_round: {}/{}, state {:?}",
|
||||
self.backoff_round, self.max_backoff_rounds, self.state);
|
||||
|
||||
if self.backoff_round >= self.max_backoff_rounds {
|
||||
Error::ReachedLimit
|
||||
} else if self.state.is_call_permitted() {
|
||||
self.state.on_error();
|
||||
if self.state.is_call_permitted() {
|
||||
Error::LetThrough
|
||||
} else {
|
||||
self.backoff_round += 1;
|
||||
Error::Rejected
|
||||
}
|
||||
} else {
|
||||
Error::Rejected
|
||||
}
|
||||
}
|
||||
|
||||
/// Poll the circuit breaker, to check if the call is permitted
|
||||
pub fn is_call_permitted(&self) -> bool {
|
||||
self.state.is_call_permitted()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::{iter, thread};

    #[test]
    fn one_consecutive_failure_with_10_backoffs() {
        // 1, 2, 4, 5, 5 .... 5
        let binary_exp_backoff = vec![1_u64, 2, 4]
            .into_iter()
            .chain(iter::repeat(5_u64).take(7));
        let mut guard = RequestGuard::new(1, 10, Duration::from_secs(1), Duration::from_secs(5));
        for backoff in binary_exp_backoff {
            assert_eq!(guard.register_error(), Error::Rejected);
            // sleep out the backoff window instead of busy-spinning a CPU core;
            // the small margin mirrors the original strictly-greater `<=` wait.
            thread::sleep(Duration::from_secs(backoff) + Duration::from_millis(50));
        }
        assert_eq!(
            guard.register_error(),
            Error::ReachedLimit,
            "10 backoffs should be error"
        );
    }

    #[test]
    fn five_consecutive_failures_with_3_backoffs() {
        let mut guard = RequestGuard::new(5, 3, Duration::from_secs(1), Duration::from_secs(30));

        // register five errors
        for _ in 0..4 {
            assert_eq!(guard.register_error(), Error::LetThrough);
        }

        let binary_exp_backoff = [1, 2, 4];
        for backoff in &binary_exp_backoff {
            assert_eq!(guard.register_error(), Error::Rejected);
            // see note above: sleep rather than spin.
            thread::sleep(Duration::from_secs(*backoff) + Duration::from_millis(50));
        }

        assert_eq!(
            guard.register_error(),
            Error::ReachedLimit,
            "3 backoffs should be an error"
        );
    }
}
|
@ -1,199 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! ResponseGuard implementation.
|
||||
//! It is responsible for the receiving end of `Pending Request` (see `OnDemand` module docs for more information)
|
||||
//! The major functionality is the following:
|
||||
//! 1) Register non-successful responses, which will be reported back if the request fails
|
||||
//! 2) A timeout mechanism that will wait for successful response at most t seconds
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fmt,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use super::{ResponseError, ValidityError};
|
||||
|
||||
/// Response guard error type
#[derive(Debug, Eq, PartialEq)]
pub enum Error {
    /// No majority, the error reason can't be determined
    /// (carries the total number of registered responses)
    NoMajority(usize),
    /// Majority, with the error reason
    /// (reason, count of that reason, total number of responses)
    Majority(Inner, usize, usize),
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
Error::Majority(err, majority, total) => write!(
|
||||
f,
|
||||
"Error cause was {:?}, (majority count: {} / total: {})",
|
||||
err, majority, total
|
||||
),
|
||||
Error::NoMajority(total) => write!(
|
||||
f,
|
||||
"Error cause couldn't be determined, the total number of responses was {}",
|
||||
total
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Dummy type to convert a generic type with no trait bounds
/// into something hashable/orderable, so failure reasons can be
/// tallied in the guard's `HashMap`.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub enum Inner {
    /// Bad execution proof
    BadProof,
    /// RLP decoding
    Decoder,
    /// Empty response
    EmptyResponse,
    /// Wrong header sequence
    HeaderByNumber,
    /// Too few results
    TooFewResults,
    /// Too many results
    TooManyResults,
    /// Trie error
    Trie,
    /// Unresolved header
    UnresolvedHeader,
    /// No responses expected.
    Unexpected,
    /// Wrong hash
    WrongHash,
    /// Wrong Header sequence
    WrongHeaderSequence,
    /// Wrong response kind
    WrongKind,
    /// Wrong number
    WrongNumber,
    /// Wrong Trie Root
    WrongTrieRoot,
}
|
||||
|
||||
/// Handle and register responses that can fail
#[derive(Debug)]
pub struct ResponseGuard {
    // when the request started; the timeout is measured from here.
    request_start: Instant,
    // how long to keep waiting for a successful response.
    time_to_live: Duration,
    // tally of failure reasons seen so far.
    responses: HashMap<Inner, usize>,
    // total number of failures registered.
    number_responses: usize,
}
|
||||
|
||||
impl ResponseGuard {
    /// Constructor
    pub fn new(time_to_live: Duration) -> Self {
        Self {
            request_start: Instant::now(),
            time_to_live,
            responses: HashMap::new(),
            number_responses: 0,
        }
    }

    // map a response error onto the hashable `Inner` tag used for counting.
    fn into_reason(&self, err: &ResponseError<super::request::Error>) -> Inner {
        match err {
            ResponseError::Unexpected => Inner::Unexpected,
            ResponseError::Validity(ValidityError::BadProof) => Inner::BadProof,
            ResponseError::Validity(ValidityError::Decoder(_)) => Inner::Decoder,
            ResponseError::Validity(ValidityError::Empty) => Inner::EmptyResponse,
            ResponseError::Validity(ValidityError::HeaderByNumber) => Inner::HeaderByNumber,
            ResponseError::Validity(ValidityError::TooFewResults(_, _)) => Inner::TooFewResults,
            ResponseError::Validity(ValidityError::TooManyResults(_, _)) => Inner::TooManyResults,
            ResponseError::Validity(ValidityError::Trie(_)) => Inner::Trie,
            ResponseError::Validity(ValidityError::UnresolvedHeader(_)) => Inner::UnresolvedHeader,
            ResponseError::Validity(ValidityError::WrongHash(_, _)) => Inner::WrongHash,
            ResponseError::Validity(ValidityError::WrongHeaderSequence) => {
                Inner::WrongHeaderSequence
            }
            ResponseError::Validity(ValidityError::WrongKind) => Inner::WrongKind,
            ResponseError::Validity(ValidityError::WrongNumber(_, _)) => Inner::WrongNumber,
            ResponseError::Validity(ValidityError::WrongTrieRoot(_, _)) => Inner::WrongTrieRoot,
        }
    }

    /// Update the state after a `faulty` call
    ///
    /// Returns `Ok(())` while the time-to-live has not elapsed; afterwards
    /// reports the most frequent failure reason, or `NoMajority` when the
    /// top count is shared by several reasons.
    pub fn register_error(
        &mut self,
        err: &ResponseError<super::request::Error>,
    ) -> Result<(), Error> {
        let err = self.into_reason(err);
        *self.responses.entry(err).or_insert(0) += 1;
        self.number_responses = self.number_responses.saturating_add(1);
        trace!(target: "circuit_breaker", "ResponseGuard: {:?}", self.responses);
        // The request has exceeded its timeout
        if self.request_start.elapsed() >= self.time_to_live {
            let (&err, &max_count) = self
                .responses
                .iter()
                .max_by_key(|(_k, v)| *v)
                .expect("got at least one element; qed");
            // "majority" here means the top count is unique among all reasons,
            // i.e. strictly more frequent than any other.
            let majority = self.responses.values().filter(|v| **v == max_count).count() == 1;
            if majority {
                Err(Error::Majority(err, max_count, self.number_responses))
            } else {
                Err(Error::NoMajority(self.number_responses))
            }
        } else {
            Ok(())
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    #[test]
    fn test_basic_by_majority() {
        // short TTL keeps the test fast; all errors are registered well
        // within it, and only the final call happens after it elapses.
        let mut guard = ResponseGuard::new(Duration::from_millis(300));
        guard
            .register_error(&ResponseError::Validity(ValidityError::Empty))
            .unwrap();
        guard.register_error(&ResponseError::Unexpected).unwrap();
        guard.register_error(&ResponseError::Unexpected).unwrap();
        guard.register_error(&ResponseError::Unexpected).unwrap();
        // wait out the TTL so the next registration reports the verdict.
        thread::sleep(Duration::from_millis(400));

        assert_eq!(
            guard.register_error(&ResponseError::Validity(ValidityError::WrongKind)),
            Err(Error::Majority(Inner::Unexpected, 3, 5))
        );
    }

    #[test]
    fn test_no_majority() {
        let mut guard = ResponseGuard::new(Duration::from_millis(300));
        guard
            .register_error(&ResponseError::Validity(ValidityError::Empty))
            .unwrap();
        guard
            .register_error(&ResponseError::Validity(ValidityError::Empty))
            .unwrap();
        guard.register_error(&ResponseError::Unexpected).unwrap();
        guard.register_error(&ResponseError::Unexpected).unwrap();
        thread::sleep(Duration::from_millis(400));

        assert_eq!(
            guard.register_error(&ResponseError::Validity(ValidityError::WrongKind)),
            Err(Error::NoMajority(5))
        );
    }
}
|
@ -1,716 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Tests for the on-demand service.
|
||||
|
||||
use cache::Cache;
|
||||
use common_types::header::Header;
|
||||
use ethereum_types::H256;
|
||||
use futures::Future;
|
||||
use net::*;
|
||||
use network::{NodeId, PeerId};
|
||||
use parking_lot::Mutex;
|
||||
use request::{self as basic_request, Response};
|
||||
|
||||
use std::{
|
||||
sync::Arc,
|
||||
thread,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use super::{request, HeaderRef, OnDemand, OnDemandRequester, Peer};
|
||||
|
||||
// useful contexts to give the service.
enum Context {
    // no interaction expected: every context method panics.
    NoOp,
    // reports the given peer id from `peer()`.
    WithPeer(PeerId),
    // accepts a dispatch to the given peer, answering with the given request id.
    RequestFrom(PeerId, ReqId),
    // expects the given peer to be disabled (or disconnected).
    Punish(PeerId),
    // fails every dispatch with `Error::NoCredits`; `peer()` reports 0.
    FaultyRequest,
}
|
||||
|
||||
impl EventContext for Context {
|
||||
fn peer(&self) -> PeerId {
|
||||
match *self {
|
||||
Context::WithPeer(id) | Context::RequestFrom(id, _) | Context::Punish(id) => id,
|
||||
Context::FaultyRequest => 0,
|
||||
_ => panic!("didn't expect to have peer queried."),
|
||||
}
|
||||
}
|
||||
|
||||
fn as_basic(&self) -> &dyn BasicContext {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl BasicContext for Context {
|
||||
/// Returns the relevant's peer persistent Id (aka NodeId).
|
||||
fn persistent_peer_id(&self, _: PeerId) -> Option<NodeId> {
|
||||
panic!("didn't expect to provide persistent ID")
|
||||
}
|
||||
|
||||
fn request_from(&self, peer_id: PeerId, _: ::request::NetworkRequests) -> Result<ReqId, Error> {
|
||||
match *self {
|
||||
Context::RequestFrom(id, req_id) => {
|
||||
if peer_id == id {
|
||||
Ok(req_id)
|
||||
} else {
|
||||
Err(Error::NoCredits)
|
||||
}
|
||||
}
|
||||
Context::FaultyRequest => Err(Error::NoCredits),
|
||||
_ => panic!("didn't expect to have requests dispatched."),
|
||||
}
|
||||
}
|
||||
|
||||
fn make_announcement(&self, _: Announcement) {
|
||||
panic!("didn't expect to make announcement")
|
||||
}
|
||||
|
||||
fn disconnect_peer(&self, id: PeerId) {
|
||||
self.disable_peer(id)
|
||||
}
|
||||
|
||||
fn disable_peer(&self, peer_id: PeerId) {
|
||||
match *self {
|
||||
Context::Punish(id) if id == peer_id => {}
|
||||
_ => panic!("Unexpectedly punished peer."),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// test harness.
|
||||
struct Harness {
|
||||
service: OnDemand,
|
||||
}
|
||||
|
||||
impl Harness {
|
||||
fn create() -> Self {
|
||||
let cache = Arc::new(Mutex::new(Cache::new(
|
||||
Default::default(),
|
||||
Duration::from_secs(60),
|
||||
)));
|
||||
Harness {
|
||||
service: OnDemand::new_test(
|
||||
cache,
|
||||
// Response `time_to_live`
|
||||
Duration::from_secs(5),
|
||||
// Request start backoff
|
||||
Duration::from_secs(1),
|
||||
// Request max backoff
|
||||
Duration::from_secs(20),
|
||||
super::DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS,
|
||||
super::DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
fn inject_peer(&self, id: PeerId, peer: Peer) {
|
||||
self.service.peers.write().insert(id, peer);
|
||||
}
|
||||
}
|
||||
|
||||
fn dummy_status() -> Status {
|
||||
Status {
|
||||
protocol_version: 1,
|
||||
network_id: 999,
|
||||
head_td: 1.into(),
|
||||
head_hash: H256::default(),
|
||||
head_num: 1359,
|
||||
genesis_hash: H256::default(),
|
||||
last_head: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn dummy_capabilities() -> Capabilities {
|
||||
Capabilities {
|
||||
serve_headers: true,
|
||||
serve_chain_since: Some(1),
|
||||
serve_state_since: Some(1),
|
||||
tx_relay: true,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn detects_hangup() {
|
||||
let on_demand = Harness::create().service;
|
||||
let result = on_demand.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![request::HeaderByHash(H256::default().into()).into()],
|
||||
);
|
||||
|
||||
assert_eq!(on_demand.pending.read().len(), 1);
|
||||
drop(result);
|
||||
|
||||
on_demand.dispatch_pending(&Context::NoOp);
|
||||
assert!(on_demand.pending.read().is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_request() {
|
||||
let harness = Harness::create();
|
||||
|
||||
let peer_id = 10101;
|
||||
let req_id = ReqId(14426);
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
let header = Header::default();
|
||||
let encoded = header.encoded();
|
||||
|
||||
let recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![request::HeaderByHash(header.hash().into()).into()],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_id, req_id));
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
harness.service.on_responses(
|
||||
&Context::WithPeer(peer_id),
|
||||
req_id,
|
||||
&[Response::Headers(basic_request::HeadersResponse {
|
||||
headers: vec![encoded],
|
||||
})],
|
||||
);
|
||||
|
||||
assert!(recv.wait().is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_capabilities() {
|
||||
let harness = Harness::create();
|
||||
|
||||
let peer_id = 10101;
|
||||
|
||||
let mut capabilities = dummy_capabilities();
|
||||
capabilities.serve_headers = false;
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: capabilities,
|
||||
},
|
||||
);
|
||||
|
||||
let _recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![request::HeaderByHash(H256::default().into()).into()],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness.service.dispatch_pending(&Context::NoOp);
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reassign() {
|
||||
let harness = Harness::create();
|
||||
|
||||
let peer_ids = (10101, 12345);
|
||||
let req_ids = (ReqId(14426), ReqId(555));
|
||||
|
||||
harness.inject_peer(
|
||||
peer_ids.0,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
let header = Header::default();
|
||||
let encoded = header.encoded();
|
||||
|
||||
let recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![request::HeaderByHash(header.hash().into()).into()],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_ids.0, req_ids.0));
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
harness
|
||||
.service
|
||||
.on_disconnect(&Context::WithPeer(peer_ids.0), &[req_ids.0]);
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness.inject_peer(
|
||||
peer_ids.1,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_ids.1, req_ids.1));
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
harness.service.on_responses(
|
||||
&Context::WithPeer(peer_ids.1),
|
||||
req_ids.1,
|
||||
&[Response::Headers(basic_request::HeadersResponse {
|
||||
headers: vec![encoded],
|
||||
})],
|
||||
);
|
||||
|
||||
assert!(recv.wait().is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn partial_response() {
|
||||
let harness = Harness::create();
|
||||
|
||||
let peer_id = 111;
|
||||
let req_ids = (ReqId(14426), ReqId(555));
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
let make = |num| {
|
||||
let mut hdr = Header::default();
|
||||
hdr.set_number(num);
|
||||
|
||||
let encoded = hdr.encoded();
|
||||
(hdr, encoded)
|
||||
};
|
||||
|
||||
let (header1, encoded1) = make(5);
|
||||
let (header2, encoded2) = make(23452);
|
||||
|
||||
// request two headers.
|
||||
let recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![
|
||||
request::HeaderByHash(header1.hash().into()).into(),
|
||||
request::HeaderByHash(header2.hash().into()).into(),
|
||||
],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
// supply only the first one.
|
||||
harness.service.on_responses(
|
||||
&Context::WithPeer(peer_id),
|
||||
req_ids.0,
|
||||
&[Response::Headers(basic_request::HeadersResponse {
|
||||
headers: vec![encoded1],
|
||||
})],
|
||||
);
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
// supply the next one.
|
||||
harness.service.on_responses(
|
||||
&Context::WithPeer(peer_id),
|
||||
req_ids.1,
|
||||
&[Response::Headers(basic_request::HeadersResponse {
|
||||
headers: vec![encoded2],
|
||||
})],
|
||||
);
|
||||
|
||||
assert!(recv.wait().is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn part_bad_part_good() {
|
||||
let harness = Harness::create();
|
||||
|
||||
let peer_id = 111;
|
||||
let req_ids = (ReqId(14426), ReqId(555));
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
let make = |num| {
|
||||
let mut hdr = Header::default();
|
||||
hdr.set_number(num);
|
||||
|
||||
let encoded = hdr.encoded();
|
||||
(hdr, encoded)
|
||||
};
|
||||
|
||||
let (header1, encoded1) = make(5);
|
||||
let (header2, encoded2) = make(23452);
|
||||
|
||||
// request two headers.
|
||||
let recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![
|
||||
request::HeaderByHash(header1.hash().into()).into(),
|
||||
request::HeaderByHash(header2.hash().into()).into(),
|
||||
],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.0));
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
// supply only the first one, but followed by the wrong kind of response.
|
||||
// the first header should be processed.
|
||||
harness.service.on_responses(
|
||||
&Context::Punish(peer_id),
|
||||
req_ids.0,
|
||||
&[
|
||||
Response::Headers(basic_request::HeadersResponse {
|
||||
headers: vec![encoded1],
|
||||
}),
|
||||
Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }),
|
||||
],
|
||||
);
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_id, req_ids.1));
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
// supply the next one.
|
||||
harness.service.on_responses(
|
||||
&Context::WithPeer(peer_id),
|
||||
req_ids.1,
|
||||
&[Response::Headers(basic_request::HeadersResponse {
|
||||
headers: vec![encoded2],
|
||||
})],
|
||||
);
|
||||
|
||||
assert!(recv.wait().is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wrong_kind() {
|
||||
let harness = Harness::create();
|
||||
|
||||
let peer_id = 10101;
|
||||
let req_id = ReqId(14426);
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
let _recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![request::HeaderByHash(H256::default().into()).into()],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_id, req_id));
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
harness.service.on_responses(
|
||||
&Context::Punish(peer_id),
|
||||
req_id,
|
||||
&[Response::Receipts(basic_request::ReceiptsResponse {
|
||||
receipts: vec![],
|
||||
})],
|
||||
);
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn back_references() {
|
||||
let harness = Harness::create();
|
||||
|
||||
let peer_id = 10101;
|
||||
let req_id = ReqId(14426);
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
let header = Header::default();
|
||||
let encoded = header.encoded();
|
||||
|
||||
let recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![
|
||||
request::HeaderByHash(header.hash().into()).into(),
|
||||
request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(),
|
||||
],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_id, req_id));
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
harness.service.on_responses(
|
||||
&Context::WithPeer(peer_id),
|
||||
req_id,
|
||||
&[
|
||||
Response::Headers(basic_request::HeadersResponse {
|
||||
headers: vec![encoded],
|
||||
}),
|
||||
Response::Receipts(basic_request::ReceiptsResponse { receipts: vec![] }),
|
||||
],
|
||||
);
|
||||
|
||||
assert!(recv.wait().is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn bad_back_reference() {
|
||||
let harness = Harness::create();
|
||||
|
||||
let header = Header::default();
|
||||
|
||||
let _ = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![
|
||||
request::HeaderByHash(header.hash().into()).into(),
|
||||
request::BlockReceipts(HeaderRef::Unresolved(1, header.hash().into())).into(),
|
||||
],
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fill_from_cache() {
|
||||
let harness = Harness::create();
|
||||
|
||||
let peer_id = 10101;
|
||||
let req_id = ReqId(14426);
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
let header = Header::default();
|
||||
let encoded = header.encoded();
|
||||
|
||||
let recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::NoOp,
|
||||
vec![
|
||||
request::HeaderByHash(header.hash().into()).into(),
|
||||
request::BlockReceipts(HeaderRef::Unresolved(0, header.hash().into())).into(),
|
||||
],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_id, req_id));
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
|
||||
harness.service.on_responses(
|
||||
&Context::WithPeer(peer_id),
|
||||
req_id,
|
||||
&[Response::Headers(basic_request::HeadersResponse {
|
||||
headers: vec![encoded],
|
||||
})],
|
||||
);
|
||||
|
||||
assert!(recv.wait().is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn request_without_response_should_backoff_and_then_be_dropped() {
|
||||
let harness = Harness::create();
|
||||
let peer_id = 0;
|
||||
let req_id = ReqId(13);
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
let binary_exp_backoff: Vec<u64> = vec![1, 2, 4, 8, 16, 20, 20, 20, 20, 20];
|
||||
|
||||
let _recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::RequestFrom(peer_id, req_id),
|
||||
vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()],
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(harness.service.pending.read().len(), 1);
|
||||
|
||||
for backoff in &binary_exp_backoff {
|
||||
harness.service.dispatch_pending(&Context::FaultyRequest);
|
||||
assert_eq!(
|
||||
harness.service.pending.read().len(),
|
||||
1,
|
||||
"Request should not be dropped"
|
||||
);
|
||||
let now = Instant::now();
|
||||
while now.elapsed() < Duration::from_secs(*backoff) {}
|
||||
}
|
||||
|
||||
harness.service.dispatch_pending(&Context::FaultyRequest);
|
||||
assert_eq!(
|
||||
harness.service.pending.read().len(),
|
||||
0,
|
||||
"Request exceeded the 10 backoff rounds should be dropped"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_responses_exceeds_limit_should_be_dropped() {
|
||||
let harness = Harness::create();
|
||||
let peer_id = 0;
|
||||
let req_id = ReqId(13);
|
||||
|
||||
harness.inject_peer(
|
||||
peer_id,
|
||||
Peer {
|
||||
status: dummy_status(),
|
||||
capabilities: dummy_capabilities(),
|
||||
},
|
||||
);
|
||||
|
||||
let _recv = harness
|
||||
.service
|
||||
.request_raw(
|
||||
&Context::RequestFrom(peer_id, req_id),
|
||||
vec![request::HeaderByHash(Header::default().encoded().hash().into()).into()],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
harness
|
||||
.service
|
||||
.dispatch_pending(&Context::RequestFrom(peer_id, req_id));
|
||||
|
||||
assert_eq!(harness.service.pending.read().len(), 0);
|
||||
assert_eq!(harness.service.in_transit.read().len(), 1);
|
||||
|
||||
let now = Instant::now();
|
||||
|
||||
// Send `empty responses` in the current time window
|
||||
// Use only half of the `time_window` because we can't be sure exactly
|
||||
// when the window started and the clock accurancy
|
||||
while now.elapsed() < harness.service.response_time_window / 2 {
|
||||
harness
|
||||
.service
|
||||
.on_responses(&Context::RequestFrom(13, req_id), req_id, &[]);
|
||||
assert!(harness.service.pending.read().len() != 0);
|
||||
let pending = harness.service.pending.write().remove(0);
|
||||
harness.service.in_transit.write().insert(req_id, pending);
|
||||
}
|
||||
|
||||
// Make sure we passed the first `time window`
|
||||
thread::sleep(Duration::from_secs(5));
|
||||
|
||||
// Now, response is in failure state but need another response to be `polled`
|
||||
harness
|
||||
.service
|
||||
.on_responses(&Context::RequestFrom(13, req_id), req_id, &[]);
|
||||
|
||||
assert!(harness.service.in_transit.read().is_empty());
|
||||
assert!(harness.service.pending.read().is_empty());
|
||||
}
|
@ -1,478 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! A provider for the PIP protocol. This is typically a full node, who can
|
||||
//! give as much data as necessary to its peers.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_types::{
|
||||
blockchain_info::BlockChainInfo, encoded, ids::BlockId, transaction::PendingTransaction,
|
||||
};
|
||||
use ethcore::client::{
|
||||
BlockChainClient, BlockInfo as ClientBlockInfo, ChainInfo, ProvingBlockChainClient,
|
||||
};
|
||||
use ethereum_types::H256;
|
||||
use parking_lot::RwLock;
|
||||
|
||||
use cht::{self, BlockInfo};
|
||||
use client::{AsLightClient, LightChainClient};
|
||||
use transaction_queue::TransactionQueue;
|
||||
|
||||
use request;
|
||||
|
||||
/// Maximum allowed size of a headers request.
|
||||
pub const MAX_HEADERS_PER_REQUEST: u64 = 512;
|
||||
|
||||
/// Defines the operations that a provider for the light subprotocol must fulfill.
|
||||
pub trait Provider: Send + Sync {
|
||||
/// Provide current blockchain info.
|
||||
fn chain_info(&self) -> BlockChainInfo;
|
||||
|
||||
/// Find the depth of a common ancestor between two blocks.
|
||||
/// If either block is unknown or an ancestor can't be found
|
||||
/// then return `None`.
|
||||
fn reorg_depth(&self, a: &H256, b: &H256) -> Option<u64>;
|
||||
|
||||
/// Earliest block where state queries are available.
|
||||
/// If `None`, no state queries are servable.
|
||||
fn earliest_state(&self) -> Option<u64>;
|
||||
|
||||
/// Provide a list of headers starting at the requested block,
|
||||
/// possibly in reverse and skipping `skip` at a time.
|
||||
///
|
||||
/// The returned vector may have any length in the range [0, `max`], but the
|
||||
/// results within must adhere to the `skip` and `reverse` parameters.
|
||||
fn block_headers(
|
||||
&self,
|
||||
req: request::CompleteHeadersRequest,
|
||||
) -> Option<request::HeadersResponse> {
|
||||
use request::HashOrNumber;
|
||||
|
||||
if req.max == 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let best_num = self.chain_info().best_block_number;
|
||||
let start_num = match req.start {
|
||||
HashOrNumber::Number(start_num) => start_num,
|
||||
HashOrNumber::Hash(hash) => match self.block_header(BlockId::Hash(hash)) {
|
||||
None => {
|
||||
trace!(target: "pip_provider", "Unknown block hash {} requested", hash);
|
||||
return None;
|
||||
}
|
||||
Some(header) => {
|
||||
let num = header.number();
|
||||
let canon_hash = self.block_header(BlockId::Number(num)).map(|h| h.hash());
|
||||
|
||||
if req.max == 1 || canon_hash != Some(hash) {
|
||||
// Non-canonical header or single header requested.
|
||||
return Some(::request::HeadersResponse {
|
||||
headers: vec![header],
|
||||
});
|
||||
}
|
||||
|
||||
num
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
let max = ::std::cmp::min(MAX_HEADERS_PER_REQUEST, req.max);
|
||||
|
||||
let headers: Vec<_> = (0_u64..max)
|
||||
.map(|x: u64| x.saturating_mul(req.skip.saturating_add(1)))
|
||||
.take_while(|&x| {
|
||||
if req.reverse {
|
||||
x < start_num
|
||||
} else {
|
||||
best_num.saturating_sub(start_num) >= x
|
||||
}
|
||||
})
|
||||
.map(|x| {
|
||||
if req.reverse {
|
||||
start_num.saturating_sub(x)
|
||||
} else {
|
||||
start_num.saturating_add(x)
|
||||
}
|
||||
})
|
||||
.map(|x| self.block_header(BlockId::Number(x)))
|
||||
.take_while(|x| x.is_some())
|
||||
.flat_map(|x| x)
|
||||
.collect();
|
||||
|
||||
if headers.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(::request::HeadersResponse { headers })
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a block header by id.
|
||||
fn block_header(&self, id: BlockId) -> Option<encoded::Header>;
|
||||
|
||||
/// Get a transaction index by hash.
|
||||
fn transaction_index(
|
||||
&self,
|
||||
req: request::CompleteTransactionIndexRequest,
|
||||
) -> Option<request::TransactionIndexResponse>;
|
||||
|
||||
/// Fulfill a block body request.
|
||||
fn block_body(&self, req: request::CompleteBodyRequest) -> Option<request::BodyResponse>;
|
||||
|
||||
/// Fulfill a request for block receipts.
|
||||
fn block_receipts(
|
||||
&self,
|
||||
req: request::CompleteReceiptsRequest,
|
||||
) -> Option<request::ReceiptsResponse>;
|
||||
|
||||
/// Get an account proof.
|
||||
fn account_proof(
|
||||
&self,
|
||||
req: request::CompleteAccountRequest,
|
||||
) -> Option<request::AccountResponse>;
|
||||
|
||||
/// Get a storage proof.
|
||||
fn storage_proof(
|
||||
&self,
|
||||
req: request::CompleteStorageRequest,
|
||||
) -> Option<request::StorageResponse>;
|
||||
|
||||
/// Provide contract code for the specified (block_hash, code_hash) pair.
|
||||
fn contract_code(&self, req: request::CompleteCodeRequest) -> Option<request::CodeResponse>;
|
||||
|
||||
/// Provide a header proof from a given Canonical Hash Trie as well as the
|
||||
/// corresponding header.
|
||||
fn header_proof(
|
||||
&self,
|
||||
req: request::CompleteHeaderProofRequest,
|
||||
) -> Option<request::HeaderProofResponse>;
|
||||
|
||||
/// Provide pending transactions.
|
||||
fn transactions_to_propagate(&self) -> Vec<PendingTransaction>;
|
||||
|
||||
/// Provide a proof-of-execution for the given transaction proof request.
|
||||
/// Returns a vector of all state items necessary to execute the transaction.
|
||||
fn transaction_proof(
|
||||
&self,
|
||||
req: request::CompleteExecutionRequest,
|
||||
) -> Option<request::ExecutionResponse>;
|
||||
|
||||
/// Provide epoch signal data at given block hash. This should be just the
|
||||
fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option<request::SignalResponse>;
|
||||
}
|
||||
|
||||
// Implementation of a light client data provider for a client.
|
||||
impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
|
||||
fn chain_info(&self) -> BlockChainInfo {
|
||||
ChainInfo::chain_info(self)
|
||||
}
|
||||
|
||||
fn reorg_depth(&self, a: &H256, b: &H256) -> Option<u64> {
|
||||
self.tree_route(a, b).map(|route| route.index as u64)
|
||||
}
|
||||
|
||||
fn earliest_state(&self) -> Option<u64> {
|
||||
Some(self.pruning_info().earliest_state)
|
||||
}
|
||||
|
||||
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
|
||||
ClientBlockInfo::block_header(self, id)
|
||||
}
|
||||
|
||||
fn transaction_index(
|
||||
&self,
|
||||
req: request::CompleteTransactionIndexRequest,
|
||||
) -> Option<request::TransactionIndexResponse> {
|
||||
use common_types::ids::TransactionId;
|
||||
|
||||
self.transaction_receipt(TransactionId::Hash(req.hash))
|
||||
.map(|receipt| request::TransactionIndexResponse {
|
||||
num: receipt.block_number,
|
||||
hash: receipt.block_hash,
|
||||
index: receipt.transaction_index as u64,
|
||||
})
|
||||
}
|
||||
|
||||
fn block_body(&self, req: request::CompleteBodyRequest) -> Option<request::BodyResponse> {
|
||||
BlockChainClient::block_body(self, BlockId::Hash(req.hash))
|
||||
.map(|body| ::request::BodyResponse { body })
|
||||
}
|
||||
|
||||
fn block_receipts(
|
||||
&self,
|
||||
req: request::CompleteReceiptsRequest,
|
||||
) -> Option<request::ReceiptsResponse> {
|
||||
BlockChainClient::block_receipts(self, &req.hash).map(|x| ::request::ReceiptsResponse {
|
||||
receipts: x.receipts,
|
||||
})
|
||||
}
|
||||
|
||||
fn account_proof(
|
||||
&self,
|
||||
req: request::CompleteAccountRequest,
|
||||
) -> Option<request::AccountResponse> {
|
||||
self.prove_account(req.address_hash, BlockId::Hash(req.block_hash))
|
||||
.map(|(proof, acc)| ::request::AccountResponse {
|
||||
proof,
|
||||
nonce: acc.nonce,
|
||||
balance: acc.balance,
|
||||
code_hash: acc.code_hash,
|
||||
storage_root: acc.storage_root,
|
||||
})
|
||||
}
|
||||
|
||||
fn storage_proof(
|
||||
&self,
|
||||
req: request::CompleteStorageRequest,
|
||||
) -> Option<request::StorageResponse> {
|
||||
self.prove_storage(
|
||||
req.address_hash,
|
||||
req.key_hash,
|
||||
BlockId::Hash(req.block_hash),
|
||||
)
|
||||
.map(|(proof, item)| ::request::StorageResponse { proof, value: item })
|
||||
}
|
||||
|
||||
fn contract_code(&self, req: request::CompleteCodeRequest) -> Option<request::CodeResponse> {
|
||||
self.state_data(&req.code_hash)
|
||||
.map(|code| ::request::CodeResponse { code })
|
||||
}
|
||||
|
||||
fn header_proof(
|
||||
&self,
|
||||
req: request::CompleteHeaderProofRequest,
|
||||
) -> Option<request::HeaderProofResponse> {
|
||||
let cht_number = match cht::block_to_cht_number(req.num) {
|
||||
Some(cht_num) => cht_num,
|
||||
None => {
|
||||
debug!(target: "pip_provider", "Requested CHT proof with invalid block number");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
let mut needed = None;
|
||||
|
||||
// build the CHT, caching the requested header as we pass through it.
|
||||
let cht = {
|
||||
let block_info = |id| {
|
||||
let hdr = self.block_header(id);
|
||||
let td = self.block_total_difficulty(id);
|
||||
|
||||
match (hdr, td) {
|
||||
(Some(hdr), Some(td)) => {
|
||||
let info = BlockInfo {
|
||||
hash: hdr.hash(),
|
||||
parent_hash: hdr.parent_hash(),
|
||||
total_difficulty: td,
|
||||
};
|
||||
|
||||
if hdr.number() == req.num {
|
||||
needed = Some((hdr, td));
|
||||
}
|
||||
|
||||
Some(info)
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
};
|
||||
|
||||
match cht::build(cht_number, block_info) {
|
||||
Some(cht) => cht,
|
||||
None => return None, // incomplete CHT.
|
||||
}
|
||||
};
|
||||
|
||||
let (needed_hdr, needed_td) =
|
||||
needed.expect("`needed` always set in loop, number checked before; qed");
|
||||
|
||||
// prove our result.
|
||||
match cht.prove(req.num, 0) {
|
||||
Ok(Some(proof)) => Some(::request::HeaderProofResponse {
|
||||
proof,
|
||||
hash: needed_hdr.hash(),
|
||||
td: needed_td,
|
||||
}),
|
||||
Ok(None) => None,
|
||||
Err(e) => {
|
||||
debug!(target: "pip_provider", "Error looking up number in freshly-created CHT: {}", e);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn transaction_proof(
|
||||
&self,
|
||||
req: request::CompleteExecutionRequest,
|
||||
) -> Option<request::ExecutionResponse> {
|
||||
use common_types::transaction::Transaction;
|
||||
|
||||
let id = BlockId::Hash(req.block_hash);
|
||||
let nonce = match self.nonce(&req.from, id) {
|
||||
Some(nonce) => nonce,
|
||||
None => return None,
|
||||
};
|
||||
let transaction = Transaction {
|
||||
nonce,
|
||||
gas: req.gas,
|
||||
gas_price: req.gas_price,
|
||||
action: req.action,
|
||||
value: req.value,
|
||||
data: req.data,
|
||||
}
|
||||
.fake_sign(req.from);
|
||||
|
||||
self.prove_transaction(transaction, id)
|
||||
.map(|(_, proof)| ::request::ExecutionResponse { items: proof })
|
||||
}
|
||||
|
||||
fn transactions_to_propagate(&self) -> Vec<PendingTransaction> {
|
||||
BlockChainClient::transactions_to_propagate(self)
|
||||
.into_iter()
|
||||
.map(|tx| tx.pending().clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option<request::SignalResponse> {
|
||||
self.epoch_signal(req.block_hash)
|
||||
.map(|signal| request::SignalResponse { signal })
|
||||
}
|
||||
}
|
||||
|
||||
/// The light client "provider" implementation. This wraps a `LightClient` and
|
||||
/// a light transaction queue.
|
||||
pub struct LightProvider<L> {
|
||||
client: Arc<L>,
|
||||
txqueue: Arc<RwLock<TransactionQueue>>,
|
||||
}
|
||||
|
||||
impl<L> LightProvider<L> {
|
||||
/// Create a new `LightProvider` from the given client and transaction queue.
|
||||
pub fn new(client: Arc<L>, txqueue: Arc<RwLock<TransactionQueue>>) -> Self {
|
||||
LightProvider { client, txqueue }
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: draw from cache (shared between this and the RPC layer)
|
||||
impl<L: AsLightClient + Send + Sync> Provider for LightProvider<L> {
|
||||
fn chain_info(&self) -> BlockChainInfo {
|
||||
self.client.as_light_client().chain_info()
|
||||
}
|
||||
|
||||
fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option<u64> {
|
||||
None
|
||||
}
|
||||
|
||||
fn earliest_state(&self) -> Option<u64> {
|
||||
None
|
||||
}
|
||||
|
||||
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
|
||||
self.client.as_light_client().block_header(id)
|
||||
}
|
||||
|
||||
fn transaction_index(
|
||||
&self,
|
||||
_req: request::CompleteTransactionIndexRequest,
|
||||
) -> Option<request::TransactionIndexResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn block_body(&self, _req: request::CompleteBodyRequest) -> Option<request::BodyResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn block_receipts(
|
||||
&self,
|
||||
_req: request::CompleteReceiptsRequest,
|
||||
) -> Option<request::ReceiptsResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn account_proof(
|
||||
&self,
|
||||
_req: request::CompleteAccountRequest,
|
||||
) -> Option<request::AccountResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn storage_proof(
|
||||
&self,
|
||||
_req: request::CompleteStorageRequest,
|
||||
) -> Option<request::StorageResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn contract_code(&self, _req: request::CompleteCodeRequest) -> Option<request::CodeResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn header_proof(
|
||||
&self,
|
||||
_req: request::CompleteHeaderProofRequest,
|
||||
) -> Option<request::HeaderProofResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn transaction_proof(
|
||||
&self,
|
||||
_req: request::CompleteExecutionRequest,
|
||||
) -> Option<request::ExecutionResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn epoch_signal(
|
||||
&self,
|
||||
_req: request::CompleteSignalRequest,
|
||||
) -> Option<request::SignalResponse> {
|
||||
None
|
||||
}
|
||||
|
||||
fn transactions_to_propagate(&self) -> Vec<PendingTransaction> {
|
||||
let chain_info = self.chain_info();
|
||||
self.txqueue.read().ready_transactions(
|
||||
chain_info.best_block_number,
|
||||
chain_info.best_block_timestamp,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<L: AsLightClient> AsLightClient for LightProvider<L> {
|
||||
type Client = L::Client;
|
||||
|
||||
fn as_light_client(&self) -> &L::Client {
|
||||
self.client.as_light_client()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Provider;
|
||||
use ethcore::client::{EachBlockWith, TestBlockChainClient};
|
||||
|
||||
#[test]
|
||||
fn cht_proof() {
|
||||
let client = TestBlockChainClient::new();
|
||||
client.add_blocks(2000, EachBlockWith::Nothing);
|
||||
|
||||
let req = ::request::CompleteHeaderProofRequest { num: 1500 };
|
||||
|
||||
assert!(client.header_proof(req.clone()).is_none());
|
||||
|
||||
client.add_blocks(48, EachBlockWith::Nothing);
|
||||
|
||||
assert!(client.header_proof(req.clone()).is_some());
|
||||
}
|
||||
}
|
@ -1,597 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Light Transaction Queue.
|
||||
//!
|
||||
//! Manages local transactions,
|
||||
//! but stores all local transactions, removing only on invalidated nonce.
|
||||
//!
|
||||
//! Under the assumption that light nodes will have a relatively limited set of
|
||||
//! accounts for which they create transactions, this queue is structured in an
|
||||
//! address-wise manner.
|
||||
|
||||
use std::{
|
||||
collections::{hash_map::Entry, BTreeMap, HashMap},
|
||||
fmt,
|
||||
};
|
||||
|
||||
use common_types::transaction::{self, Condition, PendingTransaction, SignedTransaction};
|
||||
use ethereum_types::{Address, H256, U256};
|
||||
use fastmap::H256FastMap;
|
||||
|
||||
// Knowledge of an account's current nonce.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
enum CurrentNonce {
|
||||
// Assumed current nonce.
|
||||
Assumed(U256),
|
||||
// Known current nonce.
|
||||
Known(U256),
|
||||
}
|
||||
|
||||
impl CurrentNonce {
|
||||
// whether this nonce is assumed
|
||||
fn is_assumed(&self) -> bool {
|
||||
match *self {
|
||||
CurrentNonce::Assumed(_) => true,
|
||||
CurrentNonce::Known(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
// whether this nonce is known for certain from an external source.
|
||||
fn is_known(&self) -> bool {
|
||||
!self.is_assumed()
|
||||
}
|
||||
|
||||
// the current nonce's value.
|
||||
fn value(&self) -> &U256 {
|
||||
match *self {
|
||||
CurrentNonce::Assumed(ref val) => val,
|
||||
CurrentNonce::Known(ref val) => val,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
struct TransactionInfo {
|
||||
hash: H256,
|
||||
nonce: U256,
|
||||
condition: Option<Condition>,
|
||||
}
|
||||
|
||||
impl<'a> From<&'a PendingTransaction> for TransactionInfo {
|
||||
fn from(tx: &'a PendingTransaction) -> Self {
|
||||
TransactionInfo {
|
||||
hash: tx.hash(),
|
||||
nonce: tx.nonce,
|
||||
condition: tx.condition.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// transactions associated with a specific account.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
struct AccountTransactions {
|
||||
// believed current nonce (gotten from initial given TX or `cull` calls).
|
||||
cur_nonce: CurrentNonce,
|
||||
current: Vec<TransactionInfo>, // ordered "current" transactions (cur_nonce onwards)
|
||||
future: BTreeMap<U256, TransactionInfo>, // "future" transactions.
|
||||
}
|
||||
|
||||
impl AccountTransactions {
|
||||
fn is_empty(&self) -> bool {
|
||||
self.current.is_empty() && self.future.is_empty()
|
||||
}
|
||||
|
||||
fn next_nonce(&self) -> U256 {
|
||||
self.current
|
||||
.last()
|
||||
.map(|last| last.nonce.saturating_add(1.into()))
|
||||
.unwrap_or_else(|| *self.cur_nonce.value())
|
||||
}
|
||||
|
||||
// attempt to move transactions from the future queue into the current queue.
|
||||
fn adjust_future(&mut self) -> Vec<H256> {
|
||||
let mut promoted = Vec::new();
|
||||
let mut next_nonce = self.next_nonce();
|
||||
|
||||
while let Some(tx) = self.future.remove(&next_nonce) {
|
||||
promoted.push(tx.hash);
|
||||
self.current.push(tx);
|
||||
next_nonce = next_nonce.saturating_add(1.into());
|
||||
}
|
||||
|
||||
promoted
|
||||
}
|
||||
}
|
||||
|
||||
/// Transaction import result.
|
||||
pub enum ImportDestination {
|
||||
/// Transaction has been imported to the current queue.
|
||||
///
|
||||
/// It's going to be propagated to peers.
|
||||
Current,
|
||||
/// Transaction has been imported to future queue.
|
||||
///
|
||||
/// It means it won't be propagated until the gap is filled.
|
||||
Future,
|
||||
}
|
||||
|
||||
type Listener = Box<dyn Fn(&[H256]) + Send + Sync>;
|
||||
|
||||
/// Light transaction queue. See module docs for more details.
|
||||
#[derive(Default)]
|
||||
pub struct TransactionQueue {
|
||||
by_account: HashMap<Address, AccountTransactions>,
|
||||
by_hash: H256FastMap<PendingTransaction>,
|
||||
listeners: Vec<Listener>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for TransactionQueue {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("TransactionQueue")
|
||||
.field("by_account", &self.by_account)
|
||||
.field("by_hash", &self.by_hash)
|
||||
.field("listeners", &self.listeners.len())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl TransactionQueue {
|
||||
/// Import a pending transaction to be queued.
|
||||
pub fn import(
|
||||
&mut self,
|
||||
tx: PendingTransaction,
|
||||
) -> Result<ImportDestination, transaction::Error> {
|
||||
let sender = tx.sender();
|
||||
let hash = tx.hash();
|
||||
let nonce = tx.nonce;
|
||||
let tx_info = TransactionInfo::from(&tx);
|
||||
|
||||
if self.by_hash.contains_key(&hash) {
|
||||
return Err(transaction::Error::AlreadyImported);
|
||||
}
|
||||
|
||||
let (res, promoted) = match self.by_account.entry(sender) {
|
||||
Entry::Vacant(entry) => {
|
||||
entry.insert(AccountTransactions {
|
||||
cur_nonce: CurrentNonce::Assumed(nonce),
|
||||
current: vec![tx_info],
|
||||
future: BTreeMap::new(),
|
||||
});
|
||||
|
||||
(ImportDestination::Current, vec![hash])
|
||||
}
|
||||
Entry::Occupied(mut entry) => {
|
||||
let acct_txs = entry.get_mut();
|
||||
if nonce < *acct_txs.cur_nonce.value() {
|
||||
// don't accept txs from before known current nonce.
|
||||
if acct_txs.cur_nonce.is_known() {
|
||||
return Err(transaction::Error::Old);
|
||||
}
|
||||
|
||||
// lower our assumption until corrected later.
|
||||
acct_txs.cur_nonce = CurrentNonce::Assumed(nonce);
|
||||
}
|
||||
|
||||
match acct_txs.current.binary_search_by(|x| x.nonce.cmp(&nonce)) {
|
||||
Ok(idx) => {
|
||||
trace!(target: "txqueue", "Replacing existing transaction from {} with nonce {}",
|
||||
sender, nonce);
|
||||
|
||||
let old = ::std::mem::replace(&mut acct_txs.current[idx], tx_info);
|
||||
self.by_hash.remove(&old.hash);
|
||||
|
||||
(ImportDestination::Current, vec![hash])
|
||||
}
|
||||
Err(idx) => {
|
||||
let cur_len = acct_txs.current.len();
|
||||
let incr_nonce = nonce + 1;
|
||||
|
||||
// current is sorted with one tx per nonce,
|
||||
// so if a tx with given nonce wasn't found that means it is either
|
||||
// earlier in nonce than all other "current" transactions or later.
|
||||
assert!(idx == 0 || idx == cur_len);
|
||||
|
||||
if idx == 0
|
||||
&& acct_txs
|
||||
.current
|
||||
.first()
|
||||
.map_or(false, |f| f.nonce != incr_nonce)
|
||||
{
|
||||
let old_cur = ::std::mem::replace(&mut acct_txs.current, vec![tx_info]);
|
||||
|
||||
trace!(target: "txqueue", "Moving {} transactions with nonce > {} to future",
|
||||
old_cur.len(), incr_nonce);
|
||||
|
||||
for future in old_cur {
|
||||
let future_nonce = future.nonce;
|
||||
acct_txs.future.insert(future_nonce, future);
|
||||
}
|
||||
|
||||
(ImportDestination::Current, vec![hash])
|
||||
} else if idx == cur_len
|
||||
&& acct_txs
|
||||
.current
|
||||
.last()
|
||||
.map_or(false, |f| f.nonce + 1 != nonce)
|
||||
{
|
||||
trace!(target: "txqueue", "Queued future transaction for {}, nonce={}", sender, nonce);
|
||||
let future_nonce = nonce;
|
||||
acct_txs.future.insert(future_nonce, tx_info);
|
||||
|
||||
(ImportDestination::Future, vec![])
|
||||
} else {
|
||||
trace!(target: "txqueue", "Queued current transaction for {}, nonce={}", sender, nonce);
|
||||
|
||||
// insert, then check if we've filled any gaps.
|
||||
acct_txs.current.insert(idx, tx_info);
|
||||
let mut promoted = acct_txs.adjust_future();
|
||||
promoted.insert(0, hash);
|
||||
|
||||
(ImportDestination::Current, promoted)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
self.by_hash.insert(hash, tx);
|
||||
self.notify(&promoted);
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Get pending transaction by hash.
|
||||
pub fn transaction(&self, hash: &H256) -> Option<SignedTransaction> {
|
||||
self.by_hash.get(hash).map(|tx| (&**tx).clone())
|
||||
}
|
||||
|
||||
/// Get the next nonce for a given address based on what's within the queue.
|
||||
/// If the address has no queued transactions, then `None` will be returned
|
||||
/// and the next nonce will have to be deduced via other means.
|
||||
pub fn next_nonce(&self, address: &Address) -> Option<U256> {
|
||||
self.by_account
|
||||
.get(address)
|
||||
.map(AccountTransactions::next_nonce)
|
||||
}
|
||||
|
||||
/// Get all transactions ready to be propagated.
|
||||
/// `best_block_number` and `best_block_timestamp` are used to filter out conditionally
|
||||
/// propagated transactions.
|
||||
///
|
||||
/// Returned transactions are batched by sender, in order of ascending nonce.
|
||||
pub fn ready_transactions(
|
||||
&self,
|
||||
best_block_number: u64,
|
||||
best_block_timestamp: u64,
|
||||
) -> Vec<PendingTransaction> {
|
||||
self.by_account.values()
|
||||
.flat_map(|acct_txs| {
|
||||
acct_txs.current.iter().take_while(|tx| match tx.condition {
|
||||
None => true,
|
||||
Some(Condition::Number(blk_num)) => blk_num <= best_block_number,
|
||||
Some(Condition::Timestamp(time)) => time <= best_block_timestamp,
|
||||
}).map(|info| info.hash)
|
||||
})
|
||||
.filter_map(|hash| match self.by_hash.get(&hash) {
|
||||
Some(tx) => Some(tx.clone()),
|
||||
None => {
|
||||
warn!(target: "txqueue", "Inconsistency detected between `by_hash` and `by_account`: {} not stored.",
|
||||
hash);
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Get all transactions not ready to be propagated.
|
||||
/// `best_block_number` and `best_block_timestamp` are used to filter out conditionally
|
||||
/// propagated transactions.
|
||||
///
|
||||
/// Returned transactions are batched by sender, in order of ascending nonce.
|
||||
pub fn future_transactions(
|
||||
&self,
|
||||
best_block_number: u64,
|
||||
best_block_timestamp: u64,
|
||||
) -> Vec<PendingTransaction> {
|
||||
self.by_account.values()
|
||||
.flat_map(|acct_txs| {
|
||||
acct_txs.current.iter().skip_while(|tx| match tx.condition {
|
||||
None => true,
|
||||
Some(Condition::Number(blk_num)) => blk_num <= best_block_number,
|
||||
Some(Condition::Timestamp(time)) => time <= best_block_timestamp,
|
||||
}).chain(acct_txs.future.values()).map(|info| info.hash)
|
||||
})
|
||||
.filter_map(|hash| match self.by_hash.get(&hash) {
|
||||
Some(tx) => Some(tx.clone()),
|
||||
None => {
|
||||
warn!(target: "txqueue", "Inconsistency detected between `by_hash` and `by_account`: {} not stored.",
|
||||
hash);
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Addresses for which we store transactions.
|
||||
pub fn queued_senders(&self) -> Vec<Address> {
|
||||
self.by_account.keys().cloned().collect()
|
||||
}
|
||||
|
||||
/// Cull out all transactions by the given address which are invalidated by the given nonce.
|
||||
pub fn cull(&mut self, address: Address, cur_nonce: U256) {
|
||||
let mut removed_hashes = vec![];
|
||||
if let Entry::Occupied(mut entry) = self.by_account.entry(address) {
|
||||
{
|
||||
let acct_txs = entry.get_mut();
|
||||
acct_txs.cur_nonce = CurrentNonce::Known(cur_nonce);
|
||||
|
||||
// cull old "future" keys.
|
||||
let old_future: Vec<_> = acct_txs
|
||||
.future
|
||||
.keys()
|
||||
.take_while(|&&k| k < cur_nonce)
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
for old in old_future {
|
||||
let hash = acct_txs
|
||||
.future
|
||||
.remove(&old)
|
||||
.expect("key extracted from keys iterator; known to exist; qed")
|
||||
.hash;
|
||||
removed_hashes.push(hash);
|
||||
}
|
||||
|
||||
// then cull from "current".
|
||||
let valid_pos = acct_txs.current.iter().position(|tx| tx.nonce >= cur_nonce);
|
||||
match valid_pos {
|
||||
None => removed_hashes.extend(acct_txs.current.drain(..).map(|tx| tx.hash)),
|
||||
Some(valid) => {
|
||||
removed_hashes.extend(acct_txs.current.drain(..valid).map(|tx| tx.hash))
|
||||
}
|
||||
}
|
||||
|
||||
// now try and move stuff out of future into current.
|
||||
acct_txs.adjust_future();
|
||||
}
|
||||
|
||||
if entry.get_mut().is_empty() {
|
||||
trace!(target: "txqueue", "No more queued transactions for {} after nonce {}",
|
||||
address, cur_nonce);
|
||||
entry.remove();
|
||||
}
|
||||
}
|
||||
|
||||
trace!(target: "txqueue", "Culled {} old transactions from sender {} (nonce={})",
|
||||
removed_hashes.len(), address, cur_nonce);
|
||||
|
||||
for hash in removed_hashes {
|
||||
self.by_hash.remove(&hash);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a transaction by hash.
|
||||
pub fn get(&self, hash: &H256) -> Option<&PendingTransaction> {
|
||||
self.by_hash.get(&hash)
|
||||
}
|
||||
|
||||
/// Add a transaction queue listener.
|
||||
pub fn add_listener(&mut self, f: Listener) {
|
||||
self.listeners.push(f);
|
||||
}
|
||||
|
||||
/// Notifies all listeners about new pending transaction.
|
||||
fn notify(&self, hashes: &[H256]) {
|
||||
for listener in &self.listeners {
|
||||
listener(hashes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::TransactionQueue;
|
||||
use common_types::transaction::{Condition, PendingTransaction, Transaction};
|
||||
use ethereum_types::Address;
|
||||
|
||||
#[test]
|
||||
fn queued_senders() {
|
||||
let sender = Address::default();
|
||||
let mut txq = TransactionQueue::default();
|
||||
let tx = Transaction::default().fake_sign(sender);
|
||||
|
||||
txq.import(tx.into()).unwrap();
|
||||
|
||||
assert_eq!(txq.queued_senders(), vec![sender]);
|
||||
|
||||
txq.cull(sender, 1.into());
|
||||
|
||||
assert_eq!(txq.queued_senders(), vec![]);
|
||||
assert!(txq.by_hash.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn next_nonce() {
|
||||
let sender = Address::default();
|
||||
let mut txq = TransactionQueue::default();
|
||||
|
||||
for i in (0..5).chain(10..15) {
|
||||
let mut tx = Transaction::default();
|
||||
tx.nonce = i.into();
|
||||
|
||||
let tx = tx.fake_sign(sender);
|
||||
|
||||
txq.import(tx.into()).unwrap();
|
||||
}
|
||||
|
||||
// current: 0..5, future: 10..15
|
||||
assert_eq!(txq.ready_transactions(0, 0).len(), 5);
|
||||
assert_eq!(txq.next_nonce(&sender).unwrap(), 5.into());
|
||||
|
||||
txq.cull(sender, 8.into());
|
||||
|
||||
// current: empty, future: 10..15
|
||||
assert_eq!(txq.ready_transactions(0, 0).len(), 0);
|
||||
assert_eq!(txq.next_nonce(&sender).unwrap(), 8.into());
|
||||
|
||||
txq.cull(sender, 10.into());
|
||||
|
||||
// current: 10..15, future: empty
|
||||
assert_eq!(txq.ready_transactions(0, 0).len(), 5);
|
||||
assert_eq!(txq.next_nonce(&sender).unwrap(), 15.into());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn current_to_future() {
|
||||
let sender = Address::default();
|
||||
let mut txq = TransactionQueue::default();
|
||||
|
||||
for i in 5..10 {
|
||||
let mut tx = Transaction::default();
|
||||
tx.nonce = i.into();
|
||||
|
||||
let tx = tx.fake_sign(sender);
|
||||
|
||||
txq.import(tx.into()).unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(txq.ready_transactions(0, 0).len(), 5);
|
||||
assert_eq!(txq.next_nonce(&sender).unwrap(), 10.into());
|
||||
|
||||
for i in 0..3 {
|
||||
let mut tx = Transaction::default();
|
||||
tx.nonce = i.into();
|
||||
|
||||
let tx = tx.fake_sign(sender);
|
||||
|
||||
txq.import(tx.into()).unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(txq.ready_transactions(0, 0).len(), 3);
|
||||
assert_eq!(txq.next_nonce(&sender).unwrap(), 3.into());
|
||||
|
||||
for i in 3..5 {
|
||||
let mut tx = Transaction::default();
|
||||
tx.nonce = i.into();
|
||||
|
||||
let tx = tx.fake_sign(sender);
|
||||
|
||||
txq.import(tx.into()).unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(txq.ready_transactions(0, 0).len(), 10);
|
||||
assert_eq!(txq.next_nonce(&sender).unwrap(), 10.into());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conditional() {
|
||||
let mut txq = TransactionQueue::default();
|
||||
let sender = Address::default();
|
||||
|
||||
for i in 0..5 {
|
||||
let mut tx = Transaction::default();
|
||||
tx.nonce = i.into();
|
||||
let tx = tx.fake_sign(sender);
|
||||
|
||||
txq.import(match i {
|
||||
3 => PendingTransaction::new(tx, Some(Condition::Number(100))),
|
||||
4 => PendingTransaction::new(tx, Some(Condition::Timestamp(1234))),
|
||||
_ => tx.into(),
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(txq.ready_transactions(0, 0).len(), 3);
|
||||
assert_eq!(txq.ready_transactions(0, 1234).len(), 3);
|
||||
assert_eq!(txq.ready_transactions(100, 0).len(), 4);
|
||||
assert_eq!(txq.ready_transactions(100, 1234).len(), 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cull_from_future() {
|
||||
let sender = Address::default();
|
||||
let mut txq = TransactionQueue::default();
|
||||
|
||||
for i in (0..1).chain(3..10) {
|
||||
let mut tx = Transaction::default();
|
||||
tx.nonce = i.into();
|
||||
|
||||
let tx = tx.fake_sign(sender);
|
||||
|
||||
txq.import(tx.into()).unwrap();
|
||||
}
|
||||
|
||||
txq.cull(sender, 6.into());
|
||||
|
||||
assert_eq!(txq.ready_transactions(0, 0).len(), 4);
|
||||
assert_eq!(txq.next_nonce(&sender).unwrap(), 10.into());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn import_old() {
|
||||
let sender = Address::default();
|
||||
let mut txq = TransactionQueue::default();
|
||||
|
||||
let mut tx_a = Transaction::default();
|
||||
tx_a.nonce = 3.into();
|
||||
|
||||
let mut tx_b = Transaction::default();
|
||||
tx_b.nonce = 2.into();
|
||||
|
||||
txq.import(tx_a.fake_sign(sender).into()).unwrap();
|
||||
txq.cull(sender, 3.into());
|
||||
|
||||
assert!(txq.import(tx_b.fake_sign(sender).into()).is_err())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn replace_is_removed() {
|
||||
let sender = Address::default();
|
||||
let mut txq = TransactionQueue::default();
|
||||
|
||||
let tx_b: PendingTransaction = Transaction::default().fake_sign(sender).into();
|
||||
let tx_a: PendingTransaction = {
|
||||
let mut tx_a = Transaction::default();
|
||||
tx_a.gas_price = tx_b.gas_price + 1;
|
||||
tx_a.fake_sign(sender).into()
|
||||
};
|
||||
|
||||
let hash = tx_a.hash();
|
||||
|
||||
txq.import(tx_a).unwrap();
|
||||
txq.import(tx_b).unwrap();
|
||||
|
||||
assert!(txq.transaction(&hash).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn future_transactions() {
|
||||
let sender = Address::default();
|
||||
let mut txq = TransactionQueue::default();
|
||||
|
||||
for i in (0..1).chain(3..10) {
|
||||
let mut tx = Transaction::default();
|
||||
tx.nonce = i.into();
|
||||
|
||||
let tx = tx.fake_sign(sender);
|
||||
|
||||
txq.import(tx.into()).unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(txq.future_transactions(0, 0).len(), 7);
|
||||
assert_eq!(txq.next_nonce(&sender).unwrap(), 1.into());
|
||||
}
|
||||
}
|
@ -1,17 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
pub mod request;
|
@ -1,375 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Request chain builder utility.
|
||||
//! Push requests with `push`. Back-references and data required to verify responses must be
|
||||
//! supplied as well.
|
||||
|
||||
use request::{IncompleteRequest, NoSuchOutput, Output, OutputKind, ResponseError, ResponseLike};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
ops::{Deref, DerefMut},
|
||||
};
|
||||
|
||||
/// Build chained requests. Push them onto the series with `push`,
|
||||
/// and produce a `Batch` object with `build`. Outputs are checked for consistency.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct Builder<T> {
|
||||
output_kinds: HashMap<(usize, usize), OutputKind>,
|
||||
requests: Vec<T>,
|
||||
}
|
||||
|
||||
impl<T> Default for Builder<T> {
|
||||
fn default() -> Self {
|
||||
Builder {
|
||||
output_kinds: HashMap::new(),
|
||||
requests: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: IncompleteRequest> Builder<T> {
|
||||
/// Attempt to push a request onto the request chain. Fails if the request
|
||||
/// references a non-existent output of a prior request.
|
||||
pub fn push(&mut self, request: T) -> Result<(), NoSuchOutput> {
|
||||
request.check_outputs(|req, idx, kind| match self.output_kinds.get(&(req, idx)) {
|
||||
Some(k) if k == &kind => Ok(()),
|
||||
_ => Err(NoSuchOutput),
|
||||
})?;
|
||||
let req_idx = self.requests.len();
|
||||
request.note_outputs(|idx, kind| {
|
||||
self.output_kinds.insert((req_idx, idx), kind);
|
||||
});
|
||||
self.requests.push(request);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a reference to the output kinds map.
|
||||
pub fn output_kinds(&self) -> &HashMap<(usize, usize), OutputKind> {
|
||||
&self.output_kinds
|
||||
}
|
||||
|
||||
/// Convert this into a "batch" object.
|
||||
pub fn build(self) -> Batch<T> {
|
||||
Batch {
|
||||
outputs: HashMap::new(),
|
||||
requests: self.requests,
|
||||
answered: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Requests pending responses.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct Batch<T> {
|
||||
outputs: HashMap<(usize, usize), Output>,
|
||||
requests: Vec<T>,
|
||||
answered: usize,
|
||||
}
|
||||
|
||||
impl<T> Batch<T> {
|
||||
/// Get access to the underlying slice of requests.
|
||||
// TODO: unimplemented -> Vec<Request>, // do we _have to_ allocate?
|
||||
pub fn requests(&self) -> &[T] {
|
||||
&self.requests
|
||||
}
|
||||
|
||||
/// Get the number of answered requests.
|
||||
pub fn num_answered(&self) -> usize {
|
||||
self.answered
|
||||
}
|
||||
|
||||
/// Whether the batch is complete.
|
||||
pub fn is_complete(&self) -> bool {
|
||||
self.answered == self.requests.len()
|
||||
}
|
||||
|
||||
/// Map requests from one type into another.
|
||||
pub fn map_requests<F, U>(self, f: F) -> Batch<U>
|
||||
where
|
||||
F: FnMut(T) -> U,
|
||||
U: IncompleteRequest,
|
||||
{
|
||||
Batch {
|
||||
outputs: self.outputs,
|
||||
requests: self.requests.into_iter().map(f).collect(),
|
||||
answered: self.answered,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: IncompleteRequest + Clone> Batch<T> {
|
||||
/// Get the next request as a filled request. Returns `None` when all requests answered.
|
||||
pub fn next_complete(&self) -> Option<T::Complete> {
|
||||
if self.is_complete() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
self.requests[self.answered]
|
||||
.clone()
|
||||
.complete()
|
||||
.expect("All outputs checked as invariant of `Batch` object; qed"),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Sweep through all unanswered requests, filling them as necessary.
|
||||
pub fn fill_unanswered(&mut self) {
|
||||
let outputs = &mut self.outputs;
|
||||
|
||||
for req in self.requests.iter_mut().skip(self.answered) {
|
||||
req.fill(|req_idx, out_idx| {
|
||||
outputs
|
||||
.get(&(req_idx, out_idx))
|
||||
.cloned()
|
||||
.ok_or(NoSuchOutput)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Supply a response, asserting its correctness.
|
||||
/// Fill outputs based upon it.
|
||||
pub fn supply_response_unchecked<R: ResponseLike>(&mut self, response: &R) {
|
||||
if self.is_complete() {
|
||||
return;
|
||||
}
|
||||
|
||||
let outputs = &mut self.outputs;
|
||||
let idx = self.answered;
|
||||
response.fill_outputs(|out_idx, output| {
|
||||
// we don't need to check output kinds here because all back-references
|
||||
// are validated in the builder.
|
||||
// TODO: optimization for only storing outputs we "care about"?
|
||||
outputs.insert((idx, out_idx), output);
|
||||
});
|
||||
|
||||
self.answered += 1;
|
||||
|
||||
// fill as much of the next request as we can.
|
||||
if let Some(ref mut req) = self.requests.get_mut(self.answered) {
|
||||
req.fill(|req_idx, out_idx| {
|
||||
outputs
|
||||
.get(&(req_idx, out_idx))
|
||||
.cloned()
|
||||
.ok_or(NoSuchOutput)
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: super::CheckedRequest + Clone> Batch<T> {
|
||||
/// Supply a response for the next request.
|
||||
/// Fails on: wrong request kind, all requests answered already.
|
||||
pub fn supply_response(
|
||||
&mut self,
|
||||
env: &T::Environment,
|
||||
response: &T::Response,
|
||||
) -> Result<T::Extract, ResponseError<T::Error>> {
|
||||
let idx = self.answered;
|
||||
|
||||
// check validity.
|
||||
if idx == self.requests.len() {
|
||||
return Err(ResponseError::Unexpected);
|
||||
}
|
||||
let completed = self.next_complete().expect(
|
||||
"only fails when all requests have been answered; this just checked against; qed",
|
||||
);
|
||||
|
||||
let extracted = self.requests[idx]
|
||||
.check_response(&completed, env, response)
|
||||
.map_err(ResponseError::Validity)?;
|
||||
|
||||
self.supply_response_unchecked(response);
|
||||
Ok(extracted)
|
||||
}
|
||||
}
|
||||
|
||||
impl Batch<super::Request> {
|
||||
/// For each request, produce a response.
|
||||
/// The responses vector produced goes up to the point where the responder
|
||||
/// first returns `None`, an invalid response, or until all requests have been responded to.
|
||||
pub fn respond_to_all<F>(mut self, responder: F) -> Vec<super::Response>
|
||||
where
|
||||
F: Fn(super::CompleteRequest) -> Option<super::Response>,
|
||||
{
|
||||
let mut responses = Vec::new();
|
||||
|
||||
while let Some(response) = self.next_complete().and_then(&responder) {
|
||||
match self.supply_response(&(), &response) {
|
||||
Ok(()) => responses.push(response),
|
||||
Err(e) => {
|
||||
debug!(target: "pip", "produced bad response to request: {:?}", e);
|
||||
return responses;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
responses
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: IncompleteRequest> Deref for Batch<T> {
|
||||
type Target = [T];
|
||||
|
||||
fn deref(&self) -> &[T] {
|
||||
&self.requests[..]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: IncompleteRequest> DerefMut for Batch<T> {
|
||||
fn deref_mut(&mut self) -> &mut [T] {
|
||||
&mut self.requests[..]
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Builder;
|
||||
use ethereum_types::H256;
|
||||
use request::*;
|
||||
|
||||
#[test]
|
||||
fn all_scalar() {
|
||||
let mut builder = Builder::default();
|
||||
builder
|
||||
.push(Request::HeaderProof(IncompleteHeaderProofRequest {
|
||||
num: 100.into(),
|
||||
}))
|
||||
.unwrap();
|
||||
builder
|
||||
.push(Request::Receipts(IncompleteReceiptsRequest {
|
||||
hash: H256::default().into(),
|
||||
}))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn missing_backref() {
|
||||
let mut builder = Builder::default();
|
||||
builder
|
||||
.push(Request::HeaderProof(IncompleteHeaderProofRequest {
|
||||
num: Field::BackReference(100, 3),
|
||||
}))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn wrong_kind() {
|
||||
let mut builder = Builder::default();
|
||||
assert!(builder
|
||||
.push(Request::HeaderProof(IncompleteHeaderProofRequest {
|
||||
num: 100.into(),
|
||||
}))
|
||||
.is_ok());
|
||||
builder
|
||||
.push(Request::HeaderProof(IncompleteHeaderProofRequest {
|
||||
num: Field::BackReference(0, 0),
|
||||
}))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn good_backreference() {
|
||||
let mut builder = Builder::default();
|
||||
builder
|
||||
.push(Request::HeaderProof(IncompleteHeaderProofRequest {
|
||||
num: 100.into(), // header proof puts hash at output 0.
|
||||
}))
|
||||
.unwrap();
|
||||
builder
|
||||
.push(Request::Receipts(IncompleteReceiptsRequest {
|
||||
hash: Field::BackReference(0, 0),
|
||||
}))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn batch_tx_index_backreference() {
|
||||
let mut builder = Builder::default();
|
||||
builder
|
||||
.push(Request::HeaderProof(IncompleteHeaderProofRequest {
|
||||
num: 100.into(), // header proof puts hash at output 0.
|
||||
}))
|
||||
.unwrap();
|
||||
builder
|
||||
.push(Request::TransactionIndex(
|
||||
IncompleteTransactionIndexRequest {
|
||||
hash: Field::BackReference(0, 0),
|
||||
},
|
||||
))
|
||||
.unwrap();
|
||||
|
||||
let mut batch = builder.build();
|
||||
batch.requests[1].fill(|_req_idx, _out_idx| Ok(Output::Hash(42.into())));
|
||||
|
||||
assert!(batch.next_complete().is_some());
|
||||
batch.answered += 1;
|
||||
assert!(batch.next_complete().is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn batch_tx_index_backreference_public_api() {
|
||||
let mut builder = Builder::default();
|
||||
builder
|
||||
.push(Request::HeaderProof(IncompleteHeaderProofRequest {
|
||||
num: 100.into(), // header proof puts hash at output 0.
|
||||
}))
|
||||
.unwrap();
|
||||
builder
|
||||
.push(Request::TransactionIndex(
|
||||
IncompleteTransactionIndexRequest {
|
||||
hash: Field::BackReference(0, 0),
|
||||
},
|
||||
))
|
||||
.unwrap();
|
||||
|
||||
let mut batch = builder.build();
|
||||
|
||||
assert!(batch.next_complete().is_some());
|
||||
let hdr_proof_res = header_proof::Response {
|
||||
proof: vec![],
|
||||
hash: 12.into(),
|
||||
td: 21.into(),
|
||||
};
|
||||
batch.supply_response_unchecked(&hdr_proof_res);
|
||||
|
||||
assert!(batch.next_complete().is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn batch_receipts_backreference() {
|
||||
let mut builder = Builder::default();
|
||||
builder
|
||||
.push(Request::HeaderProof(IncompleteHeaderProofRequest {
|
||||
num: 100.into(), // header proof puts hash at output 0.
|
||||
}))
|
||||
.unwrap();
|
||||
builder
|
||||
.push(Request::Receipts(IncompleteReceiptsRequest {
|
||||
hash: Field::BackReference(0, 0),
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let mut batch = builder.build();
|
||||
batch.requests[1].fill(|_req_idx, _out_idx| Ok(Output::Hash(42.into())));
|
||||
|
||||
assert!(batch.next_complete().is_some());
|
||||
batch.answered += 1;
|
||||
assert!(batch.next_complete().is_some());
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -39,747 +39,6 @@
|
||||
"difficulty": "0x20000",
|
||||
"gasLimit": "0x5B8D80"
|
||||
},
|
||||
"hardcodedSync": {
|
||||
"header": "f90231a0ef1d354e1a2c136ff5d3063dd3d116708f56b600ad6f5bbdcb27cb05a30aff9ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479417d9c712b71987c084ac11046aceeace393c3cefa0e3252978f6779c72c03d5006ca641b4b781dd02ff35f4bb0f58c4c89b8b7f421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000090fffffffffffffffffffffffffffffffe8316f801837a120080845d7b7c1e8c456c656374726f66727569748412b24c06b841619fbfe1b9b43d13e3d972467bbf92ae37df2ec5daada52a82011bac745999d67ed67c1aecceb5e384845bd870b677a69e2a19dfcbbcc1cf1dadb09c63fa536a01",
|
||||
"totalDifficulty": "512220581561117171220611992449499484793650169",
|
||||
"CHTs": [
|
||||
"0x714aee6de838f6c9152fd3d5cbd18314c29a241a894c1cd1fcc9d15972c7f8e6",
|
||||
"0x5c6086d6f5cf679f7374c55f6d4b6a876ec40452cb92fb97b048f7c5a759491c",
|
||||
"0x2cfa5a82758480ead7f60042b398b6077c1b956e5f5ec50ccd86e50c0bee5173",
|
||||
"0xa5cb0364140cb3159e84d816e1accd65e309f5cf1b86eb83d2a10edc99ab1715",
|
||||
"0xcc2abea1aee03a7eb8daa84c274d46aa8e6f72f1bb5cc383f4c5f67fd80b6312",
|
||||
"0xc8d217b113596af546111f4c247b4b3ccd4d84658ddbb98e9b5300f4aaa4a2ab",
|
||||
"0xe3f6fd83773ba536ea882ad4e6d833b3d23fc5cc6a8dcff2f97aff5a7f27bf6c",
|
||||
"0x13b4237edaaa376a23abe8b34de7e32216cc588c48059373b1ccc04cc4ef01b5",
|
||||
"0x28314339da657ea6639c54e0b1c6e351bdcb99093f216205b1b0097d10345749",
|
||||
"0xd1ca7cd047ccda72f7043f0c60e1c99d4e85feb7a0a492228b1d996e1eef5994",
|
||||
"0x189a968994620bfa76674de1dfecfcb39c693939b2a30e0b8449980f7beb4a20",
|
||||
"0xfa9fea734ef0be6e6633e0fd275bf63245000ad43d5869115e480c6a047918a1",
|
||||
"0x691275f9193dcd3c22e381b4ec23d2faff1ed6bc25afe2347734bbaea89d9753",
|
||||
"0x13bda92eb5954ca11986fdd513164a43f3c9c8752dc7bcceadf9d935ad459f39",
|
||||
"0x52083bade8be4b10625c036d4588ce6b3e59bd27da5d663e53b5c5113579d48d",
|
||||
"0xcf689491a7e4126c5377bc7fa6283df1aadaf99ca82687585404352f4f09bd40",
|
||||
"0x3703c8b32f15f6fb2d48a66a297ab6bedabc7f1547c646cd3d6929f3d03619da",
|
||||
"0xef071a6b1ced8b22869dd8ea825889ec500ef7084dfae02ac2c30b0743a25a01",
|
||||
"0x7d4437b4ad2a1ddcdd1dfb0e1bf3dd013ffbb9e30bcaa1cd08dea9b906c4b6cb",
|
||||
"0xa64d95e73467548da0b239f12dbeaafcc6cca33503f376d3da08d69b0c4dbee7",
|
||||
"0x1d0163fe3a0449f62ac45d0b9f06a220fd24df7fff0762784550783172b53f41",
|
||||
"0xe3f3f82f8409c1ef478881ed0806fa46cbf2097faec716c4aab5a979a7b086c5",
|
||||
"0xfe21c6d36ab71c121db0852c014714f1478f81c6d03d326a6f35740682259499",
|
||||
"0x4f907f52d437c5823f20a5a78ecfb5d7d8a67660d963430bc990bf08f666930c",
|
||||
"0x40d3ce300f6cd0dec0a300e4f2e8613ead69a3f434874612ec01517fa07850c2",
|
||||
"0x0b52bd9dbffe0e5e7b82120e5b157639328678999cc5fbe0d8df1ee37628be1c",
|
||||
"0x6f20f4a01fe4461311635a6aecade16769e5183cb7d0c7d1e7920e66d30b8b55",
|
||||
"0xad06ff74d2d496dbfeb4cf5ef2182c4210b9959677758c3bbaafbb4d77bc2294",
|
||||
"0x1cf52c489ab70a7a9349a30bb839e62fa27580fe5ed2138e723e1e19a276f138",
|
||||
"0xa7fa1fdfb4f1c0c3f257756b3fce92631337bf6d728e9f1d8b0c664166f998f1",
|
||||
"0xca15b546d7e8e4e54aa5ff7ab576fb21b9da5f9495049823f6cd16b174014d10",
|
||||
"0xbec49f305f6cfef5c4192006d3211fc03801604cd17ed08f1557717cf8eaf082",
|
||||
"0x12d0863ea2ba7c7f152b494966276252748daf51c78d8e9afb8b1eb242e569f4",
|
||||
"0x7a5bfe9860427cd13209e973bedc59d6b26f2b26aa51f048580eab80570bb5da",
|
||||
"0x89153f7a218d56a5aa7c7eb5d603692f1ae7d33ba4ca71811a9ad22e0cb9a961",
|
||||
"0x3b817286805f105dfbcaa6d6116f55151885632aa5a1e563864a6a752e4ba271",
|
||||
"0xb7e6558399a7abd11a53a1b59aff4b38f2f72ffc341a9835ac504ccbc9974b8a",
|
||||
"0x002fa5217510abb7d1c7665c82536595cdeae9065aaa20b3a8ed96e27f84cc62",
|
||||
"0x7d212d4588f4cc28f0ce414294fb4454ab61ad264e988d0d6c75ecc2d5124216",
|
||||
"0x4a87e03523d9d8865c6c998a83a46c58a5347343aed9522f8324cdd1bfc087e7",
|
||||
"0xc1ad37c002703037d78cc742903951bfbcecd955f31d310317f62568655c1f49",
|
||||
"0xb9369a5db6984bb236495de201a260b34292344cfe189d6f0352c92a2b34e373",
|
||||
"0xcad20cd17dd6b0569d49813592308a7426d7b38d3e6c68030b63ecf323a9733c",
|
||||
"0x29dfa65912ab6c45a61002df9b6c1d5efb19420e1c751c915140e50b0571955b",
|
||||
"0x54d328d8abba4b4235943658648fffb71bbd1f709b869f703981a6daec9d1745",
|
||||
"0x78e10bd72223d3138b668c209ccf3de9bb3104f983b5f8ded0c077543be4e2eb",
|
||||
"0x71db915f9eb5e303d092bc04bbdcd7a92fa18aaef0087e057f7cb74ae0c42be6",
|
||||
"0x5b5fbcb2a155b97fb1c172b9cd8e44230c45ace454d48714aae771b33d1a70f7",
|
||||
"0xbed5561d7dc0d7536d9919c249e9c7311ef56cf1af46cc4e5ce0d1d94719e13e",
|
||||
"0x53bbdeb4f2c27a9582ad5121dc96279ea70b4b04a090819d23c651673eea4dfa",
|
||||
"0x269f7e304c9ef03f70301dd5a62bc4f932e4e7c4d2cf3bff22745a6e00ba6da9",
|
||||
"0xc581e1f328a0db1245f491f590e80fb7cc6ed8516d505a91b9d0179c3a318963",
|
||||
"0x75a4d02143c90eb0b4f99c555a4663487b569f7400a0baccf102885812e11de7",
|
||||
"0x41509e2b972ba8e919e2e36deaaf88d7102f3afc57999a03d2b2019dfff8fea7",
|
||||
"0x98cf6ad904dbb9d3739ee3fd5dfc8b3809697763f7e9a9ef07f770bf4087b6e9",
|
||||
"0x0086e9b6d28908553abf7c69d92679ec513d9a2e6b05d7e58e2a73c7c950ce54",
|
||||
"0xac8d8b99c4def8e8bd19d0103fde949e2d0d3ab1168015c6b51635830185dc72",
|
||||
"0x1deccdf3e7a2de364456cf4ca26d66af4d053353d2cbcecef2901e8e64eb1107",
|
||||
"0xa88c69866b2b2ff58c80c7f4858ec89a94d57ae0b2f034fcc33ff926b24ce119",
|
||||
"0xf6ba8e87bccc4c1ea1ed826aa19b2f9580e6a4b057259287a2115ff5e23789a1",
|
||||
"0xcb5dadc2403df2e173f0edec17a68926bbb5d738c27312fdd0f2a16a7a76849e",
|
||||
"0x8a8724ef233d58277d04030f53923cb993984757cbad84a636b147a662a821cb",
|
||||
"0xc7b8b84d234651d6a1850e06b2ddbe7ce270216d82f399cf48ec9897d036de0a",
|
||||
"0xbc1ad6afbd752fa72ae21f81c5c5210b9dda3071d010db36395ecbd04590b4b0",
|
||||
"0xdd386b46fd7030c485996644eb4b40adc61c666d9cde21df40d553a9595e0984",
|
||||
"0x3434bfe056b388286e5b68b8b7b24d9d4f2873355c344c45e2104568ff55fd08",
|
||||
"0x9e3d11b88f6e5d76bbef918e1d2532a475d2e90a9234cda64518eb3fece2a68d",
|
||||
"0xdbe894b91d6a3cab4418b6bae4f266d7978ca9b12796c5ca664720ef2b9f5d45",
|
||||
"0x0bff7caa357943200d7b5e02a208cc6277c183f307d612c7dee1886988b247e7",
|
||||
"0xa94e5bcefd87dac0cc2bae19afab04b509178165de79aed98562ae88aa990ab4",
|
||||
"0x2548c84be89eb2f05ee222fe52f26f2eff73d1a1d57b90ad23584bfdfc523d6c",
|
||||
"0x0eac1a412389e244a843c733a948b70b72dcc231a8d9e8680b6141760d7d1fbf",
|
||||
"0x1a6378c731f116e421eeec34afc3dee7b42e3233f4c676aff8571fa377d6fa14",
|
||||
"0xf35661f3d06ea16b8d199f4613459653a10402f2247427149b0df49112649bca",
|
||||
"0xb7cf13c95711c7e68f9bd67b2a2c579f10a05e97327c67583bf9516b27c39a53",
|
||||
"0x8b46a5b7df22c44ffeb097c2bd7c9a4f29430d68210acc721688cc1713415bf9",
|
||||
"0xc29b90fb6e12aab1007a3d4e1bd196be97224d8e8f6540b2b1c48efe88db4f6e",
|
||||
"0xc5a1feaddd996c12195c69e84ecfba76df1de87a389b40f3839a77bf26326076",
|
||||
"0x6b6dff3eb189efc0a6c63efaf81fa62d97e2bf117ee0e336f11a4dd41c338b2d",
|
||||
"0x39e7e6c3e6cc5c5c2f6869e0fe9125062be7176a416ca970e9d863fbce67f158",
|
||||
"0x3523c2958628fc823a2ee64460b47a9f407b7838607c238141942b0c0ca466b2",
|
||||
"0xbefecf8563da251300f67ee0eabc25cda56d6477ff748e10fe52f1708706f008",
|
||||
"0x3995668e8c932eaf52d4cd1ba671aaf923a5ebcd3a76ecf6d8da7546a9aae5b2",
|
||||
"0xce966d928535798736714d5302902ef00cf17b82c680dc9b4a3c4076e363a99e",
|
||||
"0x4be8ec22b32879d8a4ce0f5cb080489179dc7f264f584e42a4c825cb4d9123e0",
|
||||
"0xc74698ea33ccf4df96969499975e05ef02643a74a101138c85bf44ecea503870",
|
||||
"0x797328e01ee9e34c81ec02d14bd2344659bf6fbf05c331d6ceb7501773543a06",
|
||||
"0x0980feb796b32259fe1873cdac1a6a8f35d2cb3b5033a133a65ec154b0dc256a",
|
||||
"0xc71e68cd250b975612d0507e4d1f93c3b5e701e1df7c5531becec7f8e7bc7ed1",
|
||||
"0x420e240ec1a8cd42834bcface9583283fd104ffb20d216c804edf5b3c534fc8a",
|
||||
"0xaee71494321dc715b20c984d901b14c382f1b65a4f90f3e78188a86454ea2a79",
|
||||
"0x6abedab72e5762047089fbd7a97765776ffa8e5ec7e697f82729bb5f1a0a6988",
|
||||
"0x1977721d0744ac77e582c462671d6149c49803dc1b9d5178e7a735ef72da8706",
|
||||
"0x96254fa25f162b4d0af2d64fa6e47c886ac9cc81af6c7949c8ee4aa7e67e45a8",
|
||||
"0x558a616c487edea428d39a46165c3a059c8fed9edb1d4fe53d18bdaadb3530f0",
|
||||
"0x3dcfd178eeae5724999bcd99bde031d06fb630703cd62a0a4743ebb179e1d33f",
|
||||
"0xdc79eb70fc635b5fc0b8c4a6fb4ce8b387aebec78df1825ef242c3387cfa7331",
|
||||
"0xd879a0d98753df27fc6c9adf37ed41d2eb8fb8a9d0aa427034659c6e50a7dcd4",
|
||||
"0xcb0ec7547b79371500f9f2e91b835ccacaab21c2aaf81cde48b8de8a0c62cd76",
|
||||
"0xeed5a9d2b91c6c58ca0c03f5d8d80db4a96da230c45878b4b2fa609b5d250498",
|
||||
"0x32b8bca91321e18e117d32e37ba9e6aa8bc1f792ecc44779b6783071044cec18",
|
||||
"0xabf889edea725c99b393a482341e03b82852ac73e27258a708296a3403fbe941",
|
||||
"0xc55939c05471d73be4de2c1fb3f07326b6796f98a91b7bec0e266b87a7fb2815",
|
||||
"0xeb8894c24486afa4a2a31e8dad510376ddb903436263b860ac8991aa7d66d1fd",
|
||||
"0xb105f9b3d63bb3aacbb1101190cf9063d624f0b0399b782d509b0e716df41933",
|
||||
"0xd5b682f34ae722725d14727428a96de156262712ef6ad49563ebc12a75d0c85c",
|
||||
"0x8aa5d6861854def6262e86979f5e96bcd5474d7cd0cb1272f3a099d1c03bb04c",
|
||||
"0x609885f9afc728ec12b427beca31b632eebfad78f30ccd64282a4f9f1593509c",
|
||||
"0xce2dd1bbdb09ae148f2b6d17636788b27ab7afa2b5b55c437255dea28977e0f2",
|
||||
"0xb71191d5083196c16481eea99b8b1e8f8d59dcc68a2f7ee908efd5f4c57b9e41",
|
||||
"0x7239d211d9e97227e77883bc9c31b7762b1f811c7cca948ff8f7b5753bc7559e",
|
||||
"0x7ef67af5683952e43427227c2f4fc8142c6154d60241b88bbcbcafdffda6ecaa",
|
||||
"0x1ff6023ee784b370ee7a0f64b29958be3fefd8df5c1ba1cfa3e6902061bffbd8",
|
||||
"0xb5cffe1c85d4a096beee99b3023c8b0d611c02d525f8fc28bae13ecdec33298d",
|
||||
"0xcd30a3426ffe2a11178e66031b141599aeba0e8ee9cb4112a599b64435a03af5",
|
||||
"0xce40b45db8860ce0932f2de0bd9fada52b42dee0af04522800039d345ef11a68",
|
||||
"0xa8a34d51cda56c627e9b5393121ae1f6fd00167fbb29074d249f22a9569827bb",
|
||||
"0xe988982bc9adfb197bb73d0658532936478a70a1cb3eb3ce887022ad6140d296",
|
||||
"0x3507f47e984170b96ea9a1ca6c5aa1f7a3001b65a6ac377b432126395ded2590",
|
||||
"0xcbd052896ec1093b29db69924583f980b2607b47c02629f33a87854dff2ebcb8",
|
||||
"0xd8bc082d23f34a737c2ff29dbb4a00cd1e3561e6208e622ca3e2ce0e94a0d195",
|
||||
"0x5d70fc0230d0f98cdd9ece68eecdd10a138e969bfc1a75d8d05a25fe36e7d9a6",
|
||||
"0xc288e634c5d346c78d694a6baaf9bee375ae953ddd2f16a24e4869952b66f27a",
|
||||
"0x3803c0cf512ad51eb8b51e9fe2eac0be9d41113d3fc0575b6f255f8637a3e8bc",
|
||||
"0x0c6c1bdafa2b072d96ee0af733e2f52fb275fef79b25f64dd8ef40723f1a6c9a",
|
||||
"0x485a57ed84df32298477c631079691d4906d36bfa881aa4a62915931a21e37e9",
|
||||
"0xf6eb84d895581d563c393daaf6ea5d82f0a0a4e50b5b4e079b4d70e3ba5bdb44",
|
||||
"0xe793e01ef0d8058aba5fb4a85f2e6c202a71edd641e323b6f6ba42783ce93898",
|
||||
"0x652d4b932a634f32a60b27f7c59ab26b35ca296a0dbc5484e602cb134d88a3ef",
|
||||
"0xf5196d9bf6965d2184a5381c9b71ae005da474806c6926e9c5e3292863e8bede",
|
||||
"0x0d46341310ee13f4cc693588c0edf899aaea3ae5abda6ffb57415b6842e651f6",
|
||||
"0x969d3c7dda1caedcb15d7e144511a6e5ed38614c64414ba74a7529bd69ce5cf6",
|
||||
"0xff9e73f4eb9e19afec1ef71cbcd321fa6e6bfe9dc9f3fa2fce45ae2b899af998",
|
||||
"0xbe4db0c9bd7fd8cbfb00b3d146ea965258b9de545004f430dc2d7526fe10a910",
|
||||
"0x9e2529bfdf7052200c855686a046c3be25b93654e56fd5514311929d6bbbe52b",
|
||||
"0xb900fceac0f3990db4a6aae238271732632402aa838052ac0ad223b218a82b0b",
|
||||
"0xdfd7f34fbccf7151549876d96ee0a85f77cd9ae41f514ace7218ad526ed59b40",
|
||||
"0x68ebfe7ed1b187ceb7382780f142084c91d2254761a1a044609e6325b8a5323d",
|
||||
"0xd04cb8343833c53e4820e60b08573d6d4fb38456b3f2116e29858af483cde125",
|
||||
"0x69fd9199d578dc51ab7cb55981e3748d166b447764667ea10d70e9cdf7e2ce96",
|
||||
"0x25f63dec762f9dcc0f8f46357a7c2cebd5988b1a30d66ca71a2a2103d4b4e9fa",
|
||||
"0x01da2a253712a9d0116dbffe089fc7377d18671b3cf6560ee90c8a1749379605",
|
||||
"0xee62ef05acc1646e48b6aa2822b54e7e5421f3a22f5abbb9f067c30aea8bb192",
|
||||
"0x81c8c6a098061ec62e12a87f5702ed7bdcc5eae8450765f77d1a79354b9f2c6a",
|
||||
"0x95740b63373660ce0a05430bb99da911e63dbce008c4b8d3c51253416f23fcf0",
|
||||
"0xf18496c657560a475910e2a0bbe4be5df0ac56b25cb04d5b98e028175f49a6a8",
|
||||
"0xaecd72638133e3f05497c3d8d505dff33a6b6f6d309af35a67c6ac10fe15cea8",
|
||||
"0xed73a92a9518e5a5e583a750b5d6422c5acf082d9aa671bdf9e4a060fd95c72a",
|
||||
"0x7f135f642bd31c393281320de0dc30975c17a21c51c4e30d7c42fc3fad5242de",
|
||||
"0x0a788be501d80914aa2fd995cead69c17fe04bcaf5db86da2bc4c868513a99b9",
|
||||
"0x538e66a64ff22af3d7b6db368a706e9d981df8a4edb2dfb705e72323aff7e5f3",
|
||||
"0x50c90a4c8affb981ebb9e88d695050a2b558e340d333822f7373b66cad53b973",
|
||||
"0xa090d0ee33cd0d8428132b6d87005dd292b7e28be87f553b4e52566f976e6824",
|
||||
"0xc13b178eeaf966dbbb3d717a163ea003fe99479903e8cb78b53d1e4d240feac4",
|
||||
"0x9ba9806d4a0b20cdd8b69d4d0197bbe7a42c1cb46a88bd012755fbbd785be857",
|
||||
"0xb836710b211df481d99b033a9c0d6e09b34fad6f2e8e4931dd1a054dc03bad57",
|
||||
"0x4792e8b038b6dbbf32a228c292a5f4fb639a0f84f8f70004716c5a47331f4c41",
|
||||
"0xcafa97e065f36a436f0e4e255aef9d04b57c6b12d50e42e99b4f25d148e9d3df",
|
||||
"0xe9e2934d6f8841d486db3533ec6421fa0952ba8f3f9020ff9b5e4fbbd8af817d",
|
||||
"0xe096be0f831214ce1007d352236d2df186cf738b9b3992c0d0943f1b6a76e917",
|
||||
"0x564e83fff721a1e316572dc507cfbb41ff8dc306436ab26b315d23b8f002e390",
|
||||
"0xcf3a00d76c6a4645ae555a96e0bb6f0e441baebea9415db5304d6d8639c03bc9",
|
||||
"0x303013d9ca65b0473921f83a18e5cf38f882e715022554066b72cbc3368056e3",
|
||||
"0x8e47fb7ff881e845958ae6dea28ed9bc3827c7b296ca1c151b7f24b72174bbeb",
|
||||
"0xa87d1ea0d2853c966db0073ff09c85a1ff28117f770239b11369eab1e1f31454",
|
||||
"0xa7d484e51d3c4ca27993f21d5a488ba34aac2ebbd6005ba0ed8f125bc9c9139b",
|
||||
"0xe2811f00e0d3f86193e59c100d7a12ec43f94c6c2228f3b95c5838244abc6406",
|
||||
"0x5e5e04bbf350160da90977ddd5620afb3793f57c1b81306920622ccf400d6e9b",
|
||||
"0xd8c7693ca7de07e9a09e770f93775a7fc3acc3db6ef33e0d84c38ef6d2ca8108",
|
||||
"0xfdc05ae6bf5d112dce90ca725ba52d85636c4c193a7a65d9b6f6efb7ef6a16e5",
|
||||
"0x07abd5fd4ffd9fe50a59f0f32bebf2d2d6ec59476efc152b3bb907ae10632ba8",
|
||||
"0xb6afb22be6200ee06100434548ea3b0b378b00f5282c377f24b04c3d014fb552",
|
||||
"0x199d2c213ee2277dcdf24af1da8f6340dce3b2b9ec800f1a31a2d53e7429ba08",
|
||||
"0xa0c0c4e4398e7d8b7e4cca3fa16384c4d911e24db2b3f1702d9a23fa7bd67fc6",
|
||||
"0x953df624be719579e3f63754b521ff66a5d92c809e3f809e45b0ddd4f1d00227",
|
||||
"0x89e863130cf4a03ded7581c5879fe3f4ac10953b042c4327e2683941ce234f1d",
|
||||
"0xbcd8e60a0e0b747b15f43b1b3172395d73be4514bab8c995dc61b5853507f45a",
|
||||
"0x8efe4df87171eafc2f5516f9bd9c6dec560fd86184f436a3fa9367947ba6acc7",
|
||||
"0xe78b429f1cb75f18c164d32a394e7490d6ef490f7ec26767506e1531971a2d8e",
|
||||
"0xa85bdc71975dc7a6f91ff1c71926adced0293139f42d3dfae448b04e194afa39",
|
||||
"0xb2850db8790941c5b4728e90c60c5761f309ae78a42b7a32f7b6b414417130fa",
|
||||
"0x45a1698db7c1793b66031dc6b9bdce75e9db03cc6d5068c98a1d39e5c3353959",
|
||||
"0xda78cb4f7dc73eb3da39b7841373f858e7a7f8983637d3d41337f07e669d5611",
|
||||
"0x4e7b03eaf17a665906340bfc5d3765c1981b3ad174af2bb936e688520cc7507e",
|
||||
"0x9d1fc1af5a264d076cc49b46d88857c733929ed63d3a9d098f9d02445ce137c8",
|
||||
"0xa58779d88ff3e4f15f9cc8f3aa4e31f29010259bfebb358772a032314248b9a1",
|
||||
"0x85eb21a68a7c544090dde6166e19f18ad527af568cdd662c84098f3e97b6f406",
|
||||
"0xb95a7f4891bb90cf61b5cdc8aaf30440f901bd68caeae2a3246223c1f145cf4d",
|
||||
"0xd26f9c16d45ae57ef27e31bf107baeca391a51a0c2ce5634e53115e45e8e7a3e",
|
||||
"0x34692e04bf78c426ca367759a213bd782e47e2e6842a8f39f62a1030aca31775",
|
||||
"0xd5dc5c4d619d78682034eafb73ca435db6ab53583a7692848dee27820e819f75",
|
||||
"0xa23bda165dd7284398b7817fbf85d3eeba7e7330bec4adde2069e224e17dd2de",
|
||||
"0x4fdc06315460f1e83fe2e92c3d60acd5d7a5bc96066fc8785409e81a1ecf9193",
|
||||
"0xbf4690969f21951ae06fc3828d40fd4a965682047f0bb6f847d080ca7f2fbe05",
|
||||
"0xba4f99d6bafa70a9f08fb41e0c3d7a2518a360dbd2ec98893443fbb256174a70",
|
||||
"0x9366600b95717d1c4840a98fb3c2db95b71374294144341d280f32ec4021c8ce",
|
||||
"0x8caabf9d8a063f2fb18912ae4b2d1be719975cd03b543b79863ae0396c0ae625",
|
||||
"0xf0ddf8b613a1059b97141affb960bec51c331741bb19f85a920adb9b7999123a",
|
||||
"0x81743e8fe0f7c4a2141dd9ab1eec8522a6139df847c9430acbd84df0ade0a9f8",
|
||||
"0x00f7006ec05ff418048bc59526643321dc05936b3c549fa8136b43fed038320c",
|
||||
"0x8cb3a50c86abb41376cc01cc22691a8801a67690ba23b88760c8495ebc9f855f",
|
||||
"0x2279d287f8233c0cff7e31d287d417c1d4721e1c28c3c8ccbfa06610b417a0dc",
|
||||
"0x6681955ad06c3bf7172c69417505f7b8eaa17fa861fe5d8e6be61d53d9774979",
|
||||
"0x7130927ed3a412791f0dd02b874b68b3a393c1ac443718eac7afd4b1f5f3367c",
|
||||
"0x82ddf79f4eaab03c72feec7fcb49d0532ae68dc4dcdb995a06e79efd1875a97a",
|
||||
"0xe99ca08624ac8fabc9cb11f01bd2968ba705f87724ec4c867107c3e82b72c648",
|
||||
"0x0349cc1960fb5f891d9c780fc78f44f4af7a2f06e409cbfae5241f754585b873",
|
||||
"0xc31265d24c7bef15f05303658ab150bb8848e63ef6e5bcd6615ffc59eab4c024",
|
||||
"0x6d6268a5f5e24edc4af8d01a0ffbb406c2a0df0e940bf2042704627e7e3f1eb4",
|
||||
"0xefdd6251b7ebe415aec8cbd03314fe387b9fdaaf910fa6d21bd9d264877a253a",
|
||||
"0x2b77e4b8daeb43198d35aa7787e956aa69ec457129113f2e6fd93d14b1d8be59",
|
||||
"0x243fbbc8073d636741793c42f05d14dd27ce4ea263aeed9c1d7a1960a8215569",
|
||||
"0x63db3b3f6d028b7f46af35e5036e1097b31157a24959745eabbf53e5025023fc",
|
||||
"0x546800bdad232ebcd15ca4a15e74e0bad9c1f0a8488797095900273932280d0c",
|
||||
"0x85ab077a505c4e0112ddffc52fca1a08d8868b740271d474ff6e5211a451c24d",
|
||||
"0x8dce8e8789e21e096136fbca59e79511f63cba3d3f91fbd8dd5584a6837e4d5d",
|
||||
"0x1ba5d8a59539a2dd41cdcf40af0e4e0fe41d0b840a6cb2e388efac0db838341c",
|
||||
"0xd9503acc8efb33e700c4c70878b46a928e1f61de25fe6e93586cb7a34ab26c0c",
|
||||
"0xf5b2ec428fea0fca37cb44237e060b472acd11205fc5825b0899235f9950ad9f",
|
||||
"0x5099403d49c4be182a0838de68324dd50d7c457f84157417adcbaa0f92ddf64c",
|
||||
"0x40f0ceb3ae983ef73b79b1c3e93d1a741bd2620c50f1f6ee1bdba8b1c8f076c7",
|
||||
"0xbc4040b4712fa08cffb7868f0b41a7f0f8da7d591da8896f467028437979850c",
|
||||
"0x39fe703d511c7b9e8527e44d2f17ae7bc01da4845a4ad877acbc14233c1e7280",
|
||||
"0xada7144fba64caed4cee909d14a306925afef2d240370ec85d0d5dcfa43c18c9",
|
||||
"0x3862b40aa1f8222f424a6d6fa45348f5e7d572701be890586a6b9087a977ed5e",
|
||||
"0xbae5129ae5f2cc91c929dd4b716d1736b03af127e633f7a9cb57a2b045fc9f6c",
|
||||
"0x2a6cad932ac68fc5687871ee322563cbcb6b3eb978581de10737165b015dfe8d",
|
||||
"0x8e1e6db9c2260c262481896cf22698acab58c529195100a4332fa0aa08385d84",
|
||||
"0x1f38a384237fcee079b971aa71c97f36ecfce1eeb30c9bdda4ecdeab926072ed",
|
||||
"0x2b1c080df5f7725c4c69281dcc9612b142fd9532ae060f5cfba1bdce71854188",
|
||||
"0x7aa78f311a85f28a7c2363bb6b5af105e33f0e5c96af83d4e34bc7ffd5bc930e",
|
||||
"0xb923a1dbe5611771afa4401e13cb01b10bcc95aee1502590bd2b32f0fe7cd5e7",
|
||||
"0x13d1db98b9811e2282b49c46c5b6289eda7c2d46ee89b354f878dd7b3a234f3f",
|
||||
"0x945f821ffb226364cef42e509d4f2af0770ac0d00ca21de639eed0cd4876b77b",
|
||||
"0xafcf8c8871f48d13cad81dfe3e180d870ca0f4795fa178a3eaac6abead58a4d3",
|
||||
"0x87851815137e4f8dc33997639c17ac1b9b687ca34747d3d58f34b47ff220cad7",
|
||||
"0x0bf7f94232ecb6a86e03ec4fe8273e6c6125bb7ad1433fcf217eb53b0c2e5c92",
|
||||
"0x471f021954e1657ea61acd5f265eddce0dea0c5c731a3ae144d46251d15fc7d8",
|
||||
"0x5b08773ce3fbb2670564d4bc49c9d2e5e5dfcf7d76f33da3aecdb3acf7a33e54",
|
||||
"0x7f6f25f1cba164401bd9e59e9d976263b6ea1efffaec863316291f5725a6e705",
|
||||
"0x42cbd8b2b4c7889c9d5449faedcc7249011332be9010795008cec15666aa9d03",
|
||||
"0x54fea9d9610cbd23d8013bceb95e4b453b45dac5946e714141fa6675e1a79854",
|
||||
"0xd110cc1e0ae11a1ba964923c32233fdd07fd5d258c81e420aa53ad2e6f637e28",
|
||||
"0x1f4547eb2058d3b0beacee9e6d7200cbb5042ba0b33cf3f6522c71b83cda22ff",
|
||||
"0x0e5b8aa76ed29a6d150b848bbe151c442a7de6c782fa1f5ec9d6b60155e5f1d7",
|
||||
"0x665989eebcd06908b02db1b022d20047209d38f8840e88bef92eeccc9dedd08b",
|
||||
"0x6a6ddd746ed465282651edaa1701999ea9b5b2db6c37b37afe0b1f91a03bae3d",
|
||||
"0x9cde4996bc407146502c950727300683474176dab6751f981b975f20d6a31c10",
|
||||
"0x07eaf4e65e9961e5b2072e2abaaa3dbe802c1cdc48b9dfedd52843829a54cbc8",
|
||||
"0xe5293358f5c362425bb44d83e79d2d990dfeb7a6495b43a43ce3bff018f74ada",
|
||||
"0xf1b1852a310ac269d78f2eaade9aca6717ca19b2c48bbf9c8077358333cfc463",
|
||||
"0xb57181c61f5a692ee46c19f64c3396bdf448a6b0ab1463f59cf68698bd10bba5",
|
||||
"0x83fac07398c500fa021f6b79e55279bf49c8c1e0e96f79986e32589d2a7adaeb",
|
||||
"0xed88db70b42f76c1452e9b410e2584059cb8fcbc95b4480135333ed6c4916b41",
|
||||
"0x5b296fb879b3fae9103a3e91ed2f7fb74de9e9c5070675354a532ee6edb119bf",
|
||||
"0x62f5be2297ab556625e80fd9b03291d8fd83e74af992e3a07d0961b185b5b880",
|
||||
"0x5e108d7fbc6d767c7e27b72cbe10b642ffc9ed80c2e5b9ca219eaef68f021325",
|
||||
"0x61e77b16f530de4308d2060f35899a9e95ffd73d8e524ef43b80460a3d8ed95d",
|
||||
"0x86cd27368a871f4c06eaee0e2a141b78c6dc00a5531a3495a12a421546bc034a",
|
||||
"0xdae627010cb7e75f0b6cbf1fba4f3cf15e59697ce2a865ba8b69cb64405cbe87",
|
||||
"0xb8673ef29f76daeb7fdba5b75908c6ed83fd96803b20879c449d1fd9af41b070",
|
||||
"0x08791b77ff600cac68326d00d94933cc853b69359a40fe4e4306fa50f3db64b2",
|
||||
"0x6111f5356a918b94fa8dfde7fdd1bbae9ce941ea729b15d9ccd86274556767a0",
|
||||
"0x4b5f974e1b95e0e2c87dbf21c138bd4beb25deb362350a5085712a931811a8cf",
|
||||
"0xc72c316113911b282b801f7ddb6e870f8e5edf711bdce06382b1430248732875",
|
||||
"0xf70ea2f05532bed9884c81a1a03855029f22b5066cd97b20cbfbff7b9b68b188",
|
||||
"0x9d8a61ab53799cc292bff6879a85912b54bd78614a73c2cd0fdfdcccd2c208d9",
|
||||
"0x56511866e3e3689d00d21b34c7ea471fd1096e8f4b2fb5fb0a6f15095d956c7e",
|
||||
"0x4e28cfcbf428bbad414a60324ed466352f5d5b41b044d8df1a46e124cfdf4a3a",
|
||||
"0x6591fd1a46890b0de4c927ca07ea27bb4a120a3637c7cb1a70a4c1ea6f42c7ed",
|
||||
"0x2e009aa82f01adec2287cc28f22d6368c26d28173d42a5c526e6775f7eb33928",
|
||||
"0xc7363139b3475fa4a0f6b0dbb86f70660486067ae23eb3406245fb445e7981d1",
|
||||
"0x6176bef2af27b05ccc2136bab019e9ac25e8b8fd827fc4f19c14741d63d5e76c",
|
||||
"0xa7c5120a9ddd815b52809d4ec16b73775133413751588f10e707bad274f4cf31",
|
||||
"0x5eab0a1f651856d2c35ecc6f6b33ad285296021f33991f92e91d018eefa70ad0",
|
||||
"0xda2c82132749852c24f2857a75da70a12472d442b6cb7e5ac551ef1646514301",
|
||||
"0x0d49f6c38c25987be6b219ee2d06092790555449190ed95d0f362a0a633f0450",
|
||||
"0x1099bdca056b138051f5fa67352fc972f9fb12f9544f1ab2250c46d8f8597c5b",
|
||||
"0x51509e7be3765fc768ec8d53030e6fb23047ae7047819215a6ea5b79f3c96122",
|
||||
"0x7c9e5291c0d56c49bbd1ef19057b196b044cb1d05ccb70fc287793750c2a82ee",
|
||||
"0x41397b9513e3a4196d6bb00af809065ad5803e4a3384251b8b733ac820b84241",
|
||||
"0xed905526fe31e9fc9879b873afc18c0175c315f282ccac73c1e6d13e0daa0c16",
|
||||
"0x075da3d126c26dcf1f929b3f53561828a6a09f7386e3b169e402d28bd8a0cc2d",
|
||||
"0x67d54555df755a4417831e623e3d55eee0481eb5e8984faccc45aa0c1255f2b4",
|
||||
"0xc6b00466e7b14951514d52f62f56b47c016a76c3e4173a47a933d69d12a7188a",
|
||||
"0x434f9b5e8250b953ef7747d1cb7092f8a911f2ed627095515899a5342b83e767",
|
||||
"0xb99273603a1c4b2ea6aa1efd51e66dfece99982f11812f6b6cacb028f43f9590",
|
||||
"0x7a8d8d26cef67c294acc07588626d87969544475f26e771190a278efa59ccd82",
|
||||
"0x48a7f38cffc5ab62ec2ea284ad9f42f10b0c63ed5561818afad444942bca0895",
|
||||
"0xbb3c61686b7572906fb600991a401ccfa2b72355412716eabf1f6c3a3f806bac",
|
||||
"0x69ee9f4b988e650981f7067a22301df3c448c34afc28433b00e27d006b1c712a",
|
||||
"0x9120d910befcd77d6b104c703f7e3569434eb53ae773dc85b40f0e441cc1f636",
|
||||
"0xa22dccc8cf91a63b9b331e1f7c9b25c0169300ba4306f9de50e2b374fac789a5",
|
||||
"0x2c08520e550b1a695b567df1e1f9436ceee1b9c7d09df2c8d84232a1907e40ba",
|
||||
"0xccc561b243b2cc3f1d79356ca709afbe95982e25478fe9046736b092ac99a21f",
|
||||
"0x60a293cf021470cb053f5e27e9ffe43b4819755c3d0ddf418a556c033abdc4ca",
|
||||
"0x7f8a97b56a6b66cf229c8a107f41cdbabc7d99f86f3aa175560eeaa13e062768",
|
||||
"0xc6cbc4fe07defb00e5498a9c3e45909325ec1d5abe5983cb2f1d1887bf20960d",
|
||||
"0x70425ee97b08771476a8136e35ac1bce6fde67a96c242d480d17e5a4a90e92bf",
|
||||
"0xebe932630aca0dc961cb442d9746ad098c721eabe197960b847e867a212baa8a",
|
||||
"0xb0eb66bfc56500b13bb50ff9c39eca1c00539115cc19e1cd4f5fea8203ace35d",
|
||||
"0xb85cffbd649d8e197230ba7372b2c59be0380740b06d75052e45021c7512345e",
|
||||
"0xd1c7606ba6e7beb2bb0aed775e4a40bcff418178ed4e2ae9b9281ea5d4c89a99",
|
||||
"0x250454e85ab809adaa6eb26c064dd560f0fd55da9a5057043724cd937aa0a442",
|
||||
"0x4f73342c8a55a77c3b67402d5c58e77289e432d04c4ab0e94afbb96bab22262b",
|
||||
"0xa71e103396dc63a998fbf2712980341b7da6a47df39e68130ea8abcbf0d997da",
|
||||
"0xbc3dd28fc3a3ddd169cd0d1d058e9d43cf03c258e89fd1ce2ba66a6b18c86591",
|
||||
"0xe50edb767f9492393223ab088b4583771b5995187ce146e205287e06f664d30a",
|
||||
"0x55892af1bb7d980edd5c22930c2a9cace26e74b09b0bf393848592e57a290ff2",
|
||||
"0xe62e476351a206e25f067f4d80a5b528a228f5dea6c103dd4067339699f10743",
|
||||
"0xc9125964a34760943e51a561cc5e104be4755628995603c989d638c57dedca28",
|
||||
"0x7ee5d6bc020739ecb3a405c9e684ae902d6e266492a0e33ac1b42d80452dc3e9",
|
||||
"0x38030f326e2607f193c1cf8d9c9780b3867b10e04f8057ea682fb28410bd32df",
|
||||
"0x9d47941c403ceb4209fb51ce3c1ad8b89819f2bacfb9990647f8568984a75b06",
|
||||
"0x24497cb73e87b012a7363218d1a583b3e081739a302bc040e1c9907656f6c2a1",
|
||||
"0x5e3e695530ea10576581ab7213466f284d2e377f607e7c59553611fa625c0800",
|
||||
"0x9534cf9516382d223daa23fc333085704aec15e4b62a6d9fda17c481dfbb2c18",
|
||||
"0xfc04952efe456ebd951406046fd35c237d6873d98025cdaca976db02ebbeec2a",
|
||||
"0xec7c47cf69040ba41f49b76eeb79bac1676e206e85ca9b006d873da582312932",
|
||||
"0x2a58960a118d1a76e34f46d7e6eea90e288ec959ef16e675606c37167c64d927",
|
||||
"0xfd21855fe75953305a7c8e19cd2d18dc31468609383d8fef3c611d1dca70cc5a",
|
||||
"0x7c36b9b06490dba54e9c3d8116dd37200bf67e2d3b99e7b5f4b1badbf65f451d",
|
||||
"0x58b631c11b2e4a81b8648dbc1c72144c8a15ab3b1f906c4b5533ac1e940cb0b0",
|
||||
"0xce69899317ef20e930429d323827224372cfc7e29379c60cd24c722450951d02",
|
||||
"0x0ae86d2b84581cd8d9592b296a60a60f7cd394719f4fea13ee9ddaadacd0f9ed",
|
||||
"0x6c6a0784c5a2d0c3c0506f69e055a02a1febf3f2dded61c2f26ca7740a6fd9db",
|
||||
"0x3bbe5e8a57741dc74eabc1a231f4722e63c4578c694a5d52ef1f024e1b344bf7",
|
||||
"0xfab03c10df60220e5fd1d6c3918606bfb90d4bc498af48b747fa217aac63a2eb",
|
||||
"0xf490a009f14a88780da39859b79aa0c7f8655fbb3261b422f7b8a3ce21aaa14c",
|
||||
"0x2153ae46fd41c837373748a60d5452f1c4cb0585f2eb6f1f329566b2000c6679",
|
||||
"0xc91e6a8999eb99fa7e86d4b661e2eb15c306af8a04a14090e915cd1821520142",
|
||||
"0x1b1d4da071b219b824052198c0cd5685b59a3b9bcdedcda978e28f14579532e2",
|
||||
"0x5276badc31ceed3b3e596a7595024995e51d091e4fa58f6c405a45e9aed75f91",
|
||||
"0xef571de5046724ac3584a10243910d2d25263dc3667b5bccccdbad5021e4c502",
|
||||
"0x17e31638e5e973e28baa4799a920a9ed5921326cc7da05ffbcfc10b2fde376cf",
|
||||
"0x935aa2b075335f41085f33dcda503a0437740d929c3824afc4cde5f1824b77e3",
|
||||
"0x43ba24acef98bc089dae5b11869b9be3c7bcf0946ece14d5e196f1f36cb7cc62",
|
||||
"0x18fc58f6b87d7fcb01a9318df66d7e986b77d7a9d46905767a69026a3d09c550",
|
||||
"0x91e5d60399482b861a8ed1d9141cdfa7d11445d60ac1a21c28038be23772ac13",
|
||||
"0x3d9b40dadf84ba03636d18737ce3b577b9e9675f595561938b0fe8d07b42b52e",
|
||||
"0x145b9b2e5f21b588b0496b467cc2dcf8add6ad8fbfb7a9c9f3e2a04caffa3d7d",
|
||||
"0xb4a2d0a642e659ce1c3b7b5d215865c08e50c7b33f0a6244329e5a2e53eabc37",
|
||||
"0x492509b785ce0bf040b59ba9b50f57757090293d840b78e48014e64f92fc4a91",
|
||||
"0x533ba967170734cca134585f7acfc48f5dd8457c2c051513555490a2cca5f5a1",
|
||||
"0xd344b4934df76d3fcbeb0343a5c6e61f660a6e3469adcca9906828623faa9c9c",
|
||||
"0x63a3f3e7a006e04647234ef43b40d33e03da66179c9165a40cc0e91b6fe29ca0",
|
||||
"0x4be159b2914753375587a3edf94a90964fdd9e89c1592998acb1123d751d0a70",
|
||||
"0x0f6d760899b6b4d7db6fab9aa3186361a03680c92de8f9f95bcff1f82c14d5e1",
|
||||
"0x856725ad7e808334d52a234a0b13b9746c6a0219b114cbcd15d3542241340608",
|
||||
"0xa60eff01b179e0895e2eed8a388f3393ae564a166d1be4dfa4b9bab253fdd008",
|
||||
"0xa8ea401e7ead4eb084eecedb91d5e5ee547536bc15cb9b801b25f8635216ae90",
|
||||
"0xcc77a0d7cc348b9bb61935397ae38dd6d6dee596e0f83ee95969128a12350b89",
|
||||
"0x15d7e3e400ebd2828a6ccc97ed2c069c08458ccad8f520c8bb78fa400eda6a1f",
|
||||
"0x477b93b3f6677a548b985fc1d00d8c72afe7bc48246865fc3ce5bcbe5502246b",
|
||||
"0x691516a44309895798db0c5d940f9dc43ea548a3acc986f1bed8fc2d203923a2",
|
||||
"0xe2a60192e1327bfc29f48179752441413c0bcc401d3faec66ff8acb69f1b7dc0",
|
||||
"0x788c0d110678f201199f5789083d78e0fadb0b93afd2e08e670d1de09d0c49d3",
|
||||
"0xb93078ba88b186b277f3174ad85499383ff9fd56db0e4d95ef350c9573f94e19",
|
||||
"0xdcc3eb77af8786d5064f3d15ab184de056c08b28c7a5bf90d604f080f0db03d3",
|
||||
"0x8c39ecb82bdc96c995e5926b15e63019f011ece2b35322adc7eb8fe90df95410",
|
||||
"0x53ce5a4e4f689b47a0a7c1b72540fc87272c577503388cf05656c8e7a5aac5ca",
|
||||
"0x667dadf6d3ffee26f04850f0dc2db09f88d4e9df28bcfce4fdbfd6c5666c96e8",
|
||||
"0x8268ef73279798fb3b798609b15d8b57874b247efce7c33966aaff68538af0ef",
|
||||
"0x493b30f1378e1768056a4082f1694385893c6a4011f03101a81b8377eb27d5d6",
|
||||
"0xc444d87ee2af7e250642421770ae6d1d6766863520a47147bbb609cf48c6724a",
|
||||
"0x1408073558d4ec6d97cf1cc3c0f0cacee31283f105300578e3a27d8ab8377bfb",
|
||||
"0xd72bdf26e22a93a668867753d35868539c011d5bf1a381d36341a676571c1f73",
|
||||
"0x1080a36ac0be6476902b0aedb832b15e1eb4997a367d18415ba590f426e3c716",
|
||||
"0xd1cd609b0ed33ba8881b0d83ad46fb9dcea0d6de5b6a7924dc318cf0455f540b",
|
||||
"0xf0267e1ef1cd4d8e373f662fa3cfda5520512bd33b406726bd3a69a39d24b5a5",
|
||||
"0xe1458e86aca39813bca0c26599296d59f8c651ea67e627d124f2b7c5a1483e46",
|
||||
"0xed9c3973a503ccfb9d1fe3791a741af77a95364d0e6559d816e880391d74d4d1",
|
||||
"0xc0089751f9992f792345e7c555f13615e621b6fc802cdd61e08d676fda864c8e",
|
||||
"0x8ab8308a652ecf00de7c867fcd2eaacf84f2aac65adf13515af3fe3ee7a69be0",
|
||||
"0x3081ecee0be32a0b842d93d629adb79fad174fc9c7f6613346c0ee44ffca901e",
|
||||
"0xe254a689eafd19c6686dcd89a06b1abb9001499aa1f03414387fbfe2b19f5a4d",
|
||||
"0xa92bd85cf194870d1aa15738db60b6a30ca6e426e099351560a8ce5d3b13a408",
|
||||
"0xa2f57cf261aab3cfb0928fa160be10a9c257bf2a8f1e51103d4e5ab9f9ba26f0",
|
||||
"0x4cf15ba1d4ce5e51025a2744c7355001e6c20ab2e1b537bbf26f120726df2bb4",
|
||||
"0xf70f4f87a77b54faab70f1b0cbbdf3debc966392890b919e1c0509ac1c9b8ee7",
|
||||
"0x28986d5a5ddf44af8fbe8a68b8656ea5dcbb36551785c6cd5121a17ca33b608f",
|
||||
"0xa859cd6f693e4065161d56cb22cd0d6d5d956c420b956b85f9c0a6b514b81239",
|
||||
"0x3142bd457bdef3d22c0ddc07126fa2797dc66f5a6f5cb47f6856e1fae2acd5f5",
|
||||
"0x13240195f63e790c2a170e0d80fa02eb3cf34dcf201b1e7a02865f70e50ec2c2",
|
||||
"0xe224513d0d30dcb59244c59cdbe0a8e6e7693437aecb3dd1a6355835699b63d8",
|
||||
"0x7b219fddafc455ef34378dd31249ceb4cb23fc848f7427f3128b91abc3101a3d",
|
||||
"0xd4e7b7f004d95074b19c9d18ac954427de24207b3d9dbb7a06581c9cdd4d2366",
|
||||
"0xc7db291a8868c58260331d4c36b0ae595e8816398a7bbc5fb61e00ffb4a2cc48",
|
||||
"0x2ae7dff7f6e63d0324956b2b8dbb022b7069f392e0187abade6bc63b01e3ba6d",
|
||||
"0x81e0b05aad2ac79beb89eb38b6a7455e4b174dcaa9f07a7556875d036d64ac1f",
|
||||
"0xeae95f8e34186329c5f5c11b4ed62a53d665092b0d6559d7bc91dbaa99ad3ce4",
|
||||
"0x0bb11d748a73a1b82f42c2df52404e834b30ded843d28460109d5177dd75f3df",
|
||||
"0x55fb0af273730208f0a197862c7d0defc91b6c47b027b225d3093a723d2b93aa",
|
||||
"0xfd43b8ec425e95d361c8bafb6a239f4e3c9edc68631e96777264dcf7bb41710e",
|
||||
"0xc48523fea3c7ea518327a5bea849765f088904f8cd9f69f17c7783e1ba71a436",
|
||||
"0x0d1a511231e15d7a37f0695422cb652ae319ac71ee98885f4d2c3d14f14711b1",
|
||||
"0x4bf7cd135402f0055b423920ad92f71b4064ee0d7f2d151f82023c222d0f2286",
|
||||
"0x0a682e8df046079882a718790725049059fbfa35171d43ea830ba2cae37c0aa5",
|
||||
"0xa74a929c762e8dde59388c91955eb36b04fb56dbe4a1ee2b05e4fd841222cd29",
|
||||
"0xd69beba4005e2b8925b6d7fcf0caf3c1587361604152bcba34554ae6f669a1be",
|
||||
"0xcc56728e8f7dd56d4d6afdb8f694429dfeb4c552707c8354fdb7ea05fb2af24b",
|
||||
"0x055228dc295905b467901461b4409fa8de5d9ecb5ae74009642918db39a0c9a4",
|
||||
"0xa83391e094c0d0ce3eee5a8318e40f4035ff66a0c259e3e089781db320a362cc",
|
||||
"0x517bb360f6246c1317440f8dd5d06bdf09e90c552f65c86141be3a57c3aed460",
|
||||
"0x156c67e332c3285636136f7984b7a82baef2fd7362b577ca59697fe398be6686",
|
||||
"0x2b1b1689e6aff6e88d7baa7beb7c8f562c1136b5ec59048b257ee418c84458ba",
|
||||
"0x51377f007754fda7e2f03df2b57f8156189cd11e99dca9a2b8de264fb899a734",
|
||||
"0xf553d083b83095e0d28bf5c55e67a5199eb9fa906aca202798fe0b62385aae31",
|
||||
"0x2aac8ca12a5599a080bd3ca34aab0eb30c05fda6190a156572c6c2b47059eb0f",
|
||||
"0xe73e24d314e4c9b1022150ad3bf04a7f2bb388b7f098b761c7e7c5645e9e4887",
|
||||
"0x0fe5f32be6999f5c53c7510a4a4d9fc9fcd43ac628f76bae4e73deca7a0cc248",
|
||||
"0x13596367afd452947ec1d1e662dd84b4e609656b834c5b5a194947edea1b935d",
|
||||
"0xa0bf762235b0f768cea2dc903b53d677745871e3e4d4b2a3177e55e3804181ba",
|
||||
"0xd2f59472fbedf386d58b656919cbae55a8fcf2728ab267e6654c92f8521dd8eb",
|
||||
"0x80b5903b0ad0752832819d16623b6de6744930f16a4b65e6e247b106e81cc896",
|
||||
"0x2937d2a0713811cbaacc151165c173faf17bbd48a8b3eedda0abb5d9e4c29688",
|
||||
"0x624431382bc121c155c1f630739254ea7723af40f9fdd59f805cb86625aec467",
|
||||
"0x7f25d7f03a06052510c4b273462e0f1de88a75c3d7c9b8bfac7463f87bfef2db",
|
||||
"0xebab10354a48b7738601c8a126558601c27ac64c99a37e9d96a1e4e0b606b2df",
|
||||
"0x4fd2600be9f6b69c314ca499198e2ed139c54135d9953f559a597f7a90c33af7",
|
||||
"0xa6cbb5f10a4fb0fb56e0b5370deaf3e17a5d9923681aafdbe19e9ebf901f0e01",
|
||||
"0x02fea3f0c4decf9f4415c2cde324f0896c50796a816e967577b908e930bb172f",
|
||||
"0x66e1956e75b83de2108d3e5a8d420073dd7a7759052b18ba57bc9f73a690b59c",
|
||||
"0x7c1a01ec0178fa52df7bed4253a53036778d16c5f27e9bd8d551d8566a68b6ca",
|
||||
"0xdbff162f6abff748698b1132eb5f7083aa663eaa5f0b45d23728723a8fdd8d41",
|
||||
"0x235f6827df12dd625ee73194e2dffde908478f3ca7b94b39034c069fed46ed55",
|
||||
"0xcc9aaf19d6e950928e182fb6da4ade8956a9e0e5912df2cdbe40ab27f61784fc",
|
||||
"0x0631903a6b2342017d657ee0d46a2bc4fa596188dd07049a9a3e8a6c8702e82b",
|
||||
"0x507efceb9be0e626757b435102d7a457519b380539601dbb0c4b4eb1be0b254e",
|
||||
"0xefa62044848f585c643b41d1e7b953dbc0aaf48c2e1d1e55352dd0659c2fa862",
|
||||
"0x394d2d21b55f664af9883e632ab5188d8a590bc32eb512d6a0b37c56415ff62c",
|
||||
"0xc8866dc79571cb751178bcc23aead8fb58f1b0b4974924c3f5fad97e49e98902",
|
||||
"0xab28093bd80beedc7f0ba976a3f8f0949dd9215f7e4a58dd6d58e856a1fda7ed",
|
||||
"0x75b7831f7c721f32836fb9d318e0b59790ad1bcf410f6daaab45a6747f68efc8",
|
||||
"0x6663609e8afc148de17db41d13fa3243a7b1965c500b0e1c6b4a2ed0d25e6410",
|
||||
"0xed8e18d8c7e391293d67f47f097572f0eee4ed3025c52a9353ac525b7c011941",
|
||||
"0x9e878c269e15a6118fa6d817270e83fad9aafd8e02ccbfec77b7cb20854fcf2e",
|
||||
"0x50f28d14bf0f7d3a9ef50cde5da08bc05ae7a17f1ed49444f2e55613118549e5",
|
||||
"0x69cf59007a8e108c4919e9dd9a32476939d1458f4c48d3c3c3d578ac133e041b",
|
||||
"0x1a3a45397a43ff3dc1257821b11c70102c62e2b5944cba93f486926c733d6611",
|
||||
"0xca9d78286fd94a14d13f7a55ece595045055dee0bf0778b37085f9153f64e8c7",
|
||||
"0x5f0dc585dcfc2e42b5cfc321000b9f54524d0df126bd39ad88fb2ea6cdd79879",
|
||||
"0x0b4b39eddfb68dd783652937ae5683f4fa0977209074a6c7fd5eb8ba3c47aaa6",
|
||||
"0x522170deac3047ab3502a909e9046702786aa3a7afad28d8bab8db83e429d406",
|
||||
"0x9f8949e4215b2f88a16ed46645032f1464960c7ebac5f8613d45e68fdbaeb907",
|
||||
"0xce240564d0bb1197d8db238c4cfcc18c812ec19fce22eab81da86d299d85b54f",
|
||||
"0x0b7189fdab65e2904d39da44e502ed9e802e71c29e0c472ad5aad6c2ca5dd129",
|
||||
"0xb348ac6fa88bddd70ba78e248a9b68194a941334d2035728da5f435c7ea59be4",
|
||||
"0x685ffc580369e72f2a955aee3e534c8750ccaa6fca8a006edf70ded1539ebc43",
|
||||
"0xbd30fc47b0276111ce05f7324c581e9baf6be4f8f295973f765f046675f03b16",
|
||||
"0xca8e232211e0f9d9d770efb0d4579df6359a62951ed016bdbc347c7a16e2d744",
|
||||
"0x9e89d6a8bd44cba906026df9efe6035571c4e6ffa61fcbd8bd55c283e7ccd72f",
|
||||
"0xc825d5d5958a0d9b9c91f184c70ee423771d3eb7c82858f9f061c93062ed1fe5",
|
||||
"0x13aaae6dcf997bb119feaec6e63f1d820919d43a556fce6fba9c5256b4fa17b1",
|
||||
"0x8f84d37191c7fc2645235ee786edf175dd8762e421ccfe3e3d208ae506dad5cc",
|
||||
"0xb15292239abff342e81d8e864a104283a8f34ac314b7309da0e25eb3ca07ef86",
|
||||
"0xccb205c1bedd182e2d84efd9c904360725ba55948428a27df11c442896902db5",
|
||||
"0x6ef7ac2bf233dbddbb2f544aa4dbdffc4066871ec46ca0c2d52feffb22c10871",
|
||||
"0x928c6527362c77cee4d5f210a058f99db0e894f43824d9fa1390f0d12f86f83d",
|
||||
"0xe40012bc2be876404a7d2e46e522f5d591434affb2aae462bbe141654bbc0e68",
|
||||
"0x9d84b3d531754063af426594c8517516b506b7513e6522af6364a50537f1255d",
|
||||
"0x7e702e849342a7bcb54ace9630dd60e25699b5337491c2ab7db5aedbe0742258",
|
||||
"0x3dafb10928e7924f1f0fe45c5c22d19670cbb249f5367699e8e9534490db4525",
|
||||
"0xcf5ad9ac52ee98b5158ba11ac59e511641a40a63b5105cd40d2f0e76a03638a2",
|
||||
"0x726a5fc40b7e00ea477e7934afbbe4717717fb20a63f2a6a23e3d5e4a8c4289c",
|
||||
"0x700616f25343c8c6ef208845a494c89126905aeb24179ca86310fb94cfddc704",
|
||||
"0x4a0c9193314e365b26d9e0dd4b3948c01b3a04842a816a82a62e387915178160",
|
||||
"0x396a4301241851acb28a25ed41899c338e78543c3b4a24ce0945b9cffbc0b5f1",
|
||||
"0xdd1400c831bd337a24c8a272778482aa72521ca705588757cd15cf9e4edafba7",
|
||||
"0xf4e30c77914620e0b54eeacaf81d882b37e327667ecb2b00b8e446a9966131f6",
|
||||
"0x0122c2b14b1a38670dd7b419bae417c92e2738acda9260f9ed7e1c4c478dd489",
|
||||
"0x13f38613722e1ad77a5711bb5e87800f066519f33c186e395be3020b26f9951d",
|
||||
"0x697aa448e31fb9567fefe706f338a9aa263a676fdd12884197a51bdef1290491",
|
||||
"0x1dbfc227e7a118f1721b7918d7c397d5700de868d7f782314e3d15f314821027",
|
||||
"0x0a1bd08dda388fcbe87c4a1ad36b973d5eb1cd42316e462fff555d51033cefe9",
|
||||
"0x46468eb78fa82c6e85db8b4698f6dadcd70146b898198e8bbbeeb767a29515b8",
|
||||
"0xbca2b6e0d6adcc468eb950ef14f6d523a2f69161ee80cbff341ebb2a7ffa1850",
|
||||
"0x34f9bd97fe1279a33b011dc0dc0be14d4fec140d28cc923be9e0ca371df28561",
|
||||
"0xf6864309d0005da1f8fb9cd6c829953c2dac386cf278b2a17a1234c18b140e0f",
|
||||
"0x048eeaecee39626c3e4b74f795b4e114e5e28e2fb43055213f1646b2c368f9ee",
|
||||
"0x2afb516e8b70d32ff7a3570f79eedbb7938c3c466cc0fcdf26f365bd3dfc17e7",
|
||||
"0x5f909f050567e6e1e4eb72cfeb32ddfaf21f5a2b11af2a1c986d5c0ae1a0396c",
|
||||
"0xd4eda1725219b7cc353cba2e52a8a4ba04c7e788c872daea2151a0ded5c2b099",
|
||||
"0xdd87a715a242cfc1fafa12b377738417b8fec4fb666fca9e0c6b955dab4fe182",
|
||||
"0x23ada1e98cf5dd103e47256bcd1e2288b95cbf1bf3161c7ebd9fa7a22aebc93f",
|
||||
"0x783ae20e558493e86af4a10f4c2a263774a468c86a7ae08e2a47bf29a792def4",
|
||||
"0xadbfb8f049f2b2b55a8a5b1848e99d13208297576d3b293a03d18c95e6e43ecc",
|
||||
"0x0fee2df760f5abb966a150b120406fb8eb7b7c8970400e0a5446431523840d89",
|
||||
"0xe0283d1ff97fea7d9cbe32ba780718879c9d117e54a60ff5bfd9b14dd2cb45fd",
|
||||
"0xd50698f580a53978b1f7a7b8e89efe5ce0e094a6edbb1fae899b28a00c245d4b",
|
||||
"0x090fb313663f5f5ef50f39e9fa95161cfc2113d78df444039ba775a9c8b319c5",
|
||||
"0x01730bb508aa57e6662c7496c1f5505680ea5cfd7df0f2be19b53726e4d02b29",
|
||||
"0x9dfb58edffcdbf0198bce964f17a80bf49198c3188e6bfa7cadd65ad8eb5068d",
|
||||
"0xa2f367c11bf28bbe6eb9600ab5257e20c5723c4afb00f2c239639f14f08b9568",
|
||||
"0x959368c2bf88ebc8bdc79855a416f60599a48ad988e2138b4906d68c0b2de560",
|
||||
"0x7c592090a0f000b0a54a1f87cfef12e8852f768b45c3d215e98c515794ae38c9",
|
||||
"0xae331f6087d6d7fef950455b69719625d92ef2daa5a43ab87ec011768915c8b6",
|
||||
"0x0dbafa99763d66a876aec21f04c613e71d990dab0e0b63487d9077123279dfad",
|
||||
"0x6557a409e545827b8319bbe3bf8af198a861ac72214108f242379ddff0b641ae",
|
||||
"0x950b7d33192044a93ddda170eae89170c22ef2fcb69276e345501be10885b2a8",
|
||||
"0x6c448f1a1128f1fdde6413b715c840f08b3a7be02d235bfdebe4fa83689e3c98",
|
||||
"0x34f0f02fc19b11f7fc6ae3a6462d6ed7902f9fe91c392d5f0045561101f93985",
|
||||
"0x7c20f9f63293af90f5e74ca780602aee3161c0419dc5082014e267c7740f7d70",
|
||||
"0xe6a92ee909560901da6af970417e60c90badd5375a06716b31beaf18c0aa8636",
|
||||
"0xc8407118d397623d12d8f74bf56667c3e1dd5a6d7a0407d0173cc3deaa9b854b",
|
||||
"0x612345790bff5f5f6404a8351a7fc2a514fb465db99c8d5dd7d2490e210623a1",
|
||||
"0x4963d52b2c174748924030ce6693b125113550556d0145ad392ee98cadc9f528",
|
||||
"0xb3c7da54e50a98c3971cc4b72bf975749f2ab5e6a7864f811079d779e7f596f7",
|
||||
"0xbfa1711dab9f4b0e82b95a5900f60f3195ebf8f93d7d9d489ef9a05414602f41",
|
||||
"0xd028ce049e1e4a62c3ce1fc9417ec7c2608bacae61f5653da317ca8e83cf6766",
|
||||
"0xf21938c46a71ed8b5ac97ee6df2388e8c55fdc14d1c11ab525d18862ed6d1bfc",
|
||||
"0x9b2c3f3396eb2be6b09ad9a9bfd718d50dafd91d232228ea530d68a4a1803223",
|
||||
"0x672325d948f44f2365efc3e73509e673c2ae763a6292946a0d3a9e55c77cc58a",
|
||||
"0x72b163a99d809998e87e4679b05a2d13d891016d8cf613d3c3ca8464e06b4b0b",
|
||||
"0xbbf71a91472273686c2bd961e7ef163673b225c8571cf4d9cdbc25abbe7c986c",
|
||||
"0x1717c5acf94a4062426475a4da78fc64310edb44bb3a7777597bd38b8243fd36",
|
||||
"0xe701e3e57d8a162060e4926cd876645f30862a90bcbbf7a1bb25147bc849d7c3",
|
||||
"0xe1601b1713013c54045b17bd31bd1548c8e88cf5cbc0743745253ed71436abfa",
|
||||
"0x7e829e487cb8e902e8c573cd8f5f40309d45fad3e18ba95add98742f34021100",
|
||||
"0xde0bfcfc0a7ea3cb1dbfdc4b97e07ac8961e87b172199e2db96a9f3d50ee08a4",
|
||||
"0x8a8fa6ec0bfceb9c7936f7cc6710644f14d5d2f53bbe8e61ebdddcea892bfb9b",
|
||||
"0x937d50777fc97596beb74dd8cd0a873395307f73b25242a3644e4b5e417d63e9",
|
||||
"0xea42ad880f934c42c4fb31cc390db3839415691d34d07880454d72d8a008fd96",
|
||||
"0xdcbcd3e2040fcde9541cfd4e2099936ba9631b3bce98161fd00ac478cec29d4c",
|
||||
"0x4abaf5bf3dc2b12a41d8cee6179b699e72d546e5821537de6afc41b52a8a67e0",
|
||||
"0x6bae0e2c7e47added25aded1c015e70a7636ef08d69c39a18570d6adce5082d1",
|
||||
"0xa026ed1ec9d9784359996b33c185795ffca57be09c3b04d797e7869d5e7bac9e",
|
||||
"0x2f353f90306a8e1ec19d138fe6e05e0e9b2a9fc652cdb0d565bb606e1ef6e906",
|
||||
"0x7c2c5ac89c841106835551508d6fa058e92290714b7eba2f4900cefecaadc512",
|
||||
"0x93ab889dd0d40711ce5f7ea8203bf62a45ce4be2a6772e21b3d20031faec4f60",
|
||||
"0x6648314d63d8b97a43b08d46a8b938c16ab6c9e86726e0b7ea24ce7021bfae22",
|
||||
"0x61a01393ab59b76fcc93bfccd294395b5985f99416f13375a116d49dc0f4e45f",
|
||||
"0x48d4448ea0d7533e29be0b6dbdf08e35fcb1d77b5fa77af94f469e718cec597d",
|
||||
"0x5db14c2c548786dbde38a21c90d5e020f7ff9a42dba40e34596d1542aaa85ceb",
|
||||
"0xc6da00b09a6a9ded3ae155183ea064f8710665ba8f8c671fd023ee24853fe506",
|
||||
"0x8aa669698319ed0934720348ba6bbf7c2324ff7cd7da19d70f1cd7a84b320c36",
|
||||
"0x72cec022c3d512100894c62919cec6b0f023c592533ae789d88c8af41fbfab06",
|
||||
"0xf4797972fadd05a2f034c25dade1bf465b58d758f712438d1098fb12022cacbf",
|
||||
"0x2d6e56359df3c2420dfc12d7e519751e86499eb3ae3966e445dea2ce9697e63d",
|
||||
"0xa2f6740825a17d124f3ffddf79ef11c6153b7c39d58c0705181247ccaffff8c0",
|
||||
"0xdce5c6427da944caba1124e16970a67787a2c77ad378b9852b2e75f755269617",
|
||||
"0x3b382bfdd96d188d3499a7d7645d14884b1db9952a7296f639c730ae05355e5a",
|
||||
"0xa7076ced7a4e838fc71656336d61232e554521d17b8e6f9c019b0da54eac976f",
|
||||
"0xb7ed718af65740c96796770a34969d077e625d8a54bd88a42a87b4b8f16d16f6",
|
||||
"0x145f53c61eca53f76fa5f0bd9eaceddffee5057194795a42564a18f2d229a0eb",
|
||||
"0xc4070964ebfafb3eab3ec4289355c7512d740fa6a2c08d92cf93f3b1b0e3dfe3",
|
||||
"0x0246c044b729978bc210a52e10dccb89c22f9ed10909f60c137014cfadcdad5d",
|
||||
"0x2fa31b5ba8d194a5863201d381e14d2cfac7dda5ee5a033aa9a3944cff76dc92",
|
||||
"0x1df1768ab12b327e696cef043a3dcd8eb75d150bb4434d187acd2acecf89922e",
|
||||
"0x7dfc139ce57b5f4110a48ce4e1c95b8e9fce7b83d0fe99e92120bd60c30483b0",
|
||||
"0x63aa478875762360827069f1333ea6c5891829baf30d64d95072c28b14a7a355",
|
||||
"0x4b468dd25a8826014ac17fdf88c2e9572be84fbee3a5f3945aed657743df5bea",
|
||||
"0x4916fce234b380a5d68a2ead5434bd2b6916183da350cc03cc8b773c025b7a80",
|
||||
"0xf41f640714f3514ec18f28ad5cc4c0d3d234df287c31733cd817d628a0a1a0f1",
|
||||
"0xebfa72e615cae735bc4de3ecf6d22523ee9becd0f2276073504b634881e36de6",
|
||||
"0xb778e9cd4c25f960f4e841b112c6363ccbc0166e2a968ecb619a50465dd8c86d",
|
||||
"0xc3fe93c88b5e27b55fb238d94f8cb0bbf5bc93d4ee1ea1cf8450f4cc9ddf7aaf",
|
||||
"0x0e5a87855596eefbde0f62867bbe3e231b0d5c1feef517dd564212b7c2bd6511",
|
||||
"0x2151b7a79946aeee1d43f23e12cc9cbac2ae73cce199a30f23983110fd18adc9",
|
||||
"0xd73c780aa498e8f7d33e8ba6a6b1cf8613d5ef6f50328c92f5f396bf9078ce57",
|
||||
"0x6a914f2d36f781d4973f4145798730344d780c0bdf5a011f28c8ded1bf21e43b",
|
||||
"0x7542ad864b4c158b1d2124276b96a6006872cb746506ede1204a5c8ba8843e61",
|
||||
"0x4cd8e7d558198702e21c38160c7176e933e4bbfeae7bd0d1a127a0dbb9cbe9c4",
|
||||
"0x83270981afc5f053129845af5a1289d20e9a5c0c59a37239899844ac29e54283",
|
||||
"0xaf5049a81ee76ab1affca890e645b45301d083dd1c2b184df6a2ceefdc28aecc",
|
||||
"0x3b126bf9cec4d38408de16abe4ad3e37fda0cdfb9b529a3944cec0fb0002fe18",
|
||||
"0x207578a3c801ee996262bf48758c64dc5dee340acff86a019645b02d4e6c74c4",
|
||||
"0x3240b16dc703c5319a04391025c2d580eac2002516a58adfd21e5e91a03adae9",
|
||||
"0x591c064b39e7ce2796bdc6eecd9a59454271bfcef70c8c8b1e37ada5d6af7460",
|
||||
"0x0a19a277639a56259a82385ce2f76b5a34f82b766a45a70bf6fdc3faea97b6c5",
|
||||
"0x3b05954fb191e93a96df6bbdf9ac2c6dab554a395f12e8c8873cb3cd7330ad1d",
|
||||
"0xd193e6db7ce63d8a872370e1869ca3b2b0f8dfcb03f660409e9291570041c94d",
|
||||
"0x4104f38d0898f11eb83792364435cda9f9c796717c5f019fad67643d3a5f119e",
|
||||
"0xb339df1509cd46b01fcd1754bb9aa7896f092a4db4ee948ba142ab99cf7e82a3",
|
||||
"0x5ff4ceb016e91332e7756aa2ec5abb8c2946399833ffe9475ed36b1c2c0a644f",
|
||||
"0xf55565169495ebcd3a4e6189e9b57643ed454354947dcbf0e8f36037e021a9f7",
|
||||
"0x01ec24790062d9cadd54f7eef2fff08f2d4c6ae1ec40e4c14e2edc52d4df387c",
|
||||
"0x00e1b46ba26791d220ce9bd1466bf7535561e8a5332244777ee125aab8404fbb",
|
||||
"0x73cf9fc9d87af9c319cb8bfd4d62c7397c0aae246521d5bc65a617e1d42c9888",
|
||||
"0x903c53b767fefc344149aea5c6f012bf806e2295d26ee09c725e7a7844810279",
|
||||
"0xd3794732bb5866755f80f98f33a8582aeaa3a6fcd39e8020c3839f18faffc910",
|
||||
"0x0dc7d1b89601438f6c84b22d78702380e299b042273d1e521d5ab66f78e2ae77",
|
||||
"0x0e1d28ee0d6792943aba0cd4953e915f0baa572aaf8485295e8ec16d3792e16f",
|
||||
"0xa317217fc59650d60eb5a170164dac6d67124adffb9759088003cc3995807853",
|
||||
"0x69f85349fb348be33c270a9b381ab5d04e10c218dc901cf6a2d104873aed4b1f",
|
||||
"0x6fc876e038a0c7282d68e9e5684ced830cc86529e6d059ac502b9eb1fe2a4a74",
|
||||
"0x8c683feeed5f2f29cdf7009b6099da17d107a3b976db0971ecad4fe8ad9ca86c",
|
||||
"0xf6bbf20cfd59c3e48f01bb532aeb3bcd0ca45235069cbc09bd8ccc622b409508",
|
||||
"0x9c0cabf794a61af956116c0b7182d8ec78b540645d4e967714c72a3ba9ede87d",
|
||||
"0x3839c20cdd445ec0634a429bb66cef9bbe5f94f8ac0ee5ca9deb6bd78a1d7a9e",
|
||||
"0x90da2c6b23c61b53730e697cfe3db5978755fd58e69f9384fc69b8084d447cb9",
|
||||
"0xf1b40a8505c734fc4de4805756aafe46e0263e7118e1116334b224109006329c",
|
||||
"0x24c3366b0fa68d33b1a0f09d760759036763ef47fd2b7b13e83c4876d554e73c",
|
||||
"0xbfd80734e935a2af2415d201fb87d1dda5aea542ef4a76dcf26c91fc5bff9603",
|
||||
"0xa5bd0cd46eafdd399d8a227431745f5f08ad5d6fd8daa509cb1b10f50b5f0e1b",
|
||||
"0xb4a541b63a8da08fcc5d9a21535f652eee04fc70cfba7da908e7780fa92ca6c8",
|
||||
"0x26d6434430c7083b1d17415f4f11e53aaa79270dc22045e361329d0c9821f9bc",
|
||||
"0x0db69286e0bd5f808dc58766d5c301ea9e88e22a79ce026d43639a14162ff973",
|
||||
"0xc76640923490407a7983c15d05a28f5d3ccf21d908092b444a259e54ee5b3d07",
|
||||
"0xc0fccc6411f5153146f6c178cc0daea0686c4c471a3423203933ded03baa8be7",
|
||||
"0xa6b01ae91dfc4eec65d6913f8ab446288a103179c3a8cfccb125f2e3aed6397e",
|
||||
"0x4ac1e929f0ffa7b020a599d90c252dfcb63836017e31904e32757c4b1fa2984f",
|
||||
"0x654da4188d385f9eaa3ee0ae3190c7835a9d6b26a9970c092a4bb9c1f54b78cf",
|
||||
"0x4e29427161cb1c9c1f7dc21c0d6deada7d2144bcfe51dcc2e68d921271edc365",
|
||||
"0xa3ad2bb2868c5d5a733676398bcec315e546932932e419eadc1debd92b92bbcc",
|
||||
"0x536472049b93df449ce20ba4daffbb88f9120475f957ad0df6aaf17c3e1a04c0",
|
||||
"0x25ecb3940fcf854205f5f690886df3d1510cc452cfafe16bbf0e6aba639b321c",
|
||||
"0xda2749f7287f3f04255cdc9f35e8612f9e5c81255c19fc0520c828db9f74bdbb",
|
||||
"0x6dbb8ba3285e8773634794b08bc565b6b4aeb6f0b2a2a6473f323ba429e388a3",
|
||||
"0x6b88e565b448346004b50ce69d71b1213ecb014d40b1ca2b62c75e1e20b7865a",
|
||||
"0x024a327ff6987da3a1455cd913368fb5224c25c07468d899a9cd0aab51119bcd",
|
||||
"0xb6efb51174dd044a376d3deeb4f0875c9a79e366cea2c9416e31c2c2f1dfac46",
|
||||
"0x2e2b073b7a50d2da16698ca9bf07ea4f7ca34c9d1cbf29683f2dc4efbb41e7b1",
|
||||
"0x9ec5cffe0496577c7d006b980a21d7e2b1845338a4c1f9522b84ee1351078e2c",
|
||||
"0x94cf61152cd1f41b168a0dd2f92164e77f0eaa2280e74dfbc3a21236f1db67b3",
|
||||
"0x8353c97cf93c5a4db1ac6b0c8d6ea0baecb5031e728f72dc80dd51eae7a71481",
|
||||
"0xa826b527f726bf326b4bf5d140e4cf8b424797d768745107515f01f9dacc5c1f",
|
||||
"0x920c77649a8f3865717ac17498642164350f2a1db0ec7a373d099e446a42c3b2",
|
||||
"0xfd6eeb07733e1c74c801986432805266837a691009205af683828c3d5466ca53",
|
||||
"0xcc69a010251b5b609e898c948dadd3a393b8f5911dbc10ec074a3aa5edbb426e",
|
||||
"0xd60b9792e328706d35ab143240ef40c677f5569630abce688606d3a57740219c",
|
||||
"0xb6a1a98e987617fd9c51810d39fab31248f2e4e251883e4a71c1dfb3949182a4",
|
||||
"0x75a3e7c79f4bac4bd1efa81ed568d014b1a465b462ab26c6c26ee90e774d5164",
|
||||
"0x1808189230382b5150f05b6b5e7aa783685493f3e89cdf190e23f9604d8352e6",
|
||||
"0x344964d349c217124e6354e667d50309c3cbebd1bf81b9363378a60d8de8cb2c",
|
||||
"0x12d6b292ff08dc249bb9c38351740018d598382c2ae1f37bb7b625d46f34d033",
|
||||
"0x3115a238affc7cc50ad40536f87514c33a75935c72520d876fa9011651a819b0",
|
||||
"0xe9b3c0e687e2cc5f28eb2042decce3039e2d296bec673c98cd9e264aad9f73cb",
|
||||
"0x0330df9a1c2fe42d1ac3cd393527ffe01e59a808fcd7ffe2fbe918a82b4e1413",
|
||||
"0x254b23e5a6e403fa1df82d90633c051bd463049d521eca349e33f26a4e22443d",
|
||||
"0x56d62e6ff41a89670cd0b58f3f5842b7059027c1ed7f1f178052894dd5439b09",
|
||||
"0x83b738db92b8e789d5502de38a36d20d78e92d6975e07ad803a75a1c181cae70",
|
||||
"0x439beab00d517d81edd9e3cd2867df5fb9aa335e61b6203700426ad5b0dfc9a7",
|
||||
"0x28ade6a2d26a001c10eca939e58dea51d9d0c226642d268e230409b301ce0a58",
|
||||
"0xdbe038170721346bdba08663c6026c9e7573dc7d7ea2f56276ff95040d069f3e",
|
||||
"0x0500db2e22ad39bf7d4d62c07375ab2f46eb363d8b9b7dcbe4f4f912d9168655",
|
||||
"0x703dff8ff55e11f57ecd13101a677eacf1a4819b0424943a48da6a4a280a8a34",
|
||||
"0xc21c01581110d5199576c79b1c6d1a7ec1bb72739454a38b997da81f731e7695",
|
||||
"0x45cc74ac2b338a887bdc722c482122e39718251cf1319de75b9ba313272d8f5f",
|
||||
"0x06a4e54aa37b99b3335ab7702d6c4f026c8f27a578373fb5853489b6958b15e3",
|
||||
"0x12d1423a834f74fcf279e32a639ce5f4542001e421c9743eb905fed7bf0a6cee",
|
||||
"0x49f1dc5818186e91030e09eb615a0bb82393ce2874f9ee36ea968cd6906c7a71",
|
||||
"0xcfaff643a75fbf1eb89ae488902b3a9bc8e66b8c300e0e816f223675e5dc65a0",
|
||||
"0xd228d415cdd5a37109f621d62447fe5906ba7462acb65efda2e2a0965c2b984d",
|
||||
"0xb38ea82d658e85c65d3ce9a576bf6e6ffb0e1f1c7dd554d124cc8a2c568d1ce9",
|
||||
"0x4eda30e35f5d4c726201d22a0c4c05c6986b73cd5b19c4af2f2f591bd81c50ca",
|
||||
"0xde7d2cb40910c98cda5b17bf931b9e2761b5dbcf73de4ca6fd659cac415b7841",
|
||||
"0xf6320ef9228420d1808b74b63202f96eb291f99616f76f73248e45c3d8c36f10",
|
||||
"0xc6c03a85d998fe38f84ad79e16bc557722c04ffc3b79ac0e1d0940b2f59648c3",
|
||||
"0x07be4386488ccccba5f162630241b00e8fccebeaee946867e2808bc2a93b16b0",
|
||||
"0x2ab690bc438084f7791bcf9589c4fcd3d94a2189f40e98c9420071f2ea5e6573",
|
||||
"0xcf0fc0827c824378e2c2174d11dea0fe34a2a4bb874e88c50e40d0401514137e",
|
||||
"0x2ada7bdd5af69bba2b9977b811d6d611790ea9f6817701a1e4c0ff7b7b28cce1",
|
||||
"0x19f78ed4654d9ea98d26f1fc808890f8ea49232647891973df375b7b4be84dfc",
|
||||
"0x992f145952e6e56f6e0b6c573c052ad7b38f55ce6a07b72698be9ec4e325839f",
|
||||
"0x184eba0a8a2df9c1cb81d331347c62336f939cdb6b26ad8f50803b685dea5c0f",
|
||||
"0xb7f9f64ef6e4b458f6bbc6be094c8bf98a081ed16a9541f9e039a0427a949d26",
|
||||
"0x1512358c838d7c2c6afa1137868c7ad0fe3cc516e3201549f3cb3d4267ffc261",
|
||||
"0x8ebf459d29023a6664205d88b73b6fbe03f3a8d58f7190cc15a7c0b82849a166",
|
||||
"0xf2cceb56f58173ae33637c04f6ebae6bdac71d203b0fe430594d3d6be971dbd3",
|
||||
"0x559511da5e412b3695bd7c377f55e7883fa08f68dd5660600bc5e09338288b27",
|
||||
"0x50e872da55a1653cdbce2bc2bdc5ee19f8d1ffaacef6af75bd874fe47b77f371",
|
||||
"0x7a905e060419b238821bbc5513223483c106412663f64ff6a019cde75b420879",
|
||||
"0xa328246a599423850df4012528cb5d61d9be303cdea4202f946b0e3a2a12c0d0",
|
||||
"0x15e4b9a3a475cb5221e7b281c657335d04bb243266e9c75e46f03c0ff8ccffd4",
|
||||
"0x835fc9e31d0c5a15d7906006d5b291490d52b5c79c3d777b34d497ecd59ddf50",
|
||||
"0x886b5f07f5dede599b3c692660bd84300e74eea5f48834b4925b6f9dfed6aecb",
|
||||
"0xfc835d61674acea24b648647df71787400c51dfb5892a2a7043dab45adcc6730",
|
||||
"0x48cc0e8061b8fd5c812e139b75e0460ea142150dfeab913c82252a26993af6ed",
|
||||
"0x984eb490ac031f55596a4ecbf57580350e4b9f266c5eb94431f371aa4a14a78d",
|
||||
"0x3fc77c60434bf79a471724600e12efd8120f9f78e201e46657afa87f68f0a001",
|
||||
"0x6f234e54de04b9506a5b78233d110d6329e30ea223edcb8e902ef3cd089b2f85",
|
||||
"0x8068bbc755d0b10e40c8024660ac142d44aede73cd62137a63cbec48613c572e",
|
||||
"0xe7beec3a0554febce53e90c838e6aab0bf05fb02ab13b446187423e4d374b04e",
|
||||
"0xbe879a01ddebea34c5f6d401f271db75ac5852c5fa5e5d46beaebc6a7ed66d3e",
|
||||
"0x67038d88e0920d79dc4083ff02591bdb181ed9778fc3954aad25bb78bbe9123a",
|
||||
"0x19caa0b1ce83f546d93290a3f77b1ad9b08e7f1bd0245c9fc1a5347bc03c01a1",
|
||||
"0x9a37436c2a0b2e7e576e95fb6bb6d4c68d301ec1201e72734a0c9303af418474",
|
||||
"0x95ebbe7bb838501fca3f14ca7ef25dd5be7a1447cec71d62004597ac9c726e61",
|
||||
"0xa043556767bed1e2519b1cd4192e6c300de50970e0cb369731c02aa7ee17475f",
|
||||
"0x9b68ba75b041f0dd71145d11c681bda5861b31b43c4c9f8a0cec3fc65847b6db",
|
||||
"0xd0a4d11096863e0c74387154f58f4c9efb2b947ae4ce7a1da77b8a4ea9f0c869",
|
||||
"0x074a3e887f280751e7dcd1fd02f93dd12695b9e14a4fff45e2fcb3e26991c768",
|
||||
"0xa9ea4bde6a319b4f053c92a76d6e72928a089d680eacbb3670decb749f454659",
|
||||
"0xfed003b25b35e122adaf2a94fbb7069622846f86cdcb79a5e93e3f1f122cbc6a",
|
||||
"0xa158088c237ee341658d410ff6aa1f43b40a607248479330fce34731cc2de3bc",
|
||||
"0x62b4f39e1545eee1893e697e634e6e7441e8ceca5fc8be3ca8684cfd70b4b452",
|
||||
"0x39063f1e2c8fccd7fd2220a595f969d5148b3227a635aee5d3da7793341de814",
|
||||
"0x02b8c915c81e902a050090bbbcc51dd167f2ff075178bbc00966ee193da6344b",
|
||||
"0x4d45cab0e4554fb86eb6657df7969092eb77ecc8a099ae09ab0e2ae264db61c8",
|
||||
"0x484a0d21957e5d0d711bcf630102a793de1c16990d4f0be0f589176f6c9484a0",
|
||||
"0xb797d1fa33bf9bfff63eec0a30a51d02f1f88bd400d0a6f4ef5f8429997ea547",
|
||||
"0x5df2b265dc8181782e4e5dd25b32728605a77818598c5ec67fe99c6fc3e35ef7",
|
||||
"0x31aa525b9dfa0a57cb36206643c4822eb2c1061cc7d5f21e0cfbd72023f8e997",
|
||||
"0x9d7c6172f750e7d18593c71f36a3ffdea5f0e3565cba656a3b7e2ea361be18bf",
|
||||
"0xcfb4ac621266fcc2f5b33e249d8989efd98820964f68540ba8b7e3af27b83a9c",
|
||||
"0xd1c0c7aaca4e2b0dcfca44c2b570eeec6c4982512a50e6a82b65ebb6dbdc3195",
|
||||
"0xdc4095ea235c260e67c4896e465383605d55c5e8d96b27a512bc8c3f7b72ba1c",
|
||||
"0x11df12ba1b0bef7378ef88955aa026ec93e87c625a9c7d832eff44ff94aba7d9",
|
||||
"0x733dd930527eefa4454b56f5eb4c3aa29c4a0e99072e2c1684abfcc6ee09d833",
|
||||
"0xa7c38c7080fb588e42127de256b7b3d259055c2460900f76de9ef036d3a03377",
|
||||
"0x27b90a4f0708d7fcbd61dc42570d0d569acd86b7578d479f56d823f35ba4411e",
|
||||
"0x7cb065f986407568958ed7fb7ba18c2babfb49b02bc9b3b88756695411f31d7e",
|
||||
"0x648061097b5f547c60290afc290c91696367852d0dcb87be712382f09c9cd1b4",
|
||||
"0x194bd726e882fb0411c45d51da92d4d8949edf5ea53bb5e3282af1aa25217363",
|
||||
"0xa3b6c3ae24ca9cd0986610f3cf83d1250c8bad3110c645d9e98a8fd7a85d4867",
|
||||
"0xcf58b289f156df5285c791d7a8c8ea77104e34b254f600319cb9ef9582638997",
|
||||
"0xbbbbc3512166bcba6b870a314ec68698b6c36e594e40f73cdead3402ce69f2d4",
|
||||
"0xf9589e517f83fec8f31d79af06166719dd7870530697716ed1e603050bd3cd08",
|
||||
"0x63b199bc52c448f48ef989e5cf45b9a9fd3cb7d0b47686ba113dab00f08cd9ce",
|
||||
"0x366ec4dd2374d0328cc58f74dfe3993e33cf37881506dfd2de0462f01683659f",
|
||||
"0x24e19f5fee0f703516bc39c4a089bcda7d111e2ef39b138cb760f79272f0382e",
|
||||
"0x63aa5570647f5bb0aaa8190dd92e6581ac8fd7ca432a6cdca4caa9eb6ca6ebd4",
|
||||
"0xd04499317d5c640c71617dcd7c2bcb6e7ba84278162c3f7298e2b67a0d50a7cf",
|
||||
"0x36ea61183d313cae00bf55795ed9d798f18a17a543e9d6de49834cf128cbbdea",
|
||||
"0x5d6fdce8a398f24f97908926c0528b224f02823116fa2f302cb431ba06c9982b",
|
||||
"0x34f1e74f0bb9e36ce61aa75847d86e8d4cb9d4e555f0e8a4f93c5edced10a81c",
|
||||
"0xd40d82620603252580f5cf54f307170ca5868c086ddc345e633325d4f78f07b9",
|
||||
"0x79b3c34bfd5040d39878501ef0dbec691ed3d806e4adb7782220feb1756a6acd",
|
||||
"0xc43e045abff53b8077a017596e531359672b0531d129cb6a0302df2a0c59800b",
|
||||
"0xa5fd2680ea8bec22cbf5da6bf0f8f7f42c569a4a9068de15399c0b386f2afcd0",
|
||||
"0xe3c0ce648f3c4d5a76d96b30287933ae5a79b60bf5bddfba0f102bb06b9f2b86",
|
||||
"0x5c068aad633ce416a4340c5425d133616c0eaf30dc042d919786dfafea4e2968",
|
||||
"0x2a12b2aa6359401efd17d464209547657d01992bcdad081a386628a60ec3924b",
|
||||
"0x10de6b8c58816feb6145f9b39544a09d67f04ca758ed0b9a80e540ce8b117ead",
|
||||
"0xf5632116f787967f9954600c783ca2ae46be42d36cc693b5d847a5dd830b6c06",
|
||||
"0x1798366afb7398e8cce0b35d38c59d8ea2eb3cfb1a85b165fe4ba68a15088c3e",
|
||||
"0x8cd71c3c3cc517d8997115eefbc7907ab871321f8167e7da90d42c33114d557d",
|
||||
"0x3795c6454c0d734e71a33e8d4596a2e9af0a4d3aca1bb0452f2da4676f4cafae",
|
||||
"0x853f13cd1e6c0cad486db441bbc58a266f35bcc367f7ecd9ded5a4b2ce96ede1",
|
||||
"0x2a557d24d969f703c9fbb2b8efd4c89f43930e21c5a5a7b03c1e53d94b73696f",
|
||||
"0x66ae745b56d3d5f9082953d9b267e2a98dcab60a302bc783d79aad6c3816ee67",
|
||||
"0x904f9d8510005aaa955a2654de93af4ec0271205472740a7b86a90375fed50eb",
|
||||
"0x74e9fef60a9911345324a22e50fc74982b24849f3c4973a5d55b73092dc46ea4",
|
||||
"0x1571a667030f301ab8657d70607c3dfb02599cac15b44e9e48a4935d68d74028"
|
||||
]
|
||||
},
|
||||
"accounts": {
|
||||
"0x0000000000000000000000000000000000000001": {
|
||||
"balance": "1",
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1572,53 +1572,6 @@ impl Engine<EthereumMachine> for AuthorityRound {
|
||||
self.validators.signals_epoch_end(first, header, aux)
|
||||
}
|
||||
|
||||
fn is_epoch_end_light(
|
||||
&self,
|
||||
chain_head: &Header,
|
||||
chain: &super::Headers<Header>,
|
||||
transition_store: &super::PendingTransitionStore,
|
||||
) -> Option<Vec<u8>> {
|
||||
// epochs only matter if we want to support light clients.
|
||||
if self.immediate_transitions {
|
||||
return None;
|
||||
}
|
||||
|
||||
let epoch_transition_hash = {
|
||||
let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) {
|
||||
Some(client) => client,
|
||||
None => {
|
||||
warn!(target: "engine", "Unable to check for epoch end: missing client ref.");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
let mut epoch_manager = self.epoch_manager.lock();
|
||||
if !epoch_manager.zoom_to(&*client, &self.machine, &*self.validators, chain_head) {
|
||||
return None;
|
||||
}
|
||||
|
||||
epoch_manager.epoch_transition_hash
|
||||
};
|
||||
|
||||
let mut hash = *chain_head.parent_hash();
|
||||
|
||||
let mut ancestry = itertools::repeat_call(move || {
|
||||
chain(hash).and_then(|header| {
|
||||
if header.number() == 0 {
|
||||
return None;
|
||||
}
|
||||
hash = *header.parent_hash();
|
||||
Some(header)
|
||||
})
|
||||
})
|
||||
.while_some()
|
||||
.take_while(|header| header.hash() != epoch_transition_hash);
|
||||
|
||||
let finalized = self.build_finality(chain_head, &mut ancestry);
|
||||
|
||||
self.is_epoch_end(chain_head, &finalized, chain, transition_store)
|
||||
}
|
||||
|
||||
fn is_epoch_end(
|
||||
&self,
|
||||
chain_head: &Header,
|
||||
|
@ -168,15 +168,6 @@ impl Engine<EthereumMachine> for BasicAuthority {
|
||||
self.validators.is_epoch_end(first, chain_head)
|
||||
}
|
||||
|
||||
fn is_epoch_end_light(
|
||||
&self,
|
||||
chain_head: &Header,
|
||||
chain: &super::Headers<Header>,
|
||||
transition_store: &super::PendingTransitionStore,
|
||||
) -> Option<Vec<u8>> {
|
||||
self.is_epoch_end(chain_head, &[], chain, transition_store)
|
||||
}
|
||||
|
||||
fn epoch_verifier<'a>(
|
||||
&self,
|
||||
header: &Header,
|
||||
|
@ -432,24 +432,6 @@ pub trait Engine<M: Machine>: Sync + Send {
|
||||
None
|
||||
}
|
||||
|
||||
/// Whether a block is the end of an epoch.
|
||||
///
|
||||
/// This either means that an immediate transition occurs or a block signalling transition
|
||||
/// has reached finality. The `Headers` given are not guaranteed to return any blocks
|
||||
/// from any epoch other than the current. This is a specialized method to use for light
|
||||
/// clients since the light client doesn't track finality of all blocks, and therefore finality
|
||||
/// for blocks in the current epoch is built inside this method by the engine.
|
||||
///
|
||||
/// Return optional transition proof.
|
||||
fn is_epoch_end_light(
|
||||
&self,
|
||||
_chain_head: &Header,
|
||||
_chain: &Headers<Header>,
|
||||
_transition_store: &PendingTransitionStore,
|
||||
) -> Option<Vec<u8>> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Create an epoch verifier from validation proof and a flag indicating
|
||||
/// whether finality is required.
|
||||
fn epoch_verifier<'a>(&self, _header: &Header, _proof: &'a [u8]) -> ConstructedVerifier<'a, M> {
|
||||
|
@ -22,5 +22,5 @@ mod spec;
|
||||
|
||||
pub use self::{
|
||||
genesis::Genesis,
|
||||
spec::{CommonParams, OptimizeFor, Spec, SpecHardcodedSync, SpecParams},
|
||||
spec::{CommonParams, OptimizeFor, Spec, SpecParams},
|
||||
};
|
||||
|
@ -24,8 +24,8 @@ use ethjson;
|
||||
use hash::{keccak, KECCAK_NULL_RLP};
|
||||
use parking_lot::RwLock;
|
||||
use rlp::{Rlp, RlpStream};
|
||||
use rustc_hex::{FromHex, ToHex};
|
||||
use types::{encoded, header::Header, BlockNumber};
|
||||
use rustc_hex::FromHex;
|
||||
use types::{header::Header, BlockNumber};
|
||||
use vm::{ActionParams, ActionValue, CallType, EnvInfo, ParamsType};
|
||||
|
||||
use builtin::Builtin;
|
||||
@ -436,9 +436,6 @@ pub struct Spec {
|
||||
/// Each seal field, expressed as RLP, concatenated.
|
||||
pub seal_rlp: Bytes,
|
||||
|
||||
/// Hardcoded synchronization. Allows the light client to immediately jump to a specific block.
|
||||
pub hardcoded_sync: Option<SpecHardcodedSync>,
|
||||
|
||||
/// Contract constructors to be executed on genesis.
|
||||
constructors: Vec<(Address, Bytes)>,
|
||||
|
||||
@ -467,7 +464,6 @@ impl Clone for Spec {
|
||||
timestamp: self.timestamp.clone(),
|
||||
extra_data: self.extra_data.clone(),
|
||||
seal_rlp: self.seal_rlp.clone(),
|
||||
hardcoded_sync: self.hardcoded_sync.clone(),
|
||||
constructors: self.constructors.clone(),
|
||||
state_root_memo: RwLock::new(*self.state_root_memo.read()),
|
||||
genesis_state: self.genesis_state.clone(),
|
||||
@ -475,45 +471,6 @@ impl Clone for Spec {
|
||||
}
|
||||
}
|
||||
|
||||
/// Part of `Spec`. Describes the hardcoded synchronization parameters.
|
||||
pub struct SpecHardcodedSync {
|
||||
/// Header of the block to jump to for hardcoded sync, and total difficulty.
|
||||
pub header: encoded::Header,
|
||||
/// Total difficulty of the block to jump to.
|
||||
pub total_difficulty: U256,
|
||||
/// List of hardcoded CHTs, in order. If `hardcoded_sync` is set, the CHTs should include the
|
||||
/// header of `hardcoded_sync`.
|
||||
pub chts: Vec<H256>,
|
||||
}
|
||||
|
||||
impl SpecHardcodedSync {
|
||||
/// Turns this specifications back into JSON. Useful for pretty printing.
|
||||
pub fn to_json(self) -> ethjson::spec::HardcodedSync {
|
||||
self.into()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl Clone for SpecHardcodedSync {
|
||||
fn clone(&self) -> SpecHardcodedSync {
|
||||
SpecHardcodedSync {
|
||||
header: self.header.clone(),
|
||||
total_difficulty: self.total_difficulty.clone(),
|
||||
chts: self.chts.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SpecHardcodedSync> for ethjson::spec::HardcodedSync {
|
||||
fn from(sync: SpecHardcodedSync) -> ethjson::spec::HardcodedSync {
|
||||
ethjson::spec::HardcodedSync {
|
||||
header: sync.header.into_inner().to_hex(),
|
||||
total_difficulty: ethjson::uint::Uint(sync.total_difficulty),
|
||||
chts: sync.chts.into_iter().map(Into::into).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn load_machine_from(s: ethjson::spec::Spec) -> EthereumMachine {
|
||||
let builtins = s
|
||||
.accounts
|
||||
@ -551,24 +508,6 @@ fn load_from(spec_params: SpecParams, s: ethjson::spec::Spec) -> Result<Spec, Er
|
||||
let GenericSeal(seal_rlp) = g.seal.into();
|
||||
let params = CommonParams::from(s.params);
|
||||
|
||||
let hardcoded_sync = if let Some(ref hs) = s.hardcoded_sync {
|
||||
if let Ok(header) = hs.header.from_hex() {
|
||||
Some(SpecHardcodedSync {
|
||||
header: encoded::Header::new(header),
|
||||
total_difficulty: hs.total_difficulty.into(),
|
||||
chts: s
|
||||
.hardcoded_sync
|
||||
.as_ref()
|
||||
.map(|s| s.chts.iter().map(|c| c.clone().into()).collect())
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut s = Spec {
|
||||
name: s.name.clone().into(),
|
||||
engine: Spec::engine(spec_params, s.engine, params, builtins),
|
||||
@ -584,7 +523,6 @@ fn load_from(spec_params: SpecParams, s: ethjson::spec::Spec) -> Result<Spec, Er
|
||||
timestamp: g.timestamp,
|
||||
extra_data: g.extra_data,
|
||||
seal_rlp: seal_rlp,
|
||||
hardcoded_sync: hardcoded_sync,
|
||||
constructors: s
|
||||
.accounts
|
||||
.constructors()
|
||||
|
@ -12,7 +12,6 @@ common-types = { path = "../types" }
|
||||
enum_primitive = "0.1.1"
|
||||
ethcore = { path = ".." }
|
||||
ethcore-io = { path = "../../util/io" }
|
||||
ethcore-light = { path = "../light" }
|
||||
ethcore-network = { path = "../../util/network" }
|
||||
ethcore-network-devp2p = { path = "../../util/network-devp2p" }
|
||||
ethereum-types = "0.4"
|
||||
|
@ -41,14 +41,6 @@ use ethcore::{
|
||||
use ethereum_types::{H256, H512, U256};
|
||||
use ethkey::Secret;
|
||||
use io::TimerToken;
|
||||
use light::{
|
||||
client::AsLightClient,
|
||||
net::{
|
||||
self as light_net, Capabilities, EventContext, Handler as LightHandler, LightProtocol,
|
||||
Params as LightParams, SampleStore,
|
||||
},
|
||||
Provider,
|
||||
};
|
||||
use network::IpFilter;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use private_tx::PrivateTxHandler;
|
||||
@ -57,16 +49,12 @@ use std::{
|
||||
str::FromStr,
|
||||
};
|
||||
use sync_io::NetSyncIo;
|
||||
use types::{pruning_info::PruningInfo, transaction::UnverifiedTransaction, BlockNumber};
|
||||
|
||||
use super::light_sync::SyncInfo;
|
||||
use types::{transaction::UnverifiedTransaction, BlockNumber};
|
||||
|
||||
/// Parity sync protocol
|
||||
pub const PAR_PROTOCOL: ProtocolId = *b"par";
|
||||
/// Ethereum sync protocol
|
||||
pub const ETH_PROTOCOL: ProtocolId = *b"eth";
|
||||
/// Ethereum light protocol
|
||||
pub const LIGHT_PROTOCOL: ProtocolId = *b"pip";
|
||||
|
||||
/// Determine warp sync status.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
@ -114,14 +102,10 @@ pub struct SyncConfig {
|
||||
pub network_id: u64,
|
||||
/// Main "eth" subprotocol name.
|
||||
pub subprotocol_name: [u8; 3],
|
||||
/// Light subprotocol name.
|
||||
pub light_subprotocol_name: [u8; 3],
|
||||
/// Fork block to check
|
||||
pub fork_block: Option<(BlockNumber, H256)>,
|
||||
/// Enable snapshot sync
|
||||
pub warp_sync: WarpSync,
|
||||
/// Enable light client server.
|
||||
pub serve_light: bool,
|
||||
}
|
||||
|
||||
impl Default for SyncConfig {
|
||||
@ -131,10 +115,8 @@ impl Default for SyncConfig {
|
||||
download_old_blocks: true,
|
||||
network_id: 1,
|
||||
subprotocol_name: ETH_PROTOCOL,
|
||||
light_subprotocol_name: LIGHT_PROTOCOL,
|
||||
fork_block: None,
|
||||
warp_sync: WarpSync::Disabled,
|
||||
serve_light: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -178,8 +160,6 @@ pub struct PeerInfo {
|
||||
pub local_address: String,
|
||||
/// Eth protocol info.
|
||||
pub eth_info: Option<EthProtocolInfo>,
|
||||
/// Light protocol info.
|
||||
pub pip_info: Option<PipProtocolInfo>,
|
||||
}
|
||||
|
||||
/// Ethereum protocol info.
|
||||
@ -193,27 +173,6 @@ pub struct EthProtocolInfo {
|
||||
pub difficulty: Option<U256>,
|
||||
}
|
||||
|
||||
/// PIP protocol info.
|
||||
#[derive(Debug)]
|
||||
pub struct PipProtocolInfo {
|
||||
/// Protocol version
|
||||
pub version: u32,
|
||||
/// SHA3 of peer best block hash
|
||||
pub head: H256,
|
||||
/// Peer total difficulty if known
|
||||
pub difficulty: U256,
|
||||
}
|
||||
|
||||
impl From<light_net::Status> for PipProtocolInfo {
|
||||
fn from(status: light_net::Status) -> Self {
|
||||
PipProtocolInfo {
|
||||
version: status.protocol_version,
|
||||
head: status.head_hash,
|
||||
difficulty: status.head_td,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A prioritized tasks run in a specialised timer.
|
||||
/// Every task should be completed within a hard deadline,
|
||||
/// if it's not it's either cancelled or split into multiple tasks.
|
||||
@ -257,8 +216,6 @@ pub struct Params {
|
||||
pub snapshot_service: Arc<dyn SnapshotService>,
|
||||
/// Private tx service.
|
||||
pub private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
|
||||
/// Light data provider.
|
||||
pub provider: Arc<dyn crate::light::Provider>,
|
||||
/// Network layer configuration.
|
||||
pub network_config: NetworkConfiguration,
|
||||
}
|
||||
@ -269,76 +226,18 @@ pub struct EthSync {
|
||||
network: NetworkService,
|
||||
/// Main (eth/par) protocol handler
|
||||
eth_handler: Arc<SyncProtocolHandler>,
|
||||
/// Light (pip) protocol handler
|
||||
light_proto: Option<Arc<LightProtocol>>,
|
||||
/// The main subprotocol name
|
||||
subprotocol_name: [u8; 3],
|
||||
/// Light subprotocol name.
|
||||
light_subprotocol_name: [u8; 3],
|
||||
/// Priority tasks notification channel
|
||||
priority_tasks: Mutex<mpsc::Sender<PriorityTask>>,
|
||||
}
|
||||
|
||||
fn light_params(
|
||||
network_id: u64,
|
||||
median_peers: f64,
|
||||
pruning_info: PruningInfo,
|
||||
sample_store: Option<Box<dyn SampleStore>>,
|
||||
) -> LightParams {
|
||||
let mut light_params = LightParams {
|
||||
network_id: network_id,
|
||||
config: Default::default(),
|
||||
capabilities: Capabilities {
|
||||
serve_headers: true,
|
||||
serve_chain_since: Some(pruning_info.earliest_chain),
|
||||
serve_state_since: Some(pruning_info.earliest_state),
|
||||
tx_relay: true,
|
||||
},
|
||||
sample_store: sample_store,
|
||||
};
|
||||
|
||||
light_params.config.median_peers = median_peers;
|
||||
light_params
|
||||
}
|
||||
|
||||
impl EthSync {
|
||||
/// Creates and register protocol with the network service
|
||||
pub fn new(
|
||||
params: Params,
|
||||
connection_filter: Option<Arc<dyn ConnectionFilter>>,
|
||||
) -> Result<Arc<EthSync>, Error> {
|
||||
let pruning_info = params.chain.pruning_info();
|
||||
let light_proto = match params.config.serve_light {
|
||||
false => None,
|
||||
true => Some({
|
||||
let sample_store = params
|
||||
.network_config
|
||||
.net_config_path
|
||||
.clone()
|
||||
.map(::std::path::PathBuf::from)
|
||||
.map(|mut p| {
|
||||
p.push("request_timings");
|
||||
light_net::FileStore(p)
|
||||
})
|
||||
.map(|store| Box::new(store) as Box<_>);
|
||||
|
||||
let median_peers = (params.network_config.min_peers
|
||||
+ params.network_config.max_peers) as f64
|
||||
/ 2.0;
|
||||
let light_params = light_params(
|
||||
params.config.network_id,
|
||||
median_peers,
|
||||
pruning_info,
|
||||
sample_store,
|
||||
);
|
||||
|
||||
let mut light_proto = LightProtocol::new(params.provider, light_params);
|
||||
light_proto.add_handler(Arc::new(TxRelay(params.chain.clone())));
|
||||
|
||||
Arc::new(light_proto)
|
||||
}),
|
||||
};
|
||||
|
||||
let (priority_tasks_tx, priority_tasks_rx) = mpsc::channel();
|
||||
let sync = ChainSyncApi::new(
|
||||
params.config,
|
||||
@ -359,9 +258,7 @@ impl EthSync {
|
||||
snapshot_service: params.snapshot_service,
|
||||
overlay: RwLock::new(HashMap::new()),
|
||||
}),
|
||||
light_proto: light_proto,
|
||||
subprotocol_name: params.config.subprotocol_name,
|
||||
light_subprotocol_name: params.config.light_subprotocol_name,
|
||||
priority_tasks: Mutex::new(priority_tasks_tx),
|
||||
});
|
||||
|
||||
@ -385,7 +282,6 @@ impl SyncProvider for EthSync {
|
||||
self.network
|
||||
.with_context_eval(self.subprotocol_name, |ctx| {
|
||||
let peer_ids = self.network.connected_peers();
|
||||
let light_proto = self.light_proto.as_ref();
|
||||
|
||||
let peer_info = self.eth_handler.sync.peer_info(&peer_ids);
|
||||
peer_ids
|
||||
@ -408,10 +304,6 @@ impl SyncProvider for EthSync {
|
||||
remote_address: session_info.remote_address,
|
||||
local_address: session_info.local_address,
|
||||
eth_info: peer_info,
|
||||
pip_info: light_proto
|
||||
.as_ref()
|
||||
.and_then(|lp| lp.peer_status(peer_id))
|
||||
.map(Into::into),
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
@ -527,8 +419,6 @@ impl ChainNotify for EthSync {
|
||||
if new_blocks.has_more_blocks_to_import {
|
||||
return;
|
||||
}
|
||||
use light::net::Announcement;
|
||||
|
||||
self.network.with_context(self.subprotocol_name, |context| {
|
||||
let mut sync_io = NetSyncIo::new(
|
||||
context,
|
||||
@ -546,29 +436,6 @@ impl ChainNotify for EthSync {
|
||||
&new_blocks.proposed,
|
||||
);
|
||||
});
|
||||
|
||||
self.network
|
||||
.with_context(self.light_subprotocol_name, |context| {
|
||||
let light_proto = match self.light_proto.as_ref() {
|
||||
Some(lp) => lp,
|
||||
None => return,
|
||||
};
|
||||
|
||||
let chain_info = self.eth_handler.chain.chain_info();
|
||||
light_proto.make_announcement(
|
||||
&context,
|
||||
Announcement {
|
||||
head_hash: chain_info.best_block_hash,
|
||||
head_num: chain_info.best_block_number,
|
||||
head_td: chain_info.total_difficulty,
|
||||
reorg_depth: 0, // recalculated on a per-peer basis.
|
||||
serve_headers: false, // these fields consist of _changes_ in capability.
|
||||
serve_state_since: None,
|
||||
serve_chain_since: None,
|
||||
tx_relay: false,
|
||||
},
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn start(&self) {
|
||||
@ -603,17 +470,6 @@ impl ChainNotify for EthSync {
|
||||
],
|
||||
)
|
||||
.unwrap_or_else(|e| warn!("Error registering snapshot sync protocol: {:?}", e));
|
||||
|
||||
// register the light protocol.
|
||||
if let Some(light_proto) = self.light_proto.as_ref().map(|x| x.clone()) {
|
||||
self.network
|
||||
.register_protocol(
|
||||
light_proto,
|
||||
self.light_subprotocol_name,
|
||||
::light::net::PROTOCOL_VERSIONS,
|
||||
)
|
||||
.unwrap_or_else(|e| warn!("Error registering light client protocol: {:?}", e));
|
||||
}
|
||||
}
|
||||
|
||||
fn stop(&self) {
|
||||
@ -661,24 +517,6 @@ impl ChainNotify for EthSync {
|
||||
}
|
||||
}
|
||||
|
||||
/// PIP event handler.
|
||||
/// Simply queues transactions from light client peers.
|
||||
struct TxRelay(Arc<dyn BlockChainClient>);
|
||||
|
||||
impl LightHandler for TxRelay {
|
||||
fn on_transactions(
|
||||
&self,
|
||||
ctx: &dyn EventContext,
|
||||
relay: &[::types::transaction::UnverifiedTransaction],
|
||||
) {
|
||||
trace!(target: "pip", "Relaying {} transactions from peer {}", relay.len(), ctx.peer());
|
||||
self.0.queue_transactions(
|
||||
relay.iter().map(|tx| ::rlp::encode(tx)).collect(),
|
||||
ctx.peer(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for managing network
|
||||
pub trait ManageNetwork: Send + Sync {
|
||||
/// Set to allow unreserved peers to connect
|
||||
@ -737,10 +575,6 @@ impl ManageNetwork for EthSync {
|
||||
self.eth_handler.sync.write().abort(&mut sync_io);
|
||||
});
|
||||
|
||||
if let Some(light_proto) = self.light_proto.as_ref() {
|
||||
light_proto.abort();
|
||||
}
|
||||
|
||||
self.stop();
|
||||
}
|
||||
|
||||
@ -891,262 +725,3 @@ pub struct PeerNumbers {
|
||||
/// Min peers.
|
||||
pub min: usize,
|
||||
}
|
||||
|
||||
/// Light synchronization.
|
||||
pub trait LightSyncProvider {
|
||||
/// Get peer numbers.
|
||||
fn peer_numbers(&self) -> PeerNumbers;
|
||||
|
||||
/// Get peers information
|
||||
fn peers(&self) -> Vec<PeerInfo>;
|
||||
|
||||
/// Get network id.
|
||||
fn network_id(&self) -> u64;
|
||||
|
||||
/// Get the enode if available.
|
||||
fn enode(&self) -> Option<String>;
|
||||
|
||||
/// Returns propagation count for pending transactions.
|
||||
fn transactions_stats(&self) -> BTreeMap<H256, TransactionStats>;
|
||||
}
|
||||
|
||||
/// Wrapper around `light_sync::SyncInfo` to expose those methods without the concrete type `LightSync`
|
||||
pub trait LightSyncInfo: Send + Sync {
|
||||
/// Get the highest block advertised on the network.
|
||||
fn highest_block(&self) -> Option<u64>;
|
||||
|
||||
/// Get the block number at the time of sync start.
|
||||
fn start_block(&self) -> u64;
|
||||
|
||||
/// Whether major sync is underway.
|
||||
fn is_major_importing(&self) -> bool;
|
||||
}
|
||||
|
||||
/// Execute a closure with a protocol context.
|
||||
pub trait LightNetworkDispatcher {
|
||||
/// Execute a closure with a protocol context.
|
||||
fn with_context<F, T>(&self, f: F) -> Option<T>
|
||||
where
|
||||
F: FnOnce(&dyn crate::light::net::BasicContext) -> T;
|
||||
}
|
||||
|
||||
/// Configuration for the light sync.
|
||||
pub struct LightSyncParams<L> {
|
||||
/// Network configuration.
|
||||
pub network_config: BasicNetworkConfiguration,
|
||||
/// Light client to sync to.
|
||||
pub client: Arc<L>,
|
||||
/// Network ID.
|
||||
pub network_id: u64,
|
||||
/// Subprotocol name.
|
||||
pub subprotocol_name: [u8; 3],
|
||||
/// Other handlers to attach.
|
||||
pub handlers: Vec<Arc<dyn LightHandler>>,
|
||||
}
|
||||
|
||||
/// Service for light synchronization.
|
||||
pub struct LightSync {
|
||||
proto: Arc<LightProtocol>,
|
||||
sync: Arc<dyn SyncInfo + Sync + Send>,
|
||||
network: NetworkService,
|
||||
subprotocol_name: [u8; 3],
|
||||
network_id: u64,
|
||||
}
|
||||
|
||||
impl LightSync {
|
||||
/// Create a new light sync service.
|
||||
pub fn new<L>(params: LightSyncParams<L>) -> Result<Self, Error>
|
||||
where
|
||||
L: AsLightClient + Provider + Sync + Send + 'static,
|
||||
{
|
||||
use light_sync::LightSync as SyncHandler;
|
||||
|
||||
// initialize light protocol handler and attach sync module.
|
||||
let (sync, light_proto) = {
|
||||
let light_params = LightParams {
|
||||
network_id: params.network_id,
|
||||
config: Default::default(),
|
||||
capabilities: Capabilities {
|
||||
serve_headers: false,
|
||||
serve_chain_since: None,
|
||||
serve_state_since: None,
|
||||
tx_relay: false,
|
||||
},
|
||||
sample_store: None,
|
||||
};
|
||||
|
||||
let mut light_proto = LightProtocol::new(params.client.clone(), light_params);
|
||||
let sync_handler = Arc::new(SyncHandler::new(params.client.clone())?);
|
||||
light_proto.add_handler(sync_handler.clone());
|
||||
|
||||
for handler in params.handlers {
|
||||
light_proto.add_handler(handler);
|
||||
}
|
||||
|
||||
(sync_handler, Arc::new(light_proto))
|
||||
};
|
||||
|
||||
let service = NetworkService::new(params.network_config, None)?;
|
||||
|
||||
Ok(LightSync {
|
||||
proto: light_proto,
|
||||
sync: sync,
|
||||
network: service,
|
||||
subprotocol_name: params.subprotocol_name,
|
||||
network_id: params.network_id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::ops::Deref for LightSync {
|
||||
type Target = dyn crate::light_sync::SyncInfo;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&*self.sync
|
||||
}
|
||||
}
|
||||
|
||||
impl LightNetworkDispatcher for LightSync {
|
||||
fn with_context<F, T>(&self, f: F) -> Option<T>
|
||||
where
|
||||
F: FnOnce(&dyn crate::light::net::BasicContext) -> T,
|
||||
{
|
||||
self.network
|
||||
.with_context_eval(self.subprotocol_name, move |ctx| {
|
||||
self.proto.with_context(&ctx, f)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl ManageNetwork for LightSync {
|
||||
fn accept_unreserved_peers(&self) {
|
||||
self.network
|
||||
.set_non_reserved_mode(NonReservedPeerMode::Accept);
|
||||
}
|
||||
|
||||
fn deny_unreserved_peers(&self) {
|
||||
self.network
|
||||
.set_non_reserved_mode(NonReservedPeerMode::Deny);
|
||||
}
|
||||
|
||||
fn remove_reserved_peer(&self, peer: String) -> Result<(), String> {
|
||||
self.network
|
||||
.remove_reserved_peer(&peer)
|
||||
.map_err(|e| format!("{:?}", e))
|
||||
}
|
||||
|
||||
fn add_reserved_peer(&self, peer: String) -> Result<(), String> {
|
||||
self.network
|
||||
.add_reserved_peer(&peer)
|
||||
.map_err(|e| format!("{:?}", e))
|
||||
}
|
||||
|
||||
fn start_network(&self) {
|
||||
match self.network.start() {
|
||||
Err((err, listen_address)) => {
|
||||
match err.into() {
|
||||
ErrorKind::Io(ref e) if e.kind() == io::ErrorKind::AddrInUse => {
|
||||
warn!("Network port {:?} is already in use, make sure that another instance of an Ethereum client is not running or change the port using the --port option.", listen_address.expect("Listen address is not set."))
|
||||
},
|
||||
err => warn!("Error starting network: {}", err),
|
||||
}
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
|
||||
let light_proto = self.proto.clone();
|
||||
|
||||
self.network
|
||||
.register_protocol(
|
||||
light_proto,
|
||||
self.subprotocol_name,
|
||||
::light::net::PROTOCOL_VERSIONS,
|
||||
)
|
||||
.unwrap_or_else(|e| warn!("Error registering light client protocol: {:?}", e));
|
||||
}
|
||||
|
||||
fn stop_network(&self) {
|
||||
self.proto.abort();
|
||||
self.network.stop();
|
||||
}
|
||||
|
||||
fn num_peers_range(&self) -> RangeInclusive<u32> {
|
||||
self.network.num_peers_range()
|
||||
}
|
||||
|
||||
fn with_proto_context(&self, proto: ProtocolId, f: &mut dyn FnMut(&dyn NetworkContext)) {
|
||||
self.network.with_context_eval(proto, f);
|
||||
}
|
||||
}
|
||||
|
||||
impl LightSyncProvider for LightSync {
|
||||
fn peer_numbers(&self) -> PeerNumbers {
|
||||
let (connected, active) = self.proto.peer_count();
|
||||
let peers_range = self.num_peers_range();
|
||||
debug_assert!(peers_range.end() >= peers_range.start());
|
||||
PeerNumbers {
|
||||
connected: connected,
|
||||
active: active,
|
||||
max: *peers_range.end() as usize,
|
||||
min: *peers_range.start() as usize,
|
||||
}
|
||||
}
|
||||
|
||||
fn peers(&self) -> Vec<PeerInfo> {
|
||||
self.network
|
||||
.with_context_eval(self.subprotocol_name, |ctx| {
|
||||
let peer_ids = self.network.connected_peers();
|
||||
|
||||
peer_ids
|
||||
.into_iter()
|
||||
.filter_map(|peer_id| {
|
||||
let session_info = match ctx.session_info(peer_id) {
|
||||
None => return None,
|
||||
Some(info) => info,
|
||||
};
|
||||
|
||||
Some(PeerInfo {
|
||||
id: session_info.id.map(|id| format!("{:x}", id)),
|
||||
client_version: session_info.client_version,
|
||||
capabilities: session_info
|
||||
.peer_capabilities
|
||||
.into_iter()
|
||||
.map(|c| c.to_string())
|
||||
.collect(),
|
||||
remote_address: session_info.remote_address,
|
||||
local_address: session_info.local_address,
|
||||
eth_info: None,
|
||||
pip_info: self.proto.peer_status(peer_id).map(Into::into),
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_else(Vec::new)
|
||||
}
|
||||
|
||||
fn enode(&self) -> Option<String> {
|
||||
self.network.external_url()
|
||||
}
|
||||
|
||||
fn network_id(&self) -> u64 {
|
||||
self.network_id
|
||||
}
|
||||
|
||||
fn transactions_stats(&self) -> BTreeMap<H256, TransactionStats> {
|
||||
Default::default() // TODO
|
||||
}
|
||||
}
|
||||
|
||||
impl LightSyncInfo for LightSync {
|
||||
fn highest_block(&self) -> Option<u64> {
|
||||
(*self.sync).highest_block()
|
||||
}
|
||||
|
||||
fn start_block(&self) -> u64 {
|
||||
(*self.sync).start_block()
|
||||
}
|
||||
|
||||
fn is_major_importing(&self) -> bool {
|
||||
(*self.sync).is_major_importing()
|
||||
}
|
||||
}
|
||||
|
@ -37,8 +37,6 @@ extern crate rand;
|
||||
extern crate rlp;
|
||||
extern crate triehash_ethereum;
|
||||
|
||||
extern crate ethcore_light as light;
|
||||
|
||||
#[cfg(test)]
|
||||
extern crate env_logger;
|
||||
#[cfg(test)]
|
||||
@ -67,8 +65,6 @@ mod snapshot;
|
||||
mod sync_io;
|
||||
mod transactions_stats;
|
||||
|
||||
pub mod light_sync;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
|
@ -1,788 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Light client synchronization.
|
||||
//!
|
||||
//! This will synchronize the header chain using PIP messages.
|
||||
//! Dataflow is largely one-directional as headers are pushed into
|
||||
//! the light client queue for import. Where possible, they are batched
|
||||
//! in groups.
|
||||
//!
|
||||
//! This is written assuming that the client and sync service are running
|
||||
//! in the same binary; unlike a full node which might communicate via IPC.
|
||||
//!
|
||||
//!
|
||||
//! Sync strategy:
|
||||
//! - Find a common ancestor with peers.
|
||||
//! - Split the chain up into subchains, which are downloaded in parallel from various peers in rounds.
|
||||
//! - When within a certain distance of the head of the chain, aggressively download all
|
||||
//! announced blocks.
|
||||
//! - On bad block/response, punish peer and reset.
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
mem,
|
||||
ops::Deref,
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use ethereum_types::{H256, U256};
|
||||
use light::{
|
||||
client::{AsLightClient, LightChainClient},
|
||||
net::{
|
||||
Announcement, BasicContext, Capabilities, Error as NetError, EventContext, Handler,
|
||||
PeerStatus, ReqId, Status,
|
||||
},
|
||||
request::{self, CompleteHeadersRequest as HeadersRequest},
|
||||
};
|
||||
use network::PeerId;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use rand::{OsRng, Rng};
|
||||
use types::encoded;
|
||||
|
||||
use self::sync_round::{AbortReason, ResponseContext, SyncRound};
|
||||
|
||||
mod response;
|
||||
mod sync_round;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
// Base value for the header request timeout.
|
||||
const REQ_TIMEOUT_BASE: Duration = Duration::from_secs(7);
|
||||
// Additional value for each requested header.
|
||||
// If we request N headers, then the timeout will be:
|
||||
// REQ_TIMEOUT_BASE + N * REQ_TIMEOUT_PER_HEADER
|
||||
const REQ_TIMEOUT_PER_HEADER: Duration = Duration::from_millis(10);
|
||||
|
||||
/// Peer chain info.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
struct ChainInfo {
|
||||
head_td: U256,
|
||||
head_hash: H256,
|
||||
head_num: u64,
|
||||
}
|
||||
|
||||
impl PartialOrd for ChainInfo {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> {
|
||||
self.head_td.partial_cmp(&other.head_td)
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for ChainInfo {
|
||||
fn cmp(&self, other: &Self) -> ::std::cmp::Ordering {
|
||||
self.head_td.cmp(&other.head_td)
|
||||
}
|
||||
}
|
||||
|
||||
struct Peer {
|
||||
status: ChainInfo,
|
||||
}
|
||||
|
||||
impl Peer {
|
||||
// Create a new peer.
|
||||
fn new(chain_info: ChainInfo) -> Self {
|
||||
Peer { status: chain_info }
|
||||
}
|
||||
}
|
||||
|
||||
// search for a common ancestor with the best chain.
|
||||
#[derive(Debug)]
|
||||
enum AncestorSearch {
|
||||
Queued(u64), // queued to search for blocks starting from here.
|
||||
Awaiting(ReqId, u64, HeadersRequest), // awaiting response for this request.
|
||||
Prehistoric, // prehistoric block found. TODO: start to roll back CHTs.
|
||||
FoundCommon(u64, H256), // common block found.
|
||||
Genesis, // common ancestor is the genesis.
|
||||
}
|
||||
|
||||
impl AncestorSearch {
|
||||
fn begin(best_num: u64) -> Self {
|
||||
match best_num {
|
||||
0 => AncestorSearch::Genesis,
|
||||
_ => AncestorSearch::Queued(best_num),
|
||||
}
|
||||
}
|
||||
|
||||
fn process_response<L>(self, ctx: &dyn ResponseContext, client: &L) -> AncestorSearch
|
||||
where
|
||||
L: AsLightClient,
|
||||
{
|
||||
let client = client.as_light_client();
|
||||
let first_num = client.chain_info().first_block_number.unwrap_or(0);
|
||||
match self {
|
||||
AncestorSearch::Awaiting(id, start, req) => {
|
||||
if &id == ctx.req_id() {
|
||||
match response::verify(ctx.data(), &req) {
|
||||
Ok(headers) => {
|
||||
for header in &headers {
|
||||
if client.is_known(&header.hash()) {
|
||||
debug!(target: "sync", "Found common ancestor with best chain");
|
||||
return AncestorSearch::FoundCommon(
|
||||
header.number(),
|
||||
header.hash(),
|
||||
);
|
||||
}
|
||||
|
||||
if header.number() < first_num {
|
||||
debug!(target: "sync", "Prehistoric common ancestor with best chain.");
|
||||
return AncestorSearch::Prehistoric;
|
||||
}
|
||||
}
|
||||
|
||||
let probe = start - headers.len() as u64;
|
||||
if probe == 0 {
|
||||
AncestorSearch::Genesis
|
||||
} else {
|
||||
AncestorSearch::Queued(probe)
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
trace!(target: "sync", "Bad headers response from {}: {}", ctx.responder(), e);
|
||||
|
||||
ctx.punish_responder();
|
||||
AncestorSearch::Queued(start)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
AncestorSearch::Awaiting(id, start, req)
|
||||
}
|
||||
}
|
||||
other => other,
|
||||
}
|
||||
}
|
||||
|
||||
fn requests_abandoned(self, req_ids: &[ReqId]) -> AncestorSearch {
|
||||
match self {
|
||||
AncestorSearch::Awaiting(id, start, req) => {
|
||||
if req_ids.iter().find(|&x| x == &id).is_some() {
|
||||
AncestorSearch::Queued(start)
|
||||
} else {
|
||||
AncestorSearch::Awaiting(id, start, req)
|
||||
}
|
||||
}
|
||||
other => other,
|
||||
}
|
||||
}
|
||||
|
||||
fn dispatch_request<F>(self, mut dispatcher: F) -> AncestorSearch
|
||||
where
|
||||
F: FnMut(HeadersRequest) -> Option<ReqId>,
|
||||
{
|
||||
const BATCH_SIZE: u64 = 64;
|
||||
|
||||
match self {
|
||||
AncestorSearch::Queued(start) => {
|
||||
let batch_size = ::std::cmp::min(start, BATCH_SIZE);
|
||||
trace!(target: "sync", "Requesting {} reverse headers from {} to find common ancestor",
|
||||
batch_size, start);
|
||||
|
||||
let req = HeadersRequest {
|
||||
start: start.into(),
|
||||
max: batch_size,
|
||||
skip: 0,
|
||||
reverse: true,
|
||||
};
|
||||
|
||||
match dispatcher(req.clone()) {
|
||||
Some(req_id) => AncestorSearch::Awaiting(req_id, start, req),
|
||||
None => AncestorSearch::Queued(start),
|
||||
}
|
||||
}
|
||||
other => other,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// synchronization state machine.
|
||||
#[derive(Debug)]
|
||||
enum SyncState {
|
||||
// Idle (waiting for peers) or at chain head.
|
||||
Idle,
|
||||
// searching for common ancestor with best chain.
|
||||
// queue should be cleared at this phase.
|
||||
AncestorSearch(AncestorSearch),
|
||||
// Doing sync rounds.
|
||||
Rounds(SyncRound),
|
||||
}
|
||||
|
||||
/// A wrapper around the SyncState that makes sure to
|
||||
/// update the giving reference to `is_idle`
|
||||
#[derive(Debug)]
|
||||
struct SyncStateWrapper {
|
||||
state: SyncState,
|
||||
}
|
||||
|
||||
impl SyncStateWrapper {
|
||||
/// Create a new wrapper for SyncState::Idle
|
||||
pub fn idle() -> Self {
|
||||
SyncStateWrapper {
|
||||
state: SyncState::Idle,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the new state's value, making sure `is_idle` gets updated
|
||||
pub fn set(&mut self, state: SyncState, is_idle_handle: &mut bool) {
|
||||
*is_idle_handle = match state {
|
||||
SyncState::Idle => true,
|
||||
_ => false,
|
||||
};
|
||||
self.state = state;
|
||||
}
|
||||
|
||||
/// Returns the internal state's value
|
||||
pub fn into_inner(self) -> SyncState {
|
||||
self.state
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for SyncStateWrapper {
|
||||
type Target = SyncState;
|
||||
|
||||
fn deref(&self) -> &SyncState {
|
||||
&self.state
|
||||
}
|
||||
}
|
||||
|
||||
struct ResponseCtx<'a> {
|
||||
peer: PeerId,
|
||||
req_id: ReqId,
|
||||
ctx: &'a dyn BasicContext,
|
||||
data: &'a [encoded::Header],
|
||||
}
|
||||
|
||||
impl<'a> ResponseContext for ResponseCtx<'a> {
|
||||
fn responder(&self) -> PeerId {
|
||||
self.peer
|
||||
}
|
||||
fn req_id(&self) -> &ReqId {
|
||||
&self.req_id
|
||||
}
|
||||
fn data(&self) -> &[encoded::Header] {
|
||||
self.data
|
||||
}
|
||||
fn punish_responder(&self) {
|
||||
self.ctx.disable_peer(self.peer)
|
||||
}
|
||||
}
|
||||
|
||||
/// Light client synchronization manager. See module docs for more details.
|
||||
pub struct LightSync<L: AsLightClient> {
|
||||
start_block_number: u64,
|
||||
best_seen: Mutex<Option<ChainInfo>>, // best seen block on the network.
|
||||
peers: RwLock<HashMap<PeerId, Mutex<Peer>>>, // peers which are relevant to synchronization.
|
||||
pending_reqs: Mutex<HashMap<ReqId, PendingReq>>, // requests from this handler
|
||||
client: Arc<L>,
|
||||
rng: Mutex<OsRng>,
|
||||
state: Mutex<SyncStateWrapper>,
|
||||
// We duplicate this state tracking to avoid deadlocks in `is_major_importing`.
|
||||
is_idle: Mutex<bool>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct PendingReq {
|
||||
started: Instant,
|
||||
timeout: Duration,
|
||||
}
|
||||
|
||||
impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
    // A new peer connected. Peers which cannot serve headers are of no use
    // to header sync and are not kept.
    fn on_connect(
        &self,
        ctx: &dyn EventContext,
        status: &Status,
        capabilities: &Capabilities,
    ) -> PeerStatus {
        use std::cmp;

        if capabilities.serve_headers {
            let chain_info = ChainInfo {
                head_td: status.head_td,
                head_hash: status.head_hash,
                head_num: status.head_num,
            };

            // Track the best (maximum) head seen on the network so far.
            // Scoped so the lock is released before touching `peers`.
            {
                let mut best = self.best_seen.lock();
                *best = cmp::max(best.clone(), Some(chain_info.clone()));
            }

            self.peers
                .write()
                .insert(ctx.peer(), Mutex::new(Peer::new(chain_info)));
            self.maintain_sync(ctx.as_basic());

            PeerStatus::Kept
        } else {
            PeerStatus::Unkept
        }
    }

    // A peer disconnected; `unfulfilled` lists requests it will never answer.
    fn on_disconnect(&self, ctx: &dyn EventContext, unfulfilled: &[ReqId]) {
        let peer_id = ctx.peer();

        let peer = match self.peers.write().remove(&peer_id).map(|p| p.into_inner()) {
            Some(peer) => peer,
            None => return,
        };

        trace!(target: "sync", "peer {} disconnecting", peer_id);

        // If the departing peer held the best-seen head, recompute the best
        // from the remaining peers (None if no peers remain).
        let new_best = {
            let mut best = self.best_seen.lock();

            if best.as_ref().map_or(false, |b| b == &peer.status) {
                // search for next-best block.
                let next_best: Option<ChainInfo> = self
                    .peers
                    .read()
                    .values()
                    .map(|p| p.lock().status.clone())
                    .map(Some)
                    .fold(None, ::std::cmp::max);

                *best = next_best;
            }

            best.clone()
        };

        // Forget any requests that were outstanding against this peer.
        {
            let mut pending_reqs = self.pending_reqs.lock();
            for unfulfilled in unfulfilled {
                pending_reqs.remove(&unfulfilled);
            }
        }

        if new_best.is_none() {
            debug!(target: "sync", "No peers remain. Reverting to idle");
            self.set_state(&mut self.state.lock(), SyncState::Idle);
        } else {
            let mut state = self.state.lock();

            // `Idle` is temporarily swapped in so the current state can be
            // consumed by value; `set_state` writes the real successor back.
            let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner()
            {
                SyncState::Idle => SyncState::Idle,
                SyncState::AncestorSearch(search) => {
                    SyncState::AncestorSearch(search.requests_abandoned(unfulfilled))
                }
                SyncState::Rounds(round) => {
                    SyncState::Rounds(round.requests_abandoned(unfulfilled))
                }
            };
            self.set_state(&mut state, next_state);
        }

        self.maintain_sync(ctx.as_basic());
    }

    // A peer announced a new chain head. Updates that peer's recorded status
    // and possibly the best-seen head, then drives the state machine.
    fn on_announcement(&self, ctx: &dyn EventContext, announcement: &Announcement) {
        let (last_td, chain_info) = {
            let peers = self.peers.read();
            match peers.get(&ctx.peer()) {
                None => return,
                Some(peer) => {
                    let mut peer = peer.lock();
                    let last_td = peer.status.head_td;
                    peer.status = ChainInfo {
                        head_td: announcement.head_td,
                        head_hash: announcement.head_hash,
                        head_num: announcement.head_num,
                    };
                    (last_td, peer.status.clone())
                }
            }
        };

        trace!(target: "sync", "Announcement from peer {}: new chain head {:?}, reorg depth {}",
            ctx.peer(), (announcement.head_hash, announcement.head_num), announcement.reorg_depth);

        // A peer whose announced total difficulty decreased is misbehaving;
        // drop it from our set and disconnect.
        if last_td > announcement.head_td {
            trace!(target: "sync", "Peer {} moved backwards.", ctx.peer());
            self.peers.write().remove(&ctx.peer());
            ctx.disconnect_peer(ctx.peer());
            return;
        }

        {
            let mut best = self.best_seen.lock();
            *best = ::std::cmp::max(best.clone(), Some(chain_info));
        }

        self.maintain_sync(ctx.as_basic());
    }

    // Responses arrived for a previously-dispatched request.
    fn on_responses(&self, ctx: &dyn EventContext, req_id: ReqId, responses: &[request::Response]) {
        let peer = ctx.peer();
        // Ignore responses from unknown peers or for requests we no longer track.
        if !self.peers.read().contains_key(&peer) {
            return;
        }

        if self.pending_reqs.lock().remove(&req_id).is_none() {
            return;
        }

        // Only a headers response is expected; any other kind earns the peer
        // a disable, and the state machine sees an empty header slice.
        let headers = match responses.get(0) {
            Some(&request::Response::Headers(ref response)) => &response.headers[..],
            Some(_) => {
                trace!("Disabling peer {} for wrong response type.", peer);
                ctx.disable_peer(peer);
                &[]
            }
            None => &[],
        };

        {
            let mut state = self.state.lock();

            let ctx = ResponseCtx {
                peer: ctx.peer(),
                req_id: req_id,
                ctx: ctx.as_basic(),
                data: headers,
            };

            // Same consume-by-value pattern as in `on_disconnect`.
            let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner()
            {
                SyncState::Idle => SyncState::Idle,
                SyncState::AncestorSearch(search) => {
                    SyncState::AncestorSearch(search.process_response(&ctx, &*self.client))
                }
                SyncState::Rounds(round) => SyncState::Rounds(round.process_response(&ctx)),
            };
            self.set_state(&mut state, next_state);
        }

        self.maintain_sync(ctx.as_basic());
    }

    // Periodic tick: drive the sync state machine even without network events.
    fn tick(&self, ctx: &dyn BasicContext) {
        self.maintain_sync(ctx);
    }
}
|
||||
|
||||
// private helpers
impl<L: AsLightClient> LightSync<L> {
    /// Sets the `LightSync`'s state and updates the duplicated `is_idle`
    /// flag (read lock-free of `state` by `is_major_importing`).
    fn set_state(&self, state: &mut SyncStateWrapper, next_state: SyncState) {
        state.set(next_state, &mut self.is_idle.lock());
    }

    // Begins a search for the common ancestor and our best block.
    // does not lock state, instead has a mutable reference to it passed.
    fn begin_search(&self, state: &mut SyncStateWrapper) {
        if let None = *self.best_seen.lock() {
            // no peers.
            self.set_state(state, SyncState::Idle);
            return;
        }

        self.client.as_light_client().flush_queue();
        let chain_info = self.client.as_light_client().chain_info();

        trace!(target: "sync", "Beginning search for common ancestor from {:?}",
            (chain_info.best_block_number, chain_info.best_block_hash));
        let next_state =
            SyncState::AncestorSearch(AncestorSearch::begin(chain_info.best_block_number));
        self.set_state(state, next_state);
    }

    // handles request dispatch, block import, state machine transitions, and timeouts.
    fn maintain_sync(&self, ctx: &dyn BasicContext) {
        use ethcore::error::{
            Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind,
        };

        // how many downloaded headers to feed into the queue per iteration.
        const DRAIN_AMOUNT: usize = 128;

        let client = self.client.as_light_client();
        let chain_info = client.chain_info();

        let mut state = self.state.lock();
        debug!(target: "sync", "Maintaining sync ({:?})", **state);

        // drain any pending blocks into the queue.
        {
            let mut sink = Vec::with_capacity(DRAIN_AMOUNT);

            'a: loop {
                if client.queue_info().is_full() {
                    break;
                }

                // Consume the state by value (swap in Idle), drain a batch of
                // ready headers if we're in a round, and restore the state.
                let next_state =
                    match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() {
                        SyncState::Rounds(round) => {
                            SyncState::Rounds(round.drain(&mut sink, Some(DRAIN_AMOUNT)))
                        }
                        other => other,
                    };
                self.set_state(&mut state, next_state);

                if sink.is_empty() {
                    break;
                }
                trace!(target: "sync", "Drained {} headers to import", sink.len());

                for header in sink.drain(..) {
                    match client.queue_header(header) {
                        Ok(_) => {}
                        // Duplicates are harmless; keep importing.
                        Err(EthcoreError(
                            EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain),
                            _,
                        )) => {
                            trace!(target: "sync", "Block already in chain. Continuing.");
                        }
                        Err(EthcoreError(
                            EthcoreErrorKind::Import(ImportErrorKind::AlreadyQueued),
                            _,
                        )) => {
                            trace!(target: "sync", "Block already queued. Continuing.");
                        }
                        // Any other import error restarts the ancestor search.
                        Err(e) => {
                            debug!(target: "sync", "Found bad header ({:?}). Reset to search state.", e);

                            self.begin_search(&mut state);
                            break 'a;
                        }
                    }
                }
            }
        }

        // handle state transitions.
        {
            let best_td = chain_info.pending_total_difficulty;
            // Only sync towards a head with more total difficulty than ours;
            // otherwise go idle.
            let sync_target = match *self.best_seen.lock() {
                Some(ref target) if target.head_td > best_td => (target.head_num, target.head_hash),
                ref other => {
                    let network_score = other.as_ref().map(|target| target.head_td);
                    trace!(target: "sync", "No target to sync to. Network score: {:?}, Local score: {:?}",
                        network_score, best_td);
                    self.set_state(&mut state, SyncState::Idle);
                    return;
                }
            };

            match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() {
                SyncState::Rounds(SyncRound::Abort(reason, remaining)) => {
                    // Keep the abort state until all drained headers have been
                    // imported above.
                    if remaining.len() > 0 {
                        self.set_state(
                            &mut state,
                            SyncState::Rounds(SyncRound::Abort(reason, remaining)),
                        );
                        return;
                    }

                    match reason {
                        AbortReason::BadScaffold(bad_peers) => {
                            debug!(target: "sync", "Disabling peers responsible for bad scaffold");
                            for peer in bad_peers {
                                ctx.disable_peer(peer);
                            }
                        }
                        AbortReason::NoResponses => {}
                        AbortReason::TargetReached => {
                            debug!(target: "sync", "Sync target reached. Going idle");
                            self.set_state(&mut state, SyncState::Idle);
                            return;
                        }
                    }

                    debug!(target: "sync", "Beginning search after aborted sync round");
                    self.begin_search(&mut state);
                }
                // Common ancestor found: start a sync round from it.
                SyncState::AncestorSearch(AncestorSearch::FoundCommon(num, hash)) => {
                    self.set_state(
                        &mut state,
                        SyncState::Rounds(SyncRound::begin((num, hash), sync_target)),
                    );
                }
                // Same as `FoundCommon`, but anchored at the genesis block.
                SyncState::AncestorSearch(AncestorSearch::Genesis) => {
                    let g_hash = chain_info.genesis_hash;
                    self.set_state(
                        &mut state,
                        SyncState::Rounds(SyncRound::begin((0, g_hash), sync_target)),
                    );
                }
                SyncState::Idle => self.begin_search(&mut state),
                other => self.set_state(&mut state, other), // restore displaced state.
            }
        }

        // handle requests timeouts
        {
            let mut pending_reqs = self.pending_reqs.lock();
            let mut unfulfilled = Vec::new();
            for (req_id, info) in pending_reqs.iter() {
                if info.started.elapsed() >= info.timeout {
                    debug!(target: "sync", "{} timed out", req_id);
                    unfulfilled.push(req_id.clone());
                }
            }

            if !unfulfilled.is_empty() {
                for unfulfilled in unfulfilled.iter() {
                    pending_reqs.remove(unfulfilled);
                }
                // Release the pending-requests lock before the state transition.
                drop(pending_reqs);

                let next_state =
                    match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() {
                        SyncState::Idle => SyncState::Idle,
                        SyncState::AncestorSearch(search) => {
                            SyncState::AncestorSearch(search.requests_abandoned(&unfulfilled))
                        }
                        SyncState::Rounds(round) => {
                            SyncState::Rounds(round.requests_abandoned(&unfulfilled))
                        }
                    };
                self.set_state(&mut state, next_state);
            }
        }

        // allow dispatching of requests.
        {
            let peers = self.peers.read();
            // Only peers claiming a better head than ours are worth asking.
            let mut peer_ids: Vec<_> = peers
                .iter()
                .filter_map(|(id, p)| {
                    if p.lock().status.head_td > chain_info.pending_total_difficulty {
                        Some(*id)
                    } else {
                        None
                    }
                })
                .collect();

            let mut rng = self.rng.lock();
            let mut requested_from = HashSet::new();

            // naive request dispatcher: just give to any peer which says it will
            // give us responses. but only one request per peer per state transition.
            let dispatcher = move |req: HeadersRequest| {
                // Shuffle so load spreads across viable peers.
                rng.shuffle(&mut peer_ids);

                let request = {
                    let mut builder = request::Builder::default();
                    builder.push(request::Request::Headers(request::IncompleteHeadersRequest {
                        start: req.start.into(),
                        skip: req.skip,
                        max: req.max,
                        reverse: req.reverse,
                    })).expect("request provided fully complete with no unresolved back-references; qed");
                    builder.build()
                };
                for peer in &peer_ids {
                    if requested_from.contains(peer) {
                        continue;
                    }
                    match ctx.request_from(*peer, request.clone()) {
                        Ok(id) => {
                            assert!(
                                req.max <= u32::max_value() as u64,
                                "requesting more than 2^32 headers at a time would overflow"
                            );
                            // Timeout scales with the number of headers asked for.
                            let timeout =
                                REQ_TIMEOUT_BASE + REQ_TIMEOUT_PER_HEADER * req.max as u32;
                            self.pending_reqs.lock().insert(
                                id.clone(),
                                PendingReq {
                                    started: Instant::now(),
                                    timeout,
                                },
                            );
                            requested_from.insert(peer.clone());

                            return Some(id);
                        }
                        // Peer temporarily out of credits: try the next one.
                        Err(NetError::NoCredits) => {}
                        Err(e) => {
                            trace!(target: "sync", "Error requesting headers from viable peer: {}", e)
                        }
                    }
                }

                None
            };

            let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner()
            {
                SyncState::Rounds(round) => SyncState::Rounds(round.dispatch_requests(dispatcher)),
                SyncState::AncestorSearch(search) => {
                    SyncState::AncestorSearch(search.dispatch_request(dispatcher))
                }
                other => other,
            };
            self.set_state(&mut state, next_state);
        }
    }
}
|
||||
|
||||
// public API
|
||||
impl<L: AsLightClient> LightSync<L> {
|
||||
/// Create a new instance of `LightSync`.
|
||||
///
|
||||
/// This won't do anything until registered as a handler
|
||||
/// so it can act on events.
|
||||
pub fn new(client: Arc<L>) -> Result<Self, ::std::io::Error> {
|
||||
Ok(LightSync {
|
||||
start_block_number: client.as_light_client().chain_info().best_block_number,
|
||||
best_seen: Mutex::new(None),
|
||||
peers: RwLock::new(HashMap::new()),
|
||||
pending_reqs: Mutex::new(HashMap::new()),
|
||||
client: client,
|
||||
rng: Mutex::new(OsRng::new()?),
|
||||
state: Mutex::new(SyncStateWrapper::idle()),
|
||||
is_idle: Mutex::new(true),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for erasing the type of a light sync object and exposing read-only methods.
pub trait SyncInfo {
    /// Get the highest block advertised on the network.
    /// `None` when no peer head has been seen yet.
    fn highest_block(&self) -> Option<u64>;

    /// Get the block number at the time of sync start.
    fn start_block(&self) -> u64;

    /// Whether major sync is underway.
    fn is_major_importing(&self) -> bool;
}
|
||||
|
||||
impl<L: AsLightClient> SyncInfo for LightSync<L> {
    fn highest_block(&self) -> Option<u64> {
        let best = self.best_seen.lock();
        best.as_ref().map(|info| info.head_num)
    }

    fn start_block(&self) -> u64 {
        self.start_block_number
    }

    fn is_major_importing(&self) -> bool {
        // A queue with at most this many entries counts as effectively empty.
        const EMPTY_QUEUE: usize = 3;

        let queue_info = self.client.as_light_client().queue_info();
        let verifying =
            queue_info.unverified_queue_size + queue_info.verified_queue_size > EMPTY_QUEUE;
        // `is_idle` mirrors the sync state (see the field comment), so this
        // check does not need to take the `state` lock.
        let syncing = !*self.is_idle.lock();

        verifying || syncing
    }
}
|
@ -1,291 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Helpers for decoding and verifying responses for headers.
|
||||
|
||||
use ethereum_types::H256;
|
||||
use light::request::{CompleteHeadersRequest as HeadersRequest, HashOrNumber};
|
||||
use rlp::DecoderError;
|
||||
use std::fmt;
|
||||
use types::{encoded, header::Header};
|
||||
|
||||
/// Errors found when decoding headers and verifying with basic constraints.
#[derive(Debug, PartialEq)]
pub enum BasicError {
    /// Wrong skip value: expected, found (if any).
    WrongSkip(u64, Option<u64>),
    /// Wrong start number: expected, found.
    WrongStartNumber(u64, u64),
    /// Wrong start hash: expected, found.
    WrongStartHash(H256, H256),
    /// Too many headers: maximum allowed, found.
    TooManyHeaders(usize, usize),
    /// Decoder error while reading header RLP.
    Decoder(DecoderError),
}
|
||||
|
||||
// Lets RLP decoding failures propagate with `?` inside `verify`.
impl From<DecoderError> for BasicError {
    fn from(err: DecoderError) -> Self {
        BasicError::Decoder(err)
    }
}
|
||||
|
||||
impl fmt::Display for BasicError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "Header response verification error: ")?;
|
||||
|
||||
match *self {
|
||||
BasicError::WrongSkip(ref exp, ref got) => {
|
||||
write!(f, "wrong skip (expected {}, got {:?})", exp, got)
|
||||
}
|
||||
BasicError::WrongStartNumber(ref exp, ref got) => {
|
||||
write!(f, "wrong start number (expected {}, got {})", exp, got)
|
||||
}
|
||||
BasicError::WrongStartHash(ref exp, ref got) => {
|
||||
write!(f, "wrong start hash (expected {}, got {})", exp, got)
|
||||
}
|
||||
BasicError::TooManyHeaders(ref max, ref got) => {
|
||||
write!(f, "too many headers (max {}, got {})", max, got)
|
||||
}
|
||||
BasicError::Decoder(ref err) => write!(f, "{}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Request verification constraint.
pub trait Constraint {
    /// Error produced when the constraint is violated.
    type Error;

    /// Verify headers against this. `reverse` indicates descending order.
    fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>;
}
|
||||
|
||||
/// Do basic verification of provided headers against a request.
|
||||
pub fn verify(
|
||||
headers: &[encoded::Header],
|
||||
request: &HeadersRequest,
|
||||
) -> Result<Vec<Header>, BasicError> {
|
||||
let headers: Result<Vec<_>, _> = headers.iter().map(|h| h.decode()).collect();
|
||||
match headers {
|
||||
Ok(headers) => {
|
||||
let reverse = request.reverse;
|
||||
|
||||
Max(request.max as usize).verify(&headers, reverse)?;
|
||||
match request.start {
|
||||
HashOrNumber::Number(ref num) => StartsAtNumber(*num).verify(&headers, reverse)?,
|
||||
HashOrNumber::Hash(ref hash) => StartsAtHash(*hash).verify(&headers, reverse)?,
|
||||
}
|
||||
|
||||
SkipsBetween(request.skip).verify(&headers, reverse)?;
|
||||
|
||||
Ok(headers)
|
||||
}
|
||||
Err(e) => Err(e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
// Individual constraints checked by `verify`:
struct StartsAtNumber(u64); // first header must have this number.
struct StartsAtHash(H256); // first header must have this hash.
struct SkipsBetween(u64); // consecutive headers must be spaced by exactly this skip.
struct Max(usize); // no more than this many headers.
|
||||
|
||||
impl Constraint for StartsAtNumber {
|
||||
type Error = BasicError;
|
||||
|
||||
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
|
||||
headers.first().map_or(Ok(()), |h| {
|
||||
if h.number() == self.0 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(BasicError::WrongStartNumber(self.0, h.number()))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Constraint for StartsAtHash {
|
||||
type Error = BasicError;
|
||||
|
||||
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
|
||||
headers.first().map_or(Ok(()), |h| {
|
||||
if h.hash() == self.0 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(BasicError::WrongStartHash(self.0, h.hash()))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Constraint for SkipsBetween {
|
||||
type Error = BasicError;
|
||||
|
||||
fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> {
|
||||
for pair in headers.windows(2) {
|
||||
let (low, high) = if reverse {
|
||||
(&pair[1], &pair[0])
|
||||
} else {
|
||||
(&pair[0], &pair[1])
|
||||
};
|
||||
if low.number() >= high.number() {
|
||||
return Err(BasicError::WrongSkip(self.0, None));
|
||||
}
|
||||
|
||||
let skip = (high.number() - low.number()) - 1;
|
||||
if skip != self.0 {
|
||||
return Err(BasicError::WrongSkip(self.0, Some(skip)));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Constraint for Max {
|
||||
type Error = BasicError;
|
||||
|
||||
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
|
||||
match headers.len() > self.0 {
|
||||
true => Err(BasicError::TooManyHeaders(self.0, headers.len())),
|
||||
false => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use light::request::CompleteHeadersRequest as HeadersRequest;
    use types::{encoded, header::Header};

    use super::*;

    // Builds an encoded header chain for the given block numbers, linking
    // each header to its predecessor via the parent hash.
    fn chain_of<I: Iterator<Item = u64>>(numbers: I) -> Vec<encoded::Header> {
        let mut previous = None;
        numbers
            .map(|num| {
                let mut header = Header::default();
                header.set_number(num);

                if let Some(parent) = previous {
                    header.set_parent_hash(parent);
                }
                previous = Some(header.hash());

                encoded::Header::new(::rlp::encode(&header))
            })
            .collect()
    }

    #[test]
    fn sequential_forward() {
        let request = HeadersRequest {
            start: 10.into(),
            max: 30,
            skip: 0,
            reverse: false,
        };

        let headers = chain_of(10..35);
        assert!(verify(&headers, &request).is_ok());
    }

    #[test]
    fn sequential_backward() {
        let request = HeadersRequest {
            start: 34.into(),
            max: 30,
            skip: 0,
            reverse: true,
        };

        let headers = chain_of((10..35).rev());
        assert!(verify(&headers, &request).is_ok());
    }

    #[test]
    fn too_many() {
        let request = HeadersRequest {
            start: 10.into(),
            max: 20,
            skip: 0,
            reverse: false,
        };

        // 25 headers against a maximum of 20.
        let headers = chain_of(10..35);
        assert_eq!(
            verify(&headers, &request),
            Err(BasicError::TooManyHeaders(20, 25))
        );
    }

    #[test]
    fn wrong_skip() {
        let request = HeadersRequest {
            start: 10.into(),
            max: 30,
            skip: 5,
            reverse: false,
        };

        // Headers spaced 3 apart (10, 13, 16, …) while the request asks for
        // a skip of 5. No parent linking: only numbers are inspected here.
        let headers: Vec<_> = (0..25)
            .map(|x| {
                let mut header = Header::default();
                header.set_number(x * 3 + 10);

                encoded::Header::new(::rlp::encode(&header))
            })
            .collect();

        assert_eq!(
            verify(&headers, &request),
            Err(BasicError::WrongSkip(5, Some(2)))
        );
    }
}
|
@ -1,585 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Header download state machine.
|
||||
|
||||
use std::{
|
||||
cmp::Ordering,
|
||||
collections::{BinaryHeap, HashMap, HashSet, VecDeque},
|
||||
fmt,
|
||||
};
|
||||
|
||||
use types::{encoded, header::Header};
|
||||
|
||||
use light::{net::ReqId, request::CompleteHeadersRequest as HeadersRequest};
|
||||
|
||||
use ethereum_types::H256;
|
||||
use network::PeerId;
|
||||
|
||||
use super::response;
|
||||
|
||||
// number of attempts to make to get a full scaffold for a sync round.
const SCAFFOLD_ATTEMPTS: usize = 3;
|
||||
|
||||
/// Context for a headers response.
pub trait ResponseContext {
    /// Get the peer who sent this response.
    fn responder(&self) -> PeerId;
    /// Get the request ID this response corresponds to.
    fn req_id(&self) -> &ReqId;
    /// Get the (unverified) response data.
    fn data(&self) -> &[encoded::Header];
    /// Punish the responder for a bad or empty response.
    fn punish_responder(&self);
}
|
||||
|
||||
/// Reasons for sync round abort.
#[derive(Debug, Clone)]
pub enum AbortReason {
    /// Bad sparse header chain along with a list of peers who contributed to it.
    BadScaffold(Vec<PeerId>),
    /// No incoming data.
    NoResponses,
    /// Sync rounds completed.
    TargetReached,
}
|
||||
|
||||
// A request for headers with a known starting header hash.
// and a known parent hash for the last block.
#[derive(PartialEq, Eq)]
struct SubchainRequest {
    subchain_parent: (u64, H256),    // sparse header below the gap being filled
    headers_request: HeadersRequest, // remaining portion of headers to fetch
    subchain_end: (u64, H256),       // block just below the sparse header above the gap
    downloaded: VecDeque<Header>,    // headers received so far, in ascending order
}
|
||||
|
||||
// ordered by subchain parent number so pending requests towards the
|
||||
// front of the round are dispatched first.
|
||||
impl PartialOrd for SubchainRequest {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
self.subchain_parent
|
||||
.0
|
||||
.partial_cmp(&other.subchain_parent.0)
|
||||
.map(Ordering::reverse)
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for SubchainRequest {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
self.subchain_parent
|
||||
.0
|
||||
.cmp(&other.subchain_parent.0)
|
||||
.reverse()
|
||||
}
|
||||
}
|
||||
|
||||
/// Manages downloading of interior blocks of a sparse header chain.
pub struct Fetcher {
    sparse: VecDeque<Header>, // sparse header chain.
    requests: BinaryHeap<SubchainRequest>, // requests not yet dispatched.
    complete_requests: HashMap<H256, SubchainRequest>, // filled gaps, keyed by subchain parent hash.
    pending: HashMap<ReqId, SubchainRequest>, // requests dispatched and awaiting a response.
    scaffold_contributors: Vec<PeerId>, // peers who supplied the sparse chain.
    ready: VecDeque<Header>, // contiguous headers ready to be drained.
    end: (u64, H256), // last header of the sparse chain.
    target: (u64, H256), // overall sync target.
}
|
||||
|
||||
impl Fetcher {
|
||||
// Produce a new fetcher given a sparse headerchain, in ascending order along
|
||||
// with a list of peers who helped produce the chain.
|
||||
// The headers must be valid RLP at this point and must have a consistent
|
||||
// non-zero gap between them. Will abort the round if found wrong.
|
||||
fn new(
|
||||
sparse_headers: Vec<Header>,
|
||||
contributors: Vec<PeerId>,
|
||||
target: (u64, H256),
|
||||
) -> SyncRound {
|
||||
let mut requests = BinaryHeap::with_capacity(sparse_headers.len() - 1);
|
||||
|
||||
for pair in sparse_headers.windows(2) {
|
||||
let low_rung = &pair[0];
|
||||
let high_rung = &pair[1];
|
||||
|
||||
let diff = high_rung.number() - low_rung.number();
|
||||
|
||||
// should never happen as long as we verify the gaps
|
||||
// gotten from SyncRound::Start
|
||||
if diff < 2 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let needed_headers = HeadersRequest {
|
||||
start: high_rung.parent_hash().clone().into(),
|
||||
max: diff - 1,
|
||||
skip: 0,
|
||||
reverse: true,
|
||||
};
|
||||
|
||||
requests.push(SubchainRequest {
|
||||
headers_request: needed_headers,
|
||||
subchain_end: (high_rung.number() - 1, *high_rung.parent_hash()),
|
||||
downloaded: VecDeque::new(),
|
||||
subchain_parent: (low_rung.number(), low_rung.hash()),
|
||||
});
|
||||
}
|
||||
|
||||
let end = match sparse_headers.last().map(|h| (h.number(), h.hash())) {
|
||||
Some(end) => end,
|
||||
None => {
|
||||
return SyncRound::abort(AbortReason::BadScaffold(contributors), VecDeque::new())
|
||||
}
|
||||
};
|
||||
|
||||
SyncRound::Fetch(Fetcher {
|
||||
sparse: sparse_headers.into(),
|
||||
requests: requests,
|
||||
complete_requests: HashMap::new(),
|
||||
pending: HashMap::new(),
|
||||
scaffold_contributors: contributors,
|
||||
ready: VecDeque::new(),
|
||||
end: end,
|
||||
target: target,
|
||||
})
|
||||
}
|
||||
|
||||
    // collect complete requests and their subchain from the sparse header chain
    // into the ready set in order.
    fn collect_ready(&mut self) {
        loop {
            // The lowest remaining sparse header keys the next expected gap.
            let start_hash = match self.sparse.front() {
                Some(first) => first.hash(),
                None => break,
            };

            match self.complete_requests.remove(&start_hash) {
                // Gap below the next sparse header is not filled yet; stop.
                None => break,
                Some(complete_req) => {
                    // Move the sparse header plus its filled gap into `ready`.
                    self.ready
                        .push_back(self.sparse.pop_front().expect("first known to exist; qed"));
                    self.ready.extend(complete_req.downloaded);
                }
            }
        }

        // frames are between two sparse headers and keyed by subchain parent, so the last
        // remaining will be the last header.
        if self.sparse.len() == 1 {
            self.ready.push_back(
                self.sparse
                    .pop_back()
                    .expect("sparse known to have one entry; qed"),
            )
        }

        trace!(target: "sync", "{} headers ready to drain", self.ready.len());
    }
|
||||
|
||||
    // Handle a headers response for one of our pending subchain requests.
    // Verifies the headers, extends the partially-downloaded gap, and marks
    // the gap complete when fully filled. Bad responses punish the peer and
    // re-queue the request.
    fn process_response<R: ResponseContext>(mut self, ctx: &R) -> SyncRound {
        // Unknown request id: not ours, ignore.
        let mut request = match self.pending.remove(ctx.req_id()) {
            Some(request) => request,
            None => return SyncRound::Fetch(self),
        };

        trace!(target: "sync", "Received response for subchain ({} -> {})",
            request.subchain_parent.0, request.subchain_end.0);

        let headers = ctx.data();

        if headers.is_empty() {
            trace!(target: "sync", "Punishing peer {} for empty response", ctx.responder());
            ctx.punish_responder();

            // Put the request back so another peer can serve it.
            self.requests.push(request);
            return SyncRound::Fetch(self);
        }

        match response::verify(headers, &request.headers_request) {
            Err(e) => {
                trace!(target: "sync", "Punishing peer {} for invalid response ({})", ctx.responder(), e);
                ctx.punish_responder();

                // TODO: track number of attempts per request,
                // abort if failure rate too high.
                self.requests.push(request);
                SyncRound::Fetch(self)
            }
            Ok(headers) => {
                // Headers arrive in reverse order; each one's hash must match
                // the previous header's parent hash.
                let mut parent_hash = None;
                for header in headers {
                    if let Some(hash) = parent_hash.as_ref() {
                        if *hash != header.hash() {
                            trace!(target: "sync", "Punishing peer {} for parent mismatch", ctx.responder());
                            ctx.punish_responder();
                            self.requests.push(request);
                            return SyncRound::Fetch(self);
                        }
                    }
                    // incrementally update the frame request as we go so we can
                    // return at any time in the loop.
                    parent_hash = Some(*header.parent_hash());
                    request.headers_request.start = header.parent_hash().clone().into();
                    request.headers_request.max -= 1;
                    request.downloaded.push_front(header);
                }

                let subchain_parent = request.subchain_parent.1;

                // check if the subchain portion has been completely filled.
                if request.headers_request.max == 0 {
                    // The lowest downloaded header must link to the sparse
                    // header below the gap; otherwise the scaffold was bad.
                    if parent_hash.map_or(true, |hash| hash != subchain_parent) {
                        let abort = AbortReason::BadScaffold(self.scaffold_contributors);
                        return SyncRound::abort(abort, self.ready);
                    }

                    self.complete_requests.insert(subchain_parent, request);
                    self.collect_ready();
                }

                // state transition not triggered until drain is finished.
                SyncRound::Fetch(self)
            }
        }
    }
|
||||
|
||||
fn requests_abandoned(mut self, abandoned: &[ReqId]) -> SyncRound {
|
||||
trace!(target: "sync", "Abandonned requests {:?}", abandoned);
|
||||
|
||||
for abandoned in abandoned {
|
||||
match self.pending.remove(abandoned) {
|
||||
None => {}
|
||||
Some(req) => self.requests.push(req),
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: track failure rate and potentially abort.
|
||||
SyncRound::Fetch(self)
|
||||
}
|
||||
|
||||
/// Dispatch queued subchain requests via the provided dispatcher.
///
/// Pops requests off the queue one at a time; each successfully assigned
/// request moves to the pending (in-flight) map. On the first dispatch
/// failure the request is pushed back and dispatching stops — the dispatcher
/// returning `None` indicates no suitable peer is currently available.
fn dispatch_requests<D>(mut self, mut dispatcher: D) -> SyncRound
where
    D: FnMut(HeadersRequest) -> Option<ReqId>,
{
    while let Some(pending_req) = self.requests.pop() {
        match dispatcher(pending_req.headers_request.clone()) {
            Some(req_id) => {
                trace!(target: "sync", "Assigned request {} for subchain ({} -> {})",
                    req_id, pending_req.subchain_parent.0, pending_req.subchain_end.0);

                self.pending.insert(req_id, pending_req);
            }
            None => {
                trace!(target: "sync", "Failed to assign request for subchain ({} -> {})",
                    pending_req.subchain_parent.0, pending_req.subchain_end.0);
                // Requeue and stop: if this one couldn't be assigned, later
                // ones won't be either on this pass.
                self.requests.push(pending_req);
                break;
            }
        }
    }

    SyncRound::Fetch(self)
}
|
||||
|
||||
fn drain(mut self, headers: &mut Vec<Header>, max: Option<usize>) -> SyncRound {
|
||||
let max = ::std::cmp::min(max.unwrap_or(usize::max_value()), self.ready.len());
|
||||
headers.extend(self.ready.drain(0..max));
|
||||
|
||||
if self.sparse.is_empty() && self.ready.is_empty() {
|
||||
trace!(target: "sync", "sync round complete. Starting anew from {:?}", self.end);
|
||||
SyncRound::begin(self.end, self.target)
|
||||
} else {
|
||||
SyncRound::Fetch(self)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute scaffold parameters from the non-zero distance between the start
// and target blocks. Returns `(skip, pivots)`: the gap between scaffold
// pivots and the number of pivots to request.
fn scaffold_params(diff: u64) -> (u64, u64) {
    // default parameters.
    // amount of blocks between each scaffold pivot.
    const ROUND_SKIP: u64 = 255;
    // amount of scaffold pivots: these are the Xs in "X___X___X"
    const ROUND_PIVOTS: u64 = 256;

    // Each "frame" spans one pivot plus the skipped blocks after it.
    let frame = ROUND_SKIP + 1;
    let rem = diff % frame;

    if diff <= ROUND_SKIP {
        // Close enough to the target: request the headers sequentially.
        return (0, rem);
    }

    // Number of pivots needed to exactly hit or overshoot the target,
    // capped at the per-round maximum.
    let mut pivots_to_target = diff / frame;
    if rem != 0 {
        pivots_to_target += 1;
    }
    let num_pivots = ::std::cmp::min(pivots_to_target, ROUND_PIVOTS);

    (ROUND_SKIP, num_pivots)
}
|
||||
|
||||
/// Round started: get stepped header chain.
/// from a start block with number X we request ROUND_PIVOTS headers stepped by ROUND_SKIP from
/// block X + 1 to a target >= X + 1.
/// If the sync target is within ROUND_SKIP of the start, we request
/// only those blocks. If the sync target is within (ROUND_SKIP + 1) * (ROUND_PIVOTS - 1) of
/// the start, we reduce the number of pivots so the target is outside it.
pub struct RoundStart {
    // block number and hash the round starts from.
    start_block: (u64, H256),
    // block number and hash the round must not go past.
    target: (u64, H256),
    // the single in-flight scaffold request, if any.
    pending_req: Option<(ReqId, HeadersRequest)>,
    // scaffold pivot headers collected so far.
    sparse_headers: Vec<Header>,
    // peers which contributed scaffold headers (blamed if the scaffold is bad).
    contributors: HashSet<PeerId>,
    // number of failed attempts so far; compared against SCAFFOLD_ATTEMPTS.
    attempt: usize,
    // blocks skipped between pivots (see scaffold_params).
    skip: u64,
    // number of pivots to request (see scaffold_params).
    pivots: u64,
}
|
||||
|
||||
impl RoundStart {
    /// Create the start state of a round spanning `start` (exclusive) to
    /// `target` (inclusive). Scaffold geometry is derived from the distance.
    fn new(start: (u64, H256), target: (u64, H256)) -> Self {
        let (skip, pivots) = scaffold_params(target.0 - start.0);

        trace!(target: "sync", "Beginning sync round: {} pivots and {} skip from block {}",
            pivots, skip, start.0);

        RoundStart {
            start_block: start,
            target: target,
            pending_req: None,
            sparse_headers: Vec::new(),
            contributors: HashSet::new(),
            attempt: 0,
            skip: skip,
            pivots: pivots,
        }
    }

    // called on failed attempt. may trigger a transition after a number of attempts.
    // a failed attempt is defined as any time a peer returns invalid or incomplete response
    fn failed_attempt(mut self) -> SyncRound {
        self.attempt += 1;

        if self.attempt >= SCAFFOLD_ATTEMPTS {
            // Out of attempts: salvage what we can. With more than one sparse
            // header we can still fill the partial scaffold; otherwise abort,
            // draining sequential headers only when skip == 0 (they are
            // continuous in that case).
            return if self.sparse_headers.len() > 1 {
                Fetcher::new(
                    self.sparse_headers,
                    self.contributors.into_iter().collect(),
                    self.target,
                )
            } else {
                let fetched_headers = if self.skip == 0 {
                    self.sparse_headers.into()
                } else {
                    VecDeque::new()
                };

                SyncRound::abort(AbortReason::NoResponses, fetched_headers)
            };
        } else {
            SyncRound::Start(self)
        }
    }

    /// Handle a response to the pending scaffold request. Responses for
    /// unknown request ids are ignored. Once all pivots are collected,
    /// transitions to the fetch phase (or finishes, when skip == 0).
    fn process_response<R: ResponseContext>(mut self, ctx: &R) -> SyncRound {
        let req = match self.pending_req.take() {
            Some((id, ref req)) if ctx.req_id() == &id => req.clone(),
            other => {
                // Not ours: put it back untouched.
                self.pending_req = other;
                return SyncRound::Start(self);
            }
        };

        match response::verify(ctx.data(), &req) {
            Ok(headers) => {
                if self.sparse_headers.is_empty()
                    && headers
                        .get(0)
                        .map_or(false, |x| x.parent_hash() != &self.start_block.1)
                {
                    trace!(target: "sync", "Wrong parent for first header in round");
                    ctx.punish_responder(); // or should we reset?
                }

                self.contributors.insert(ctx.responder());
                self.sparse_headers.extend(headers);

                if self.sparse_headers.len() as u64 == self.pivots {
                    // Scaffold complete. With no skip the headers are already
                    // sequential; otherwise begin filling the gaps.
                    return if self.skip == 0 {
                        SyncRound::abort(AbortReason::TargetReached, self.sparse_headers.into())
                    } else {
                        trace!(target: "sync", "Beginning fetch of blocks between {} sparse headers",
                            self.sparse_headers.len());
                        Fetcher::new(
                            self.sparse_headers,
                            self.contributors.into_iter().collect(),
                            self.target,
                        )
                    };
                }
            }
            Err(e) => {
                trace!(target: "sync", "Punishing peer {} for malformed response ({})", ctx.responder(), e);
                ctx.punish_responder();
            }
        };

        // Incomplete (but possibly partially useful) response: count it
        // against the attempt budget.
        self.failed_attempt()
    }

    /// Drop the pending request if its id appears in `abandoned`
    /// (counts as a failed attempt); otherwise leave state unchanged.
    fn requests_abandoned(mut self, abandoned: &[ReqId]) -> SyncRound {
        match self.pending_req.take() {
            Some((id, req)) => {
                if abandoned.iter().any(|r| r == &id) {
                    self.pending_req = None;
                    self.failed_attempt()
                } else {
                    self.pending_req = Some((id, req));
                    SyncRound::Start(self)
                }
            }
            None => SyncRound::Start(self),
        }
    }

    /// Dispatch the scaffold request if none is in flight. The request asks
    /// for the remaining pivots, stepped by `skip`, starting just past the
    /// last collected header.
    fn dispatch_requests<D>(mut self, mut dispatcher: D) -> SyncRound
    where
        D: FnMut(HeadersRequest) -> Option<ReqId>,
    {
        if self.pending_req.is_none() {
            // beginning offset + first block expected after last header we have.
            let start =
                (self.start_block.0 + 1) + self.sparse_headers.len() as u64 * (self.skip + 1);

            let max = self.pivots - self.sparse_headers.len() as u64;

            let headers_request = HeadersRequest {
                start: start.into(),
                max: max,
                skip: self.skip,
                reverse: false,
            };

            if let Some(req_id) = dispatcher(headers_request.clone()) {
                trace!(target: "sync", "Requesting scaffold: {} headers forward from {}, skip={}",
                    max, start, self.skip);

                self.pending_req = Some((req_id, headers_request));
            }
        }

        SyncRound::Start(self)
    }
}
|
||||
|
||||
/// Sync round state machine.
///
/// Rounds move Start -> Fetch -> (new Start | Abort); all transitions are
/// driven by the methods below, which consume and return the state.
pub enum SyncRound {
    /// Beginning a sync round: collecting the sparse scaffold of pivot headers.
    Start(RoundStart),
    /// Fetching intermediate blocks during a sync round.
    Fetch(Fetcher),
    /// Aborted + Sequential headers ready to be drained.
    Abort(AbortReason, VecDeque<Header>),
}
|
||||
|
||||
impl SyncRound {
    /// Move to the aborted state, keeping any already-sequential headers
    /// so callers can still drain them.
    fn abort(reason: AbortReason, remaining: VecDeque<Header>) -> Self {
        trace!(target: "sync", "Aborting sync round: {:?}. To drain: {}", reason, remaining.len());

        SyncRound::Abort(reason, remaining)
    }

    /// Begin sync rounds from a starting block, but not to go past a given target
    pub fn begin(start: (u64, H256), target: (u64, H256)) -> Self {
        if target.0 <= start.0 {
            // Nothing to sync: already at or past the target.
            SyncRound::abort(AbortReason::TargetReached, VecDeque::new())
        } else {
            SyncRound::Start(RoundStart::new(start, target))
        }
    }

    /// Process an answer to a request. Unknown requests will be ignored.
    pub fn process_response<R: ResponseContext>(self, ctx: &R) -> Self {
        match self {
            SyncRound::Start(round_start) => round_start.process_response(ctx),
            SyncRound::Fetch(fetcher) => fetcher.process_response(ctx),
            // Aborted rounds ignore responses.
            other => other,
        }
    }

    /// Return unfulfilled requests from disconnected peer. Unknown requests will be ignored.
    pub fn requests_abandoned(self, abandoned: &[ReqId]) -> Self {
        match self {
            SyncRound::Start(round_start) => round_start.requests_abandoned(abandoned),
            SyncRound::Fetch(fetcher) => fetcher.requests_abandoned(abandoned),
            other => other,
        }
    }

    /// Dispatch pending requests. The dispatcher provided will attempt to
    /// find a suitable peer to serve the request.
    // TODO: have dispatcher take capabilities argument? and return an error as
    // to why no suitable peer can be found? (no buffer, no chain heads that high, etc)
    pub fn dispatch_requests<D>(self, dispatcher: D) -> Self
    where
        D: FnMut(HeadersRequest) -> Option<ReqId>,
    {
        match self {
            SyncRound::Start(round_start) => round_start.dispatch_requests(dispatcher),
            SyncRound::Fetch(fetcher) => fetcher.dispatch_requests(dispatcher),
            other => other,
        }
    }

    /// Drain up to a maximum number (None -> all) of headers (continuous, starting with a child of
    /// the round start block) from the round, starting a new one once finished.
    pub fn drain(self, v: &mut Vec<Header>, max: Option<usize>) -> Self {
        match self {
            SyncRound::Fetch(fetcher) => fetcher.drain(v, max),
            SyncRound::Abort(reason, mut remaining) => {
                // Aborted rounds still surrender their leftover headers.
                let len = ::std::cmp::min(max.unwrap_or(usize::max_value()), remaining.len());
                v.extend(remaining.drain(..len));
                SyncRound::Abort(reason, remaining)
            }
            other => other,
        }
    }
}
|
||||
|
||||
impl fmt::Debug for SyncRound {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
SyncRound::Start(ref state) => write!(f, "Scaffolding from {:?}", state.start_block),
|
||||
SyncRound::Fetch(ref fetcher) => write!(f, "Filling scaffold up to {:?}", fetcher.end),
|
||||
SyncRound::Abort(ref reason, ref remaining) => {
|
||||
write!(f, "Aborted: {:?}, {} remain", reason, remaining.len())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::scaffold_params;

    // Checks the (skip, pivots) geometry for both the sequential and the
    // scaffolded regimes of scaffold_params.
    #[test]
    fn scaffold_config() {
        // within a certain distance of the head, we download
        // sequentially.
        assert_eq!(scaffold_params(1), (0, 1));
        assert_eq!(scaffold_params(6), (0, 6));

        // when scaffolds are useful, download enough frames to get
        // within a close distance of the goal.
        assert_eq!(scaffold_params(1000), (255, 4));
        assert_eq!(scaffold_params(1024), (255, 4));
    }
}
|
@ -1,76 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use tests::helpers::TestNet;
|
||||
|
||||
use ethcore::client::{BlockId, BlockInfo, EachBlockWith};
|
||||
|
||||
mod test_net;
|
||||
|
||||
// One light peer (index 0) syncing from two full peers (indices 1 and 2);
// TestNet::light pushes light peers before full peers, hence the indexing.
// After syncing, the light peer should know the header at the height of the
// longer (6000-block) chain.
#[test]
fn basic_sync() {
    let mut net = TestNet::light(1, 2);
    net.peer(1).chain().add_blocks(5000, EachBlockWith::Nothing);
    net.peer(2).chain().add_blocks(6000, EachBlockWith::Nothing);

    net.sync();

    assert!(net
        .peer(0)
        .light_chain()
        .block_header(BlockId::Number(6000))
        .is_some());
}
|
||||
|
||||
// Put the light peer on peer 1's (shorter, lower-TD) chain by importing its
// headers directly, then sync; the light peer must reorganize onto peer 2's
// higher-TD chain for every post-fork block.
#[test]
fn fork_post_cht() {
    const CHAIN_LENGTH: u64 = 50; // shouldn't be longer than ::light::cht::size();

    let mut net = TestNet::light(1, 2);

    // peer 2 is on a higher TD chain.
    net.peer(1)
        .chain()
        .add_blocks(CHAIN_LENGTH as usize, EachBlockWith::Nothing);
    net.peer(2)
        .chain()
        .add_blocks(CHAIN_LENGTH as usize + 1, EachBlockWith::Uncle);

    // get the light peer on peer 1's chain.
    for id in (0..CHAIN_LENGTH).map(|x| x + 1).map(BlockId::Number) {
        let (light_peer, full_peer) = (net.peer(0), net.peer(1));
        let light_chain = light_peer.light_chain();
        let header = full_peer
            .chain()
            .block_header(id)
            .unwrap()
            .decode()
            .expect("decoding failure");
        // Import and flush immediately so each header is on-chain before the
        // next (child) header is imported.
        let _ = light_chain.import_header(header);
        light_chain.flush_queue();
        light_chain.import_verified();
        assert!(light_chain.block_header(id).is_some());
    }

    net.sync();

    // After sync, every block must match peer 2's chain (the reorg happened).
    for id in (0..CHAIN_LENGTH).map(|x| x + 1).map(BlockId::Number) {
        assert_eq!(
            net.peer(0).light_chain().block_header(id).unwrap(),
            net.peer(2).chain().block_header(id).unwrap()
        );
    }
}
|
@ -1,270 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! TestNet peer definition.
|
||||
|
||||
use std::{
|
||||
collections::{HashSet, VecDeque},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use light_sync::*;
|
||||
use tests::helpers::{Peer as PeerLike, TestNet, TestPacket};
|
||||
|
||||
use ethcore::{client::TestBlockChainClient, spec::Spec};
|
||||
use io::IoChannel;
|
||||
use kvdb_memorydb;
|
||||
use light::{
|
||||
client::fetch::{self, Unavailable},
|
||||
net::{Capabilities, IoContext, LightProtocol, Params as LightParams},
|
||||
provider::LightProvider,
|
||||
};
|
||||
use network::{NodeId, PeerId};
|
||||
use parking_lot::RwLock;
|
||||
|
||||
use light::cache::Cache;
|
||||
use std::time::Duration;
|
||||
|
||||
// Arbitrary network id shared by all test peers so they handshake with each other.
const NETWORK_ID: u64 = 0xcafebabe;

// Light client with no on-demand data fetching (test-only configuration).
pub type LightClient = ::light::client::Client<Unavailable>;
|
||||
|
||||
// In-memory IoContext for tests: outgoing packets go to a shared queue
// instead of the network, and disconnections are recorded for inspection.
struct TestIoContext<'a> {
    // shared per-peer outbound packet queue.
    queue: &'a RwLock<VecDeque<TestPacket>>,
    // the peer whose message we are currently handling, if any;
    // `respond` is a no-op when this is None.
    sender: Option<PeerId>,
    // peers this context asked to disconnect during handling.
    to_disconnect: RwLock<HashSet<PeerId>>,
}
|
||||
|
||||
impl<'a> IoContext for TestIoContext<'a> {
    /// Queue an outgoing packet for `peer` instead of sending it on a socket.
    fn send(&self, peer: PeerId, packet_id: u8, packet_body: Vec<u8>) {
        self.queue.write().push_back(TestPacket {
            data: packet_body,
            packet_id: packet_id,
            recipient: peer,
        })
    }

    /// Send back to the peer whose message is being handled (no-op if none).
    fn respond(&self, packet_id: u8, packet_body: Vec<u8>) {
        if let Some(sender) = self.sender {
            self.send(sender, packet_id, packet_body);
        }
    }

    /// Record the disconnection; the test harness applies it later.
    fn disconnect_peer(&self, peer: PeerId) {
        self.to_disconnect.write().insert(peer);
    }

    /// Tests make no distinction between disabling and disconnecting.
    fn disable_peer(&self, peer: PeerId) {
        self.disconnect_peer(peer)
    }

    /// All test peers speak the newest protocol version.
    fn protocol_version(&self, _peer: PeerId) -> Option<u8> {
        Some(::light::net::MAX_PROTOCOL_VERSION)
    }

    // Not needed by these tests.
    fn persistent_peer_id(&self, _peer: PeerId) -> Option<NodeId> {
        unimplemented!()
    }

    fn is_reserved_peer(&self, _peer: PeerId) -> bool {
        false
    }
}
|
||||
|
||||
// peer-specific data.
enum PeerData {
    // light peer: the sync handler plus its light client.
    Light(Arc<LightSync<LightClient>>, Arc<LightClient>),
    // full peer: backed by a test blockchain client.
    Full(Arc<TestBlockChainClient>),
}
|
||||
|
||||
// test peer type.
// Either a full peer or a light peer.
pub struct Peer {
    // the LES protocol handler driving this peer.
    proto: LightProtocol,
    // outbound packets waiting to be delivered by the test net.
    queue: RwLock<VecDeque<TestPacket>>,
    // light- or full-specific state.
    data: PeerData,
}
|
||||
|
||||
impl Peer {
    // create a new full-client peer for light client peers to sync to.
    // buffer flow is made negligible.
    pub fn new_full(chain: Arc<TestBlockChainClient>) -> Self {
        // Full peers advertise header serving and tx relay; state/chain
        // serving since a block is unused in these tests.
        let params = LightParams {
            network_id: NETWORK_ID,
            config: Default::default(),
            capabilities: Capabilities {
                serve_headers: true,
                serve_chain_since: None,
                serve_state_since: None,
                tx_relay: true,
            },
            sample_store: None,
        };

        let proto = LightProtocol::new(chain.clone(), params);
        Peer {
            proto: proto,
            queue: RwLock::new(VecDeque::new()),
            data: PeerData::Full(chain),
        }
    }

    // create a new light-client peer to sync to full peers.
    pub fn new_light(chain: Arc<LightClient>) -> Self {
        let sync = Arc::new(LightSync::new(chain.clone()).unwrap());
        // Light peers serve nothing; they only consume.
        let params = LightParams {
            network_id: NETWORK_ID,
            config: Default::default(),
            capabilities: Capabilities {
                serve_headers: false,
                serve_chain_since: None,
                serve_state_since: None,
                tx_relay: false,
            },
            sample_store: None,
        };

        let provider = LightProvider::new(chain.clone(), Arc::new(RwLock::new(Default::default())));
        let mut proto = LightProtocol::new(Arc::new(provider), params);
        // Wire the sync handler into the protocol so it sees network events.
        proto.add_handler(sync.clone());
        Peer {
            proto: proto,
            queue: RwLock::new(VecDeque::new()),
            data: PeerData::Light(sync, chain),
        }
    }

    // get the chain from the client, asserting that it is a full node.
    pub fn chain(&self) -> &TestBlockChainClient {
        match self.data {
            PeerData::Full(ref chain) => &*chain,
            _ => panic!("Attempted to access full chain on light peer."),
        }
    }

    // get the light chain from the peer, asserting that it is a light node.
    pub fn light_chain(&self) -> &LightClient {
        match self.data {
            PeerData::Light(_, ref chain) => &*chain,
            _ => panic!("Attempted to access light chain on full peer."),
        }
    }

    // get a test Io context based on the (optional) peer currently being handled.
    fn io(&self, sender: Option<PeerId>) -> TestIoContext {
        TestIoContext {
            queue: &self.queue,
            sender: sender,
            to_disconnect: RwLock::new(HashSet::new()),
        }
    }
}
|
||||
|
||||
impl PeerLike for Peer {
    type Message = TestPacket;

    /// Notify the protocol of a new connection to `other`.
    fn on_connect(&self, other: PeerId) {
        let io = self.io(Some(other));
        self.proto.on_connect(other, &io);
    }

    /// Notify the protocol that `other` disconnected.
    fn on_disconnect(&self, other: PeerId) {
        let io = self.io(Some(other));
        self.proto.on_disconnect(other, &io);
    }

    /// Feed a packet to the protocol; returns the set of peers the handler
    /// asked to disconnect while processing it.
    fn receive_message(&self, from: PeerId, msg: TestPacket) -> HashSet<PeerId> {
        let io = self.io(Some(from));
        self.proto
            .handle_packet(&io, from, msg.packet_id, &msg.data);
        io.to_disconnect.into_inner()
    }

    /// Pop the next outbound packet, if any.
    fn pending_message(&self) -> Option<TestPacket> {
        self.queue.write().pop_front()
    }

    /// A peer is done when it has nothing left to send and (for light peers)
    /// its import queue is empty.
    fn is_done(&self) -> bool {
        self.queue.read().is_empty()
            && match self.data {
                PeerData::Light(_, ref client) => {
                    // should create a test light client which just imports
                    // headers directly and doesn't have a queue to drain.
                    client.import_verified();
                    client.queue_info().is_empty()
                }
                _ => true,
            }
    }

    /// Drive a light peer's import queue to completion; no-op for full peers.
    fn sync_step(&self) {
        if let PeerData::Light(_, ref client) = self.data {
            client.flush_queue();

            while !client.queue_info().is_empty() {
                client.import_verified()
            }
        }
    }

    // The remaining hooks are unused by the light-sync tests.
    fn restart_sync(&self) {}

    fn process_all_io_messages(&self) {}

    fn process_all_new_block_messages(&self) {}
}
|
||||
|
||||
impl TestNet<Peer> {
    /// Create a new `TestNet` for testing light synchronization.
    /// The first parameter is the number of light nodes,
    /// the second is the number of full nodes.
    /// Light peers occupy the lowest indices, followed by full peers.
    pub fn light(n_light: usize, n_full: usize) -> Self {
        let mut peers = Vec::with_capacity(n_light + n_full);
        for _ in 0..n_light {
            let mut config = ::light::client::Config::default();

            // skip full verification because the blocks are bad.
            config.verify_full = false;
            // shared cache with a long (6h) lifetime so nothing expires mid-test.
            let cache = Arc::new(Mutex::new(Cache::new(
                Default::default(),
                Duration::from_secs(6 * 3600),
            )));
            // in-memory database with no column families.
            let db = kvdb_memorydb::create(0);
            let client = LightClient::new(
                config,
                Arc::new(db),
                None,
                &Spec::new_test(),
                fetch::unavailable(), // TODO: allow fetch from full nodes.
                IoChannel::disconnected(),
                cache,
            )
            .expect("New DB creation infallible; qed");

            peers.push(Arc::new(Peer::new_light(Arc::new(client))))
        }

        for _ in 0..n_full {
            peers.push(Arc::new(Peer::new_full(Arc::new(
                TestBlockChainClient::new(),
            ))))
        }

        TestNet {
            peers: peers,
            started: false,
            disconnect_events: Vec::new(),
        }
    }
}
|
@ -1,64 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Spec hardcoded synchronization deserialization for the light client.
|
||||
|
||||
use hash::H256;
|
||||
use uint::Uint;
|
||||
|
||||
/// Spec hardcoded sync.
///
/// Deserialized from the `"hardcodedSync"` section of a chain spec JSON
/// (camelCase keys; unknown fields are rejected).
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
pub struct HardcodedSync {
    /// Hexadecimal of the RLP encoding of the header of the block to start synchronization from.
    pub header: String,
    /// Total difficulty including the block of `header`.
    pub total_difficulty: Uint,
    /// Ordered trie roots of blocks before and including `header`.
    #[serde(rename = "CHTs")]
    pub chts: Vec<H256>,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use ethereum_types::{H256 as Eth256, U256};
    use hash::H256;
    use serde_json;
    use spec::hardcoded_sync::HardcodedSync;
    use uint::Uint;

    // Round-trip check: a representative "hardcodedSync" JSON section
    // deserializes to the expected struct, including the "CHTs" rename.
    #[test]
    fn hardcoded_sync_deserialization() {
        let s = r#"{
            "header": "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23",
            "totalDifficulty": "0x400000000",
            "CHTs": [
                "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
                "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"
            ]
        }"#;
        let deserialized: HardcodedSync = serde_json::from_str(s).unwrap();
        assert_eq!(deserialized, HardcodedSync {
            header: String::from("f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23"),
            total_difficulty: Uint(U256::from(0x400000000u64)),
            chts: vec![
                H256(Eth256::from("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa")),
                H256(Eth256::from("0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544")),
            ]
        });
    }
}
|
@ -24,7 +24,6 @@ pub mod clique;
|
||||
pub mod engine;
|
||||
pub mod ethash;
|
||||
pub mod genesis;
|
||||
pub mod hardcoded_sync;
|
||||
pub mod instant_seal;
|
||||
pub mod null_engine;
|
||||
pub mod params;
|
||||
@ -42,7 +41,6 @@ pub use self::{
|
||||
engine::Engine,
|
||||
ethash::{BlockReward, Ethash, EthashParams},
|
||||
genesis::Genesis,
|
||||
hardcoded_sync::HardcodedSync,
|
||||
instant_seal::{InstantSeal, InstantSealParams},
|
||||
null_engine::{NullEngine, NullEngineParams},
|
||||
params::Params,
|
||||
|
@ -17,7 +17,7 @@
|
||||
//! Spec deserialization.
|
||||
|
||||
use serde_json::{self, Error};
|
||||
use spec::{Engine, Genesis, HardcodedSync, Params, State};
|
||||
use spec::{Engine, Genesis, Params, State};
|
||||
use std::io::Read;
|
||||
|
||||
/// Fork spec definition
|
||||
@ -56,8 +56,6 @@ pub struct Spec {
|
||||
pub accounts: State,
|
||||
/// Boot nodes.
|
||||
pub nodes: Option<Vec<String>>,
|
||||
/// Hardcoded synchronization for the light client.
|
||||
pub hardcoded_sync: Option<HardcodedSync>,
|
||||
}
|
||||
|
||||
impl Spec {
|
||||
@ -126,14 +124,6 @@ mod tests {
|
||||
"0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
|
||||
"0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
|
||||
"102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c": { "balance": "1606938044258990275541962092341162602522202993782792835301376", "nonce": "1048576" }
|
||||
},
|
||||
"hardcodedSync": {
|
||||
"header": "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23",
|
||||
"totalDifficulty": "0x400000000",
|
||||
"CHTs": [
|
||||
"0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
|
||||
"0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"
|
||||
]
|
||||
}
|
||||
}"#;
|
||||
let result: Result<Spec, _> = serde_json::from_str(s);
|
||||
@ -238,14 +228,6 @@ mod tests {
|
||||
}
|
||||
},
|
||||
"102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c": { "balance": "1606938044258990275541962092341162602522202993782792835301376", "nonce": "1048576" }
|
||||
},
|
||||
"hardcodedSync": {
|
||||
"header": "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23",
|
||||
"totalDifficulty": "0x400000000",
|
||||
"CHTs": [
|
||||
"0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
|
||||
"0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"
|
||||
]
|
||||
}
|
||||
}"#;
|
||||
let _deserialized: Spec = serde_json::from_str(s).unwrap();
|
||||
|
@ -14,14 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::{
|
||||
fs, io,
|
||||
io::{BufRead, BufReader},
|
||||
str::from_utf8,
|
||||
sync::Arc,
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use std::{fs, io, sync::Arc, time::Instant};
|
||||
|
||||
use ansi_term::Colour;
|
||||
use bytes::ToPretty;
|
||||
@ -33,7 +26,6 @@ use ethcore::{
|
||||
Balance, BlockChainClient, BlockChainReset, BlockId, DatabaseCompactionProfile,
|
||||
ImportExportBlocks, Mode, Nonce, VMType,
|
||||
},
|
||||
error::{Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind},
|
||||
miner::Miner,
|
||||
verification::queue::VerifierSettings,
|
||||
};
|
||||
@ -44,8 +36,6 @@ use hash::{keccak, KECCAK_NULL_RLP};
|
||||
use helpers::{execute_upgrades, to_client_config};
|
||||
use informant::{FullNodeInformantData, Informant, MillisecondDuration};
|
||||
use params::{fatdb_switch_to_bool, tracing_switch_to_bool, Pruning, SpecType, Switch};
|
||||
use rlp::PayloadInfo;
|
||||
use rustc_hex::FromHex;
|
||||
use types::data_format::DataFormat;
|
||||
use user_defaults::UserDefaults;
|
||||
|
||||
@ -96,7 +86,6 @@ pub struct ImportBlockchain {
|
||||
pub check_seal: bool,
|
||||
pub with_color: bool,
|
||||
pub verifier_settings: VerifierSettings,
|
||||
pub light: bool,
|
||||
pub max_round_blocks_to_import: usize,
|
||||
}
|
||||
|
||||
@ -143,201 +132,13 @@ pub struct ExportState {
|
||||
pub fn execute(cmd: BlockchainCmd) -> Result<(), String> {
|
||||
match cmd {
|
||||
BlockchainCmd::Kill(kill_cmd) => kill_db(kill_cmd),
|
||||
BlockchainCmd::Import(import_cmd) => {
|
||||
if import_cmd.light {
|
||||
execute_import_light(import_cmd)
|
||||
} else {
|
||||
execute_import(import_cmd)
|
||||
}
|
||||
}
|
||||
BlockchainCmd::Import(import_cmd) => execute_import(import_cmd),
|
||||
BlockchainCmd::Export(export_cmd) => execute_export(export_cmd),
|
||||
BlockchainCmd::ExportState(export_cmd) => execute_export_state(export_cmd),
|
||||
BlockchainCmd::Reset(reset_cmd) => execute_reset(reset_cmd),
|
||||
}
|
||||
}
|
||||
|
||||
fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
|
||||
use light::{
|
||||
cache::Cache as LightDataCache,
|
||||
client::{Config as LightClientConfig, Service as LightClientService},
|
||||
};
|
||||
use parking_lot::Mutex;
|
||||
|
||||
let timer = Instant::now();
|
||||
|
||||
// load spec file
|
||||
let spec = cmd.spec.spec(&cmd.dirs.cache)?;
|
||||
|
||||
// load genesis hash
|
||||
let genesis_hash = spec.genesis_header().hash();
|
||||
|
||||
// database paths
|
||||
let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone());
|
||||
|
||||
// user defaults path
|
||||
let user_defaults_path = db_dirs.user_defaults_path();
|
||||
|
||||
// load user defaults
|
||||
let user_defaults = UserDefaults::load(&user_defaults_path)?;
|
||||
|
||||
// select pruning algorithm
|
||||
let algorithm = cmd.pruning.to_algorithm(&user_defaults);
|
||||
|
||||
// prepare client and snapshot paths.
|
||||
let client_path = db_dirs.client_path(algorithm);
|
||||
|
||||
// execute upgrades
|
||||
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;
|
||||
|
||||
// create dirs used by parity
|
||||
cmd.dirs.create_dirs(false, false)?;
|
||||
|
||||
let cache = Arc::new(Mutex::new(LightDataCache::new(
|
||||
Default::default(),
|
||||
Duration::new(0, 0),
|
||||
)));
|
||||
|
||||
let mut config = LightClientConfig {
|
||||
queue: Default::default(),
|
||||
chain_column: ethcore_db::COL_LIGHT_CHAIN,
|
||||
verify_full: true,
|
||||
check_seal: cmd.check_seal,
|
||||
no_hardcoded_sync: true,
|
||||
};
|
||||
|
||||
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
|
||||
config.queue.verifier_settings = cmd.verifier_settings;
|
||||
|
||||
// initialize database.
|
||||
let db = db::open_db(
|
||||
&client_path
|
||||
.to_str()
|
||||
.expect("DB path could not be converted to string."),
|
||||
&cmd.cache_config,
|
||||
&cmd.compaction,
|
||||
)
|
||||
.map_err(|e| format!("Failed to open database: {:?}", e))?;
|
||||
|
||||
// TODO: could epoch signals be available at the end of the file?
|
||||
let fetch = ::light::client::fetch::unavailable();
|
||||
let service = LightClientService::start(config, &spec, fetch, db, cache)
|
||||
.map_err(|e| format!("Failed to start client: {}", e))?;
|
||||
|
||||
// free up the spec in memory.
|
||||
drop(spec);
|
||||
|
||||
let client = service.client();
|
||||
|
||||
let mut instream: Box<dyn io::Read> = match cmd.file_path {
|
||||
Some(f) => {
|
||||
Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?)
|
||||
}
|
||||
None => Box::new(io::stdin()),
|
||||
};
|
||||
|
||||
const READAHEAD_BYTES: usize = 8;
|
||||
|
||||
let mut first_bytes: Vec<u8> = vec![0; READAHEAD_BYTES];
|
||||
let mut first_read = 0;
|
||||
|
||||
let format = match cmd.format {
|
||||
Some(format) => format,
|
||||
None => {
|
||||
first_read = instream
|
||||
.read(&mut first_bytes)
|
||||
.map_err(|_| "Error reading from the file/stream.")?;
|
||||
match first_bytes[0] {
|
||||
0xf9 => DataFormat::Binary,
|
||||
_ => DataFormat::Hex,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let do_import = |bytes: Vec<u8>| {
|
||||
while client.queue_info().is_full() {
|
||||
sleep(Duration::from_secs(1));
|
||||
}
|
||||
|
||||
let header: ::types::header::Header = ::rlp::Rlp::new(&bytes)
|
||||
.val_at(0)
|
||||
.map_err(|e| format!("Bad block: {}", e))?;
|
||||
|
||||
if client.best_block_header().number() >= header.number() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if header.number() % 10000 == 0 {
|
||||
info!("#{}", header.number());
|
||||
}
|
||||
|
||||
match client.import_header(header) {
|
||||
Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => {
|
||||
trace!("Skipping block already in chain.");
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(format!("Cannot import block: {:?}", e));
|
||||
}
|
||||
Ok(_) => {}
|
||||
}
|
||||
Ok(())
|
||||
};
|
||||
|
||||
match format {
|
||||
DataFormat::Binary => loop {
|
||||
let mut bytes = if first_read > 0 {
|
||||
first_bytes.clone()
|
||||
} else {
|
||||
vec![0; READAHEAD_BYTES]
|
||||
};
|
||||
let n = if first_read > 0 {
|
||||
first_read
|
||||
} else {
|
||||
instream
|
||||
.read(&mut bytes)
|
||||
.map_err(|_| "Error reading from the file/stream.")?
|
||||
};
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
first_read = 0;
|
||||
let s = PayloadInfo::from(&bytes)
|
||||
.map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))?
|
||||
.total();
|
||||
bytes.resize(s, 0);
|
||||
instream
|
||||
.read_exact(&mut bytes[n..])
|
||||
.map_err(|_| "Error reading from the file/stream.")?;
|
||||
do_import(bytes)?;
|
||||
},
|
||||
DataFormat::Hex => {
|
||||
for line in BufReader::new(instream).lines() {
|
||||
let s = line.map_err(|_| "Error reading from the file/stream.")?;
|
||||
let s = if first_read > 0 {
|
||||
from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])
|
||||
} else {
|
||||
s
|
||||
};
|
||||
first_read = 0;
|
||||
let bytes = s.from_hex().map_err(|_| "Invalid hex in file/stream.")?;
|
||||
do_import(bytes)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
client.flush_queue();
|
||||
|
||||
let ms = timer.elapsed().as_milliseconds();
|
||||
let report = client.report();
|
||||
|
||||
info!(
|
||||
"Import completed in {} seconds, {} headers, {} hdr/s",
|
||||
ms / 1000,
|
||||
report.blocks_imported,
|
||||
(report.blocks_imported * 1000) as u64 / ms,
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
|
||||
let timer = Instant::now();
|
||||
|
||||
|
@ -230,11 +230,6 @@ usage! {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
CMD cmd_export_hardcoded_sync
|
||||
{
|
||||
"Print the hashed light clients headers of the given --chain (default: mainnet) in a JSON format. To be used as hardcoded headers in a genesis file.",
|
||||
}
|
||||
}
|
||||
{
|
||||
// Global flags and arguments
|
||||
@ -247,14 +242,6 @@ usage! {
|
||||
"--no-consensus",
|
||||
"Force the binary to run even if there are known issues regarding consensus. Not recommended.",
|
||||
|
||||
FLAG flag_light: (bool) = false, or |c: &Config| c.parity.as_ref()?.light,
|
||||
"--light",
|
||||
"Experimental: run in light client mode. Light clients synchronize a bare minimum of data and fetch necessary data on-demand from the network. Much lower in storage, potentially higher in bandwidth. Has no effect with subcommands.",
|
||||
|
||||
FLAG flag_no_hardcoded_sync: (bool) = false, or |c: &Config| c.parity.as_ref()?.no_hardcoded_sync,
|
||||
"--no-hardcoded-sync",
|
||||
"By default, if there is no existing database the light client will automatically jump to a block hardcoded in the chain's specifications. This disables this feature.",
|
||||
|
||||
FLAG flag_force_direct: (bool) = false, or |_| None,
|
||||
"--force-direct",
|
||||
"Run the originally installed version of Parity, ignoring any updates that have since been installed.",
|
||||
@ -392,10 +379,6 @@ usage! {
|
||||
"--no-ancient-blocks",
|
||||
"Disable downloading old blocks after snapshot restoration or warp sync. Not recommended.",
|
||||
|
||||
FLAG flag_no_serve_light: (bool) = false, or |c: &Config| c.network.as_ref()?.no_serve_light.clone(),
|
||||
"--no-serve-light",
|
||||
"Disable serving of light peers.",
|
||||
|
||||
ARG arg_warp_barrier: (Option<u64>) = None, or |c: &Config| c.network.as_ref()?.warp_barrier.clone(),
|
||||
"--warp-barrier=[NUM]",
|
||||
"When warp enabled never attempt regular sync before warping to block NUM.",
|
||||
@ -574,27 +557,6 @@ usage! {
|
||||
"--ipfs-api-cors=[URL]",
|
||||
"Specify CORS header for IPFS API responses. Special options: \"all\", \"none\".",
|
||||
|
||||
["Light Client Options"]
|
||||
ARG arg_on_demand_response_time_window: (Option<u64>) = None, or |c: &Config| c.light.as_ref()?.on_demand_response_time_window,
|
||||
"--on-demand-time-window=[S]",
|
||||
"Specify the maximum time to wait for a successful response",
|
||||
|
||||
ARG arg_on_demand_request_backoff_start: (Option<u64>) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_backoff_start,
|
||||
"--on-demand-start-backoff=[S]",
|
||||
"Specify light client initial backoff time for a request",
|
||||
|
||||
ARG arg_on_demand_request_backoff_max: (Option<u64>) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_backoff_max,
|
||||
"--on-demand-end-backoff=[S]",
|
||||
"Specify light client maximum backoff time for a request",
|
||||
|
||||
ARG arg_on_demand_request_backoff_rounds_max: (Option<usize>) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_backoff_rounds_max,
|
||||
"--on-demand-max-backoff-rounds=[TIMES]",
|
||||
"Specify light client maximum number of backoff iterations for a request",
|
||||
|
||||
ARG arg_on_demand_request_consecutive_failures: (Option<usize>) = None, or |c: &Config| c.light.as_ref()?.on_demand_request_consecutive_failures,
|
||||
"--on-demand-consecutive-failures=[TIMES]",
|
||||
"Specify light client the number of failures for a request until it gets exponentially backed off",
|
||||
|
||||
["Secret Store Options"]
|
||||
FLAG flag_no_secretstore: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable.clone(),
|
||||
"--no-secretstore",
|
||||
@ -926,7 +888,6 @@ struct Config {
|
||||
snapshots: Option<Snapshots>,
|
||||
misc: Option<Misc>,
|
||||
stratum: Option<Stratum>,
|
||||
light: Option<Light>,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, PartialEq, Deserialize)]
|
||||
@ -946,9 +907,7 @@ struct Operating {
|
||||
db_path: Option<String>,
|
||||
keys_path: Option<String>,
|
||||
identity: Option<String>,
|
||||
light: Option<bool>,
|
||||
no_persistent_txqueue: Option<bool>,
|
||||
no_hardcoded_sync: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, PartialEq, Deserialize)]
|
||||
@ -998,7 +957,6 @@ struct Network {
|
||||
node_key: Option<String>,
|
||||
reserved_peers: Option<String>,
|
||||
reserved_only: Option<bool>,
|
||||
no_serve_light: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, PartialEq, Deserialize)]
|
||||
@ -1154,21 +1112,11 @@ struct Misc {
|
||||
unsafe_expose: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, PartialEq, Deserialize)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
struct Light {
|
||||
on_demand_response_time_window: Option<u64>,
|
||||
on_demand_request_backoff_start: Option<u64>,
|
||||
on_demand_request_backoff_max: Option<u64>,
|
||||
on_demand_request_backoff_rounds_max: Option<usize>,
|
||||
on_demand_request_consecutive_failures: Option<usize>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{
|
||||
Account, Args, ArgsError, Config, Footprint, Ipc, Ipfs, Light, Mining, Misc, Network,
|
||||
Operating, Rpc, SecretStore, Snapshots, Ws,
|
||||
Account, Args, ArgsError, Config, Footprint, Ipc, Ipfs, Mining, Misc, Network, Operating,
|
||||
Rpc, SecretStore, Snapshots, Ws,
|
||||
};
|
||||
use clap::ErrorKind as ClapErrorKind;
|
||||
use toml;
|
||||
@ -1373,7 +1321,6 @@ mod tests {
|
||||
cmd_db: false,
|
||||
cmd_db_kill: false,
|
||||
cmd_db_reset: false,
|
||||
cmd_export_hardcoded_sync: false,
|
||||
|
||||
// Arguments
|
||||
arg_daemon_pid_file: None,
|
||||
@ -1408,8 +1355,6 @@ mod tests {
|
||||
arg_db_path: Some("$HOME/.parity/chains".into()),
|
||||
arg_keys_path: "$HOME/.parity/keys".into(),
|
||||
arg_identity: "".into(),
|
||||
flag_light: false,
|
||||
flag_no_hardcoded_sync: false,
|
||||
flag_no_persistent_txqueue: false,
|
||||
flag_force_direct: false,
|
||||
|
||||
@ -1453,7 +1398,6 @@ mod tests {
|
||||
arg_reserved_peers: Some("./path_to_file".into()),
|
||||
flag_reserved_only: false,
|
||||
flag_no_ancient_blocks: false,
|
||||
flag_no_serve_light: false,
|
||||
arg_warp_barrier: None,
|
||||
|
||||
// -- API and Console Options
|
||||
@ -1582,13 +1526,6 @@ mod tests {
|
||||
flag_no_periodic_snapshot: false,
|
||||
arg_snapshot_threads: None,
|
||||
|
||||
// -- Light options.
|
||||
arg_on_demand_response_time_window: Some(2),
|
||||
arg_on_demand_request_backoff_start: Some(9),
|
||||
arg_on_demand_request_backoff_max: Some(15),
|
||||
arg_on_demand_request_backoff_rounds_max: Some(100),
|
||||
arg_on_demand_request_consecutive_failures: Some(1),
|
||||
|
||||
// -- Internal Options
|
||||
flag_can_restart: false,
|
||||
|
||||
@ -1648,8 +1585,6 @@ mod tests {
|
||||
db_path: None,
|
||||
keys_path: None,
|
||||
identity: None,
|
||||
light: None,
|
||||
no_hardcoded_sync: None,
|
||||
no_persistent_txqueue: None,
|
||||
}),
|
||||
account: Some(Account {
|
||||
@ -1677,7 +1612,6 @@ mod tests {
|
||||
node_key: None,
|
||||
reserved_peers: Some("./path/to/reserved_peers".into()),
|
||||
reserved_only: Some(true),
|
||||
no_serve_light: None,
|
||||
}),
|
||||
websockets: Some(Ws {
|
||||
disable: Some(true),
|
||||
@ -1787,13 +1721,6 @@ mod tests {
|
||||
scale_verifiers: Some(false),
|
||||
num_verifiers: None,
|
||||
}),
|
||||
light: Some(Light {
|
||||
on_demand_response_time_window: Some(2),
|
||||
on_demand_request_backoff_start: Some(9),
|
||||
on_demand_request_backoff_max: Some(15),
|
||||
on_demand_request_backoff_rounds_max: Some(10),
|
||||
on_demand_request_consecutive_failures: Some(1),
|
||||
}),
|
||||
snapshots: Some(Snapshots {
|
||||
disable_periodic: Some(true),
|
||||
processing_threads: None,
|
||||
|
@ -15,8 +15,6 @@ base_path = "$HOME/.parity"
|
||||
db_path = "$HOME/.parity/chains"
|
||||
keys_path = "$HOME/.parity/keys"
|
||||
identity = ""
|
||||
light = false
|
||||
no_hardcoded_sync = false
|
||||
|
||||
[account]
|
||||
unlock = ["0xdeadbeefcafe0000000000000000000000000000"]
|
||||
@ -47,7 +45,6 @@ warp = true
|
||||
allow_ips = "all"
|
||||
snapshot_peers = 0
|
||||
max_pending_peers = 64
|
||||
no_serve_light = false
|
||||
|
||||
reserved_only = false
|
||||
reserved_peers = "./path_to_file"
|
||||
@ -142,13 +139,6 @@ fat_db = "auto"
|
||||
scale_verifiers = true
|
||||
num_verifiers = 6
|
||||
|
||||
[light]
|
||||
on_demand_response_time_window = 2
|
||||
on_demand_request_backoff_start = 9
|
||||
on_demand_request_backoff_max = 15
|
||||
on_demand_request_backoff_rounds_max = 100
|
||||
on_demand_request_consecutive_failures = 1
|
||||
|
||||
[snapshots]
|
||||
disable_periodic = false
|
||||
|
||||
|
@ -62,13 +62,6 @@ db_compaction = "ssd"
|
||||
fat_db = "off"
|
||||
scale_verifiers = false
|
||||
|
||||
[light]
|
||||
on_demand_response_time_window = 2
|
||||
on_demand_request_backoff_start = 9
|
||||
on_demand_request_backoff_max = 15
|
||||
on_demand_request_backoff_rounds_max = 10
|
||||
on_demand_request_consecutive_failures = 1
|
||||
|
||||
[snapshots]
|
||||
disable_periodic = true
|
||||
|
||||
|
@ -53,7 +53,6 @@ use dir::{
|
||||
};
|
||||
use ethcore_logger::Config as LogConfig;
|
||||
use ethcore_private_tx::{EncryptorConfig, ProviderConfig};
|
||||
use export_hardcoded_sync::ExportHsyncCmd;
|
||||
use helpers::{
|
||||
parity_ipc_path, to_address, to_addresses, to_block_id, to_bootnodes, to_duration, to_mode,
|
||||
to_pending_set, to_price, to_queue_penalization, to_queue_strategy, to_u256,
|
||||
@ -103,7 +102,6 @@ pub enum Cmd {
|
||||
},
|
||||
Snapshot(SnapshotCommand),
|
||||
Hash(Option<String>),
|
||||
ExportHardcodedSync(ExportHsyncCmd),
|
||||
}
|
||||
|
||||
pub struct Execute {
|
||||
@ -286,7 +284,6 @@ impl Configuration {
|
||||
check_seal: !self.args.flag_no_seal_check,
|
||||
with_color: logger_config.color,
|
||||
verifier_settings: self.verifier_settings(),
|
||||
light: self.args.flag_light,
|
||||
max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import,
|
||||
};
|
||||
Cmd::Blockchain(BlockchainCmd::Import(import_cmd))
|
||||
@ -376,15 +373,6 @@ impl Configuration {
|
||||
snapshot_conf: snapshot_conf,
|
||||
};
|
||||
Cmd::Snapshot(restore_cmd)
|
||||
} else if self.args.cmd_export_hardcoded_sync {
|
||||
let export_hs_cmd = ExportHsyncCmd {
|
||||
cache_config: cache_config,
|
||||
dirs: dirs,
|
||||
spec: spec,
|
||||
pruning: pruning,
|
||||
compaction: compaction,
|
||||
};
|
||||
Cmd::ExportHardcodedSync(export_hs_cmd)
|
||||
} else {
|
||||
let daemon = if self.args.cmd_daemon {
|
||||
Some(
|
||||
@ -444,20 +432,8 @@ impl Configuration {
|
||||
check_seal: !self.args.flag_no_seal_check,
|
||||
download_old_blocks: !self.args.flag_no_ancient_blocks,
|
||||
verifier_settings: verifier_settings,
|
||||
serve_light: !self.args.flag_no_serve_light,
|
||||
light: self.args.flag_light,
|
||||
no_persistent_txqueue: self.args.flag_no_persistent_txqueue,
|
||||
no_hardcoded_sync: self.args.flag_no_hardcoded_sync,
|
||||
max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import,
|
||||
on_demand_response_time_window: self.args.arg_on_demand_response_time_window,
|
||||
on_demand_request_backoff_start: self.args.arg_on_demand_request_backoff_start,
|
||||
on_demand_request_backoff_max: self.args.arg_on_demand_request_backoff_max,
|
||||
on_demand_request_backoff_rounds_max: self
|
||||
.args
|
||||
.arg_on_demand_request_backoff_rounds_max,
|
||||
on_demand_request_consecutive_failures: self
|
||||
.args
|
||||
.arg_on_demand_request_consecutive_failures,
|
||||
};
|
||||
Cmd::Run(run_cmd)
|
||||
};
|
||||
@ -1106,16 +1082,7 @@ impl Configuration {
|
||||
let is_using_base_path = self.args.arg_base_path.is_some();
|
||||
// If base_path is set and db_path is not we default to base path subdir instead of LOCAL.
|
||||
let base_db_path = if is_using_base_path && self.args.arg_db_path.is_none() {
|
||||
if self.args.flag_light {
|
||||
"$BASE/chains_light"
|
||||
} else {
|
||||
"$BASE/chains"
|
||||
}
|
||||
} else if self.args.flag_light {
|
||||
self.args
|
||||
.arg_db_path
|
||||
.as_ref()
|
||||
.map_or(dir::CHAINS_PATH_LIGHT, |s| &s)
|
||||
"$BASE/chains"
|
||||
} else {
|
||||
self.args
|
||||
.arg_db_path
|
||||
@ -1479,7 +1446,6 @@ mod tests {
|
||||
check_seal: true,
|
||||
with_color: !cfg!(windows),
|
||||
verifier_settings: Default::default(),
|
||||
light: false,
|
||||
max_round_blocks_to_import: 12,
|
||||
}))
|
||||
);
|
||||
@ -1674,16 +1640,8 @@ mod tests {
|
||||
check_seal: true,
|
||||
download_old_blocks: true,
|
||||
verifier_settings: Default::default(),
|
||||
serve_light: true,
|
||||
light: false,
|
||||
no_hardcoded_sync: false,
|
||||
no_persistent_txqueue: false,
|
||||
max_round_blocks_to_import: 12,
|
||||
on_demand_response_time_window: None,
|
||||
on_demand_request_backoff_start: None,
|
||||
on_demand_request_backoff_max: None,
|
||||
on_demand_request_backoff_rounds_max: None,
|
||||
on_demand_request_consecutive_failures: None,
|
||||
};
|
||||
expected.secretstore_conf.enabled = cfg!(feature = "secretstore");
|
||||
expected.secretstore_conf.http_enabled = cfg!(feature = "secretstore");
|
||||
|
@ -19,7 +19,7 @@
|
||||
#[path = "rocksdb/mod.rs"]
|
||||
mod impls;
|
||||
|
||||
pub use self::impls::{migrate, open_db, restoration_db_handler};
|
||||
pub use self::impls::{migrate, restoration_db_handler};
|
||||
|
||||
#[cfg(feature = "secretstore")]
|
||||
pub use self::impls::open_secretstore_db;
|
||||
|
@ -23,13 +23,10 @@ use self::{
|
||||
kvdb_rocksdb::{Database, DatabaseConfig},
|
||||
};
|
||||
use blooms_db;
|
||||
use ethcore::client::{ClientConfig, DatabaseCompactionProfile};
|
||||
use ethcore_db::NUM_COLUMNS;
|
||||
use ethcore::client::ClientConfig;
|
||||
use kvdb::KeyValueDB;
|
||||
use std::{fs, io, path::Path, sync::Arc};
|
||||
|
||||
use cache::CacheConfig;
|
||||
|
||||
mod blooms;
|
||||
mod helpers;
|
||||
mod migration;
|
||||
@ -93,23 +90,6 @@ pub fn restoration_db_handler(
|
||||
})
|
||||
}
|
||||
|
||||
/// Open a new main DB.
|
||||
pub fn open_db(
|
||||
client_path: &str,
|
||||
cache_config: &CacheConfig,
|
||||
compaction: &DatabaseCompactionProfile,
|
||||
) -> io::Result<Arc<dyn BlockChainDB>> {
|
||||
let path = Path::new(client_path);
|
||||
|
||||
let db_config = DatabaseConfig {
|
||||
memory_budget: Some(cache_config.blockchain() as usize * 1024 * 1024),
|
||||
compaction: helpers::compaction_profile(&compaction, path),
|
||||
..DatabaseConfig::with_columns(NUM_COLUMNS)
|
||||
};
|
||||
|
||||
open_database(client_path, &db_config)
|
||||
}
|
||||
|
||||
pub fn open_database(
|
||||
client_path: &str,
|
||||
config: &DatabaseConfig,
|
||||
|
@ -1,121 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
use ethcore::{
|
||||
client::DatabaseCompactionProfile,
|
||||
spec::{OptimizeFor, SpecParams},
|
||||
};
|
||||
use light::{client::fetch::Unavailable as UnavailableDataFetcher, Cache as LightDataCache};
|
||||
|
||||
use cache::CacheConfig;
|
||||
use db;
|
||||
use dir::Directories;
|
||||
use helpers::execute_upgrades;
|
||||
use params::{Pruning, SpecType};
|
||||
use user_defaults::UserDefaults;
|
||||
|
||||
// Number of minutes before a given gas price corpus should expire.
|
||||
// Light client only.
|
||||
const GAS_CORPUS_EXPIRATION_MINUTES: u64 = 60 * 6;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct ExportHsyncCmd {
|
||||
pub cache_config: CacheConfig,
|
||||
pub dirs: Directories,
|
||||
pub spec: SpecType,
|
||||
pub pruning: Pruning,
|
||||
pub compaction: DatabaseCompactionProfile,
|
||||
}
|
||||
|
||||
pub fn execute(cmd: ExportHsyncCmd) -> Result<String, String> {
|
||||
use light::client as light_client;
|
||||
use parking_lot::Mutex;
|
||||
|
||||
// load spec
|
||||
let spec = cmd.spec.spec(SpecParams::new(
|
||||
cmd.dirs.cache.as_ref(),
|
||||
OptimizeFor::Memory,
|
||||
))?;
|
||||
|
||||
// load genesis hash
|
||||
let genesis_hash = spec.genesis_header().hash();
|
||||
|
||||
// database paths
|
||||
let db_dirs = cmd.dirs.database(
|
||||
genesis_hash,
|
||||
cmd.spec.legacy_fork_name(),
|
||||
spec.data_dir.clone(),
|
||||
);
|
||||
|
||||
// user defaults path
|
||||
let user_defaults_path = db_dirs.user_defaults_path();
|
||||
|
||||
// load user defaults
|
||||
let user_defaults = UserDefaults::load(&user_defaults_path)?;
|
||||
|
||||
// select pruning algorithm
|
||||
let algorithm = cmd.pruning.to_algorithm(&user_defaults);
|
||||
|
||||
// execute upgrades
|
||||
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;
|
||||
|
||||
// create dirs used by parity
|
||||
cmd.dirs.create_dirs(false, false)?;
|
||||
|
||||
// TODO: configurable cache size.
|
||||
let cache = LightDataCache::new(
|
||||
Default::default(),
|
||||
Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES),
|
||||
);
|
||||
let cache = Arc::new(Mutex::new(cache));
|
||||
|
||||
// start client and create transaction queue.
|
||||
let mut config = light_client::Config {
|
||||
queue: Default::default(),
|
||||
chain_column: ::ethcore_db::COL_LIGHT_CHAIN,
|
||||
verify_full: true,
|
||||
check_seal: true,
|
||||
no_hardcoded_sync: true,
|
||||
};
|
||||
|
||||
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
|
||||
|
||||
// initialize database.
|
||||
let db = db::open_db(
|
||||
&db_dirs
|
||||
.client_path(algorithm)
|
||||
.to_str()
|
||||
.expect("DB path could not be converted to string."),
|
||||
&cmd.cache_config,
|
||||
&cmd.compaction,
|
||||
)
|
||||
.map_err(|e| format!("Failed to open database {:?}", e))?;
|
||||
|
||||
let service = light_client::Service::start(config, &spec, UnavailableDataFetcher, db, cache)
|
||||
.map_err(|e| format!("Error starting light client: {}", e))?;
|
||||
|
||||
let hs = service
|
||||
.client()
|
||||
.read_hardcoded_sync()
|
||||
.map_err(|e| format!("Error reading hardcoded sync: {}", e))?;
|
||||
if let Some(hs) = hs {
|
||||
Ok(::serde_json::to_string_pretty(&hs.to_json()).expect("generated JSON is always valid"))
|
||||
} else {
|
||||
Err("Error: cannot generate hardcoded sync because the database is empty.".into())
|
||||
}
|
||||
}
|
@ -37,16 +37,11 @@ use ethcore::{
|
||||
},
|
||||
snapshot::{service::Service as SnapshotService, RestorationStatus, SnapshotService as SS},
|
||||
};
|
||||
use ethereum_types::H256;
|
||||
use io::{IoContext, IoHandler, TimerToken};
|
||||
use light::{
|
||||
client::{LightChainClient, LightChainNotify},
|
||||
Cache as LightDataCache,
|
||||
};
|
||||
use number_prefix::{binary_prefix, Prefixed, Standalone};
|
||||
use parity_rpc::{informant::RpcStats, is_major_importing_or_waiting};
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use sync::{LightSync, LightSyncProvider, ManageNetwork, SyncProvider};
|
||||
use sync::{ManageNetwork, SyncProvider};
|
||||
use types::BlockNumber;
|
||||
|
||||
/// Format byte counts to standard denominations.
|
||||
@ -189,53 +184,6 @@ impl InformantData for FullNodeInformantData {
|
||||
}
|
||||
}
|
||||
|
||||
/// Informant data for a light node -- note that the network is required.
|
||||
pub struct LightNodeInformantData {
|
||||
pub client: Arc<dyn LightChainClient>,
|
||||
pub sync: Arc<LightSync>,
|
||||
pub cache: Arc<Mutex<LightDataCache>>,
|
||||
}
|
||||
|
||||
impl InformantData for LightNodeInformantData {
|
||||
fn executes_transactions(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
fn is_major_importing(&self) -> bool {
|
||||
self.sync.is_major_importing()
|
||||
}
|
||||
|
||||
fn report(&self) -> Report {
|
||||
let (client_report, queue_info, chain_info) = (
|
||||
self.client.report(),
|
||||
self.client.queue_info(),
|
||||
self.client.chain_info(),
|
||||
);
|
||||
|
||||
let mut cache_sizes = CacheSizes::default();
|
||||
cache_sizes.insert("queue", queue_info.mem_used);
|
||||
cache_sizes.insert("cache", self.cache.lock().mem_used());
|
||||
|
||||
let peer_numbers = self.sync.peer_numbers();
|
||||
let sync_info = Some(SyncInfo {
|
||||
last_imported_block_number: chain_info.best_block_number,
|
||||
last_imported_old_block_number: None,
|
||||
num_peers: peer_numbers.connected,
|
||||
max_peers: peer_numbers.max as u32,
|
||||
snapshot_sync: false,
|
||||
});
|
||||
|
||||
Report {
|
||||
importing: self.sync.is_major_importing(),
|
||||
chain_info,
|
||||
client_report,
|
||||
queue_info,
|
||||
cache_sizes,
|
||||
sync_info,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Informant<T> {
|
||||
last_tick: RwLock<Instant>,
|
||||
with_color: bool,
|
||||
@ -449,36 +397,6 @@ impl ChainNotify for Informant<FullNodeInformantData> {
|
||||
}
|
||||
}
|
||||
|
||||
impl LightChainNotify for Informant<LightNodeInformantData> {
|
||||
fn new_headers(&self, good: &[H256]) {
|
||||
let mut last_import = self.last_import.lock();
|
||||
let client = &self.target.client;
|
||||
|
||||
let importing = self.target.is_major_importing();
|
||||
let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing;
|
||||
|
||||
if ripe {
|
||||
if let Some(header) = good
|
||||
.last()
|
||||
.and_then(|h| client.block_header(BlockId::Hash(*h)))
|
||||
{
|
||||
info!(target: "import", "Imported {} {} ({} Mgas){}",
|
||||
Colour::White.bold().paint(format!("#{}", header.number())),
|
||||
Colour::White.bold().paint(format!("{}", header.hash())),
|
||||
Colour::Yellow.bold().paint(format!("{:.2}", header.gas_used().low_u64() as f32 / 1000000f32)),
|
||||
if good.len() > 1 {
|
||||
format!(" + another {} header(s)",
|
||||
Colour::Red.bold().paint(format!("{}", good.len() - 1)))
|
||||
} else {
|
||||
String::new()
|
||||
}
|
||||
);
|
||||
*last_import = Instant::now();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const INFO_TIMER: TimerToken = 0;
|
||||
|
||||
impl<T: InformantData> IoHandler<ClientIoMessage> for Informant<T> {
|
||||
|
@ -46,7 +46,6 @@ extern crate ethcore;
|
||||
extern crate ethcore_call_contract as call_contract;
|
||||
extern crate ethcore_db;
|
||||
extern crate ethcore_io as io;
|
||||
extern crate ethcore_light as light;
|
||||
extern crate ethcore_logger;
|
||||
extern crate ethcore_miner as miner;
|
||||
extern crate ethcore_network as network;
|
||||
@ -98,11 +97,9 @@ mod cache;
|
||||
mod cli;
|
||||
mod configuration;
|
||||
mod db;
|
||||
mod export_hardcoded_sync;
|
||||
mod helpers;
|
||||
mod informant;
|
||||
mod ipfs;
|
||||
mod light_helpers;
|
||||
mod modules;
|
||||
mod params;
|
||||
mod presale;
|
||||
@ -235,9 +232,6 @@ where
|
||||
Cmd::Snapshot(snapshot_cmd) => {
|
||||
snapshot::execute(snapshot_cmd).map(|s| ExecutionAction::Instant(Some(s)))
|
||||
}
|
||||
Cmd::ExportHardcodedSync(export_hs_cmd) => {
|
||||
export_hardcoded_sync::execute(export_hs_cmd).map(|s| ExecutionAction::Instant(Some(s)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,100 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use ethcore::{
|
||||
engines::{EthEngine, StateDependentProof},
|
||||
machine::EthereumMachine,
|
||||
};
|
||||
use sync::{LightNetworkDispatcher, LightSync};
|
||||
use types::{encoded, header::Header, receipt::Receipt};
|
||||
|
||||
use futures::{future, future::Either, Future};
|
||||
|
||||
use light::{
|
||||
client::fetch::ChainDataFetcher,
|
||||
on_demand::{request, OnDemand, OnDemandRequester},
|
||||
};
|
||||
|
||||
use ethereum_types::H256;
|
||||
use parking_lot::RwLock;
|
||||
|
||||
const ALL_VALID_BACKREFS: &str = "no back-references, therefore all back-references valid; qed";
|
||||
|
||||
type BoxFuture<T, E> = Box<dyn Future<Item = T, Error = E>>;
|
||||
|
||||
/// Allows on-demand fetch of data useful for the light client.
|
||||
pub struct EpochFetch {
|
||||
/// A handle to the sync service.
|
||||
pub sync: Arc<RwLock<Weak<LightSync>>>,
|
||||
/// The on-demand request service.
|
||||
pub on_demand: Arc<OnDemand>,
|
||||
}
|
||||
|
||||
impl EpochFetch {
|
||||
fn request<T>(&self, req: T) -> BoxFuture<T::Out, &'static str>
|
||||
where
|
||||
T: Send + request::RequestAdapter + 'static,
|
||||
T::Out: Send + 'static,
|
||||
{
|
||||
Box::new(match self.sync.read().upgrade() {
|
||||
Some(sync) => {
|
||||
let on_demand = &self.on_demand;
|
||||
let maybe_future = sync.with_context(move |ctx| {
|
||||
on_demand.request(ctx, req).expect(ALL_VALID_BACKREFS)
|
||||
});
|
||||
|
||||
match maybe_future {
|
||||
Some(x) => Either::A(x.map_err(|_| "Request canceled")),
|
||||
None => Either::B(future::err("Unable to access network.")),
|
||||
}
|
||||
}
|
||||
None => Either::B(future::err("Unable to access network")),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl ChainDataFetcher for EpochFetch {
|
||||
type Error = &'static str;
|
||||
|
||||
type Body = BoxFuture<encoded::Block, &'static str>;
|
||||
type Receipts = BoxFuture<Vec<Receipt>, &'static str>;
|
||||
type Transition = BoxFuture<Vec<u8>, &'static str>;
|
||||
|
||||
fn block_body(&self, header: &Header) -> Self::Body {
|
||||
self.request(request::Body(header.encoded().into()))
|
||||
}
|
||||
|
||||
/// Fetch block receipts.
|
||||
fn block_receipts(&self, header: &Header) -> Self::Receipts {
|
||||
self.request(request::BlockReceipts(header.encoded().into()))
|
||||
}
|
||||
|
||||
/// Fetch epoch transition proof at given header.
|
||||
fn epoch_transition(
|
||||
&self,
|
||||
hash: H256,
|
||||
engine: Arc<dyn EthEngine>,
|
||||
checker: Arc<dyn StateDependentProof<EthereumMachine>>,
|
||||
) -> Self::Transition {
|
||||
self.request(request::Signal {
|
||||
hash: hash,
|
||||
engine: engine,
|
||||
proof_check: checker,
|
||||
})
|
||||
}
|
||||
}
|
@ -1,21 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Utilities and helpers for the light client.
|
||||
|
||||
mod epoch_fetch;
|
||||
|
||||
pub use self::epoch_fetch::EpochFetch;
|
@ -17,7 +17,6 @@
|
||||
use std::sync::{mpsc, Arc};
|
||||
|
||||
use ethcore::{client::BlockChainClient, snapshot::SnapshotService};
|
||||
use light::Provider;
|
||||
use sync::{self, ConnectionFilter, NetworkConfiguration, Params, SyncConfig};
|
||||
|
||||
pub use ethcore::client::ChainNotify;
|
||||
@ -37,7 +36,6 @@ pub fn sync(
|
||||
chain: Arc<dyn BlockChainClient>,
|
||||
snapshot_service: Arc<dyn SnapshotService>,
|
||||
private_tx_handler: Option<Arc<dyn PrivateTxHandler>>,
|
||||
provider: Arc<dyn Provider>,
|
||||
_log_settings: &LogConfig,
|
||||
connection_filter: Option<Arc<dyn ConnectionFilter>>,
|
||||
) -> Result<SyncModules, sync::Error> {
|
||||
@ -45,7 +43,6 @@ pub fn sync(
|
||||
Params {
|
||||
config,
|
||||
chain,
|
||||
provider,
|
||||
snapshot_service,
|
||||
private_tx_handler,
|
||||
network_config,
|
||||
|
@ -14,34 +14,25 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::{
|
||||
cmp::PartialEq,
|
||||
collections::HashSet,
|
||||
str::FromStr,
|
||||
sync::{Arc, Weak},
|
||||
};
|
||||
use std::{cmp::PartialEq, collections::HashSet, str::FromStr, sync::Arc};
|
||||
|
||||
pub use parity_rpc::signer::SignerService;
|
||||
|
||||
use account_utils::{self, AccountProvider};
|
||||
use ethcore::{client::Client, miner::Miner, snapshot::SnapshotService};
|
||||
use ethcore_logger::RotatingLogger;
|
||||
use ethcore_private_tx::Provider as PrivateTransactionManager;
|
||||
use ethcore_service::PrivateTxService;
|
||||
use hash_fetch::fetch::Client as FetchClient;
|
||||
use jsonrpc_core::{self as core, MetaIoHandler};
|
||||
use light::{
|
||||
client::LightChainClient, Cache as LightDataCache, TransactionQueue as LightTransactionQueue,
|
||||
};
|
||||
use miner::external::ExternalMiner;
|
||||
use parity_rpc::{
|
||||
dispatch::{FullDispatcher, LightDispatcher},
|
||||
dispatch::FullDispatcher,
|
||||
informant::{ActivityNotifier, ClientNotifier},
|
||||
Host, Metadata, NetworkSettings,
|
||||
};
|
||||
use parity_runtime::Executor;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use sync::{LightSync, ManageNetwork, SyncProvider};
|
||||
use parking_lot::Mutex;
|
||||
use sync::{ManageNetwork, SyncProvider};
|
||||
use updater::Updater;
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Eq, Hash)]
|
||||
@ -426,220 +417,6 @@ impl Dependencies for FullDependencies {
|
||||
}
|
||||
}
|
||||
|
||||
/// Light client notifier. Doesn't do anything yet, but might in the future.
|
||||
pub struct LightClientNotifier;
|
||||
|
||||
impl ActivityNotifier for LightClientNotifier {
|
||||
fn active(&self) {}
|
||||
}
|
||||
|
||||
/// RPC dependencies for a light client.
|
||||
pub struct LightDependencies<T> {
|
||||
pub signer_service: Arc<SignerService>,
|
||||
pub client: Arc<T>,
|
||||
pub sync: Arc<LightSync>,
|
||||
pub net: Arc<dyn ManageNetwork>,
|
||||
pub accounts: Arc<AccountProvider>,
|
||||
pub logger: Arc<RotatingLogger>,
|
||||
pub settings: Arc<NetworkSettings>,
|
||||
pub on_demand: Arc<::light::on_demand::OnDemand>,
|
||||
pub cache: Arc<Mutex<LightDataCache>>,
|
||||
pub transaction_queue: Arc<RwLock<LightTransactionQueue>>,
|
||||
pub ws_address: Option<Host>,
|
||||
pub fetch: FetchClient,
|
||||
pub experimental_rpcs: bool,
|
||||
pub executor: Executor,
|
||||
pub private_tx_service: Option<Arc<PrivateTransactionManager>>,
|
||||
pub gas_price_percentile: usize,
|
||||
pub poll_lifetime: u32,
|
||||
}
|
||||
|
||||
impl<C: LightChainClient + 'static> LightDependencies<C> {
|
||||
fn extend_api<T: core::Middleware<Metadata>>(
|
||||
&self,
|
||||
handler: &mut MetaIoHandler<Metadata, T>,
|
||||
apis: &HashSet<Api>,
|
||||
for_generic_pubsub: bool,
|
||||
) {
|
||||
use parity_rpc::v1::*;
|
||||
|
||||
let dispatcher = LightDispatcher::new(
|
||||
self.sync.clone(),
|
||||
self.client.clone(),
|
||||
self.on_demand.clone(),
|
||||
self.cache.clone(),
|
||||
self.transaction_queue.clone(),
|
||||
Arc::new(Mutex::new(dispatch::Reservations::new(
|
||||
self.executor.clone(),
|
||||
))),
|
||||
self.gas_price_percentile,
|
||||
);
|
||||
let account_signer = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _;
|
||||
let accounts = account_utils::accounts_list(self.accounts.clone());
|
||||
|
||||
for api in apis {
|
||||
match *api {
|
||||
Api::Debug => {
|
||||
warn!(target: "rpc", "Debug API is not available in light client mode.")
|
||||
}
|
||||
Api::Web3 => {
|
||||
handler.extend_with(Web3Client::default().to_delegate());
|
||||
}
|
||||
Api::Net => {
|
||||
handler.extend_with(light::NetClient::new(self.sync.clone()).to_delegate());
|
||||
}
|
||||
Api::Eth => {
|
||||
let client = light::EthClient::new(
|
||||
self.sync.clone(),
|
||||
self.client.clone(),
|
||||
self.on_demand.clone(),
|
||||
self.transaction_queue.clone(),
|
||||
accounts.clone(),
|
||||
self.cache.clone(),
|
||||
self.gas_price_percentile,
|
||||
self.poll_lifetime,
|
||||
);
|
||||
handler.extend_with(Eth::to_delegate(client.clone()));
|
||||
|
||||
if !for_generic_pubsub {
|
||||
handler.extend_with(EthFilter::to_delegate(client));
|
||||
add_signing_methods!(
|
||||
EthSigning,
|
||||
handler,
|
||||
self,
|
||||
(&dispatcher, &account_signer)
|
||||
);
|
||||
}
|
||||
}
|
||||
Api::EthPubSub => {
|
||||
let client = EthPubSubClient::light(
|
||||
self.client.clone(),
|
||||
self.on_demand.clone(),
|
||||
self.sync.clone(),
|
||||
self.cache.clone(),
|
||||
self.executor.clone(),
|
||||
self.gas_price_percentile,
|
||||
);
|
||||
self.client.add_listener(client.handler() as Weak<_>);
|
||||
let h = client.handler();
|
||||
self.transaction_queue
|
||||
.write()
|
||||
.add_listener(Box::new(move |transactions| {
|
||||
if let Some(h) = h.upgrade() {
|
||||
h.notify_new_transactions(transactions);
|
||||
}
|
||||
}));
|
||||
handler.extend_with(EthPubSub::to_delegate(client));
|
||||
}
|
||||
Api::Personal => {
|
||||
#[cfg(feature = "accounts")]
|
||||
handler.extend_with(
|
||||
PersonalClient::new(
|
||||
&self.accounts,
|
||||
dispatcher.clone(),
|
||||
self.experimental_rpcs,
|
||||
)
|
||||
.to_delegate(),
|
||||
);
|
||||
}
|
||||
Api::Signer => {
|
||||
handler.extend_with(
|
||||
SignerClient::new(
|
||||
account_signer.clone(),
|
||||
dispatcher.clone(),
|
||||
&self.signer_service,
|
||||
self.executor.clone(),
|
||||
)
|
||||
.to_delegate(),
|
||||
);
|
||||
}
|
||||
Api::Parity => {
|
||||
let signer = match self.signer_service.is_enabled() {
|
||||
true => Some(self.signer_service.clone()),
|
||||
false => None,
|
||||
};
|
||||
handler.extend_with(
|
||||
light::ParityClient::new(
|
||||
Arc::new(dispatcher.clone()),
|
||||
self.logger.clone(),
|
||||
self.settings.clone(),
|
||||
signer,
|
||||
self.ws_address.clone(),
|
||||
self.gas_price_percentile,
|
||||
)
|
||||
.to_delegate(),
|
||||
);
|
||||
#[cfg(feature = "accounts")]
|
||||
handler.extend_with(ParityAccountsInfo::to_delegate(
|
||||
ParityAccountsClient::new(&self.accounts),
|
||||
));
|
||||
|
||||
if !for_generic_pubsub {
|
||||
add_signing_methods!(
|
||||
ParitySigning,
|
||||
handler,
|
||||
self,
|
||||
(&dispatcher, &account_signer)
|
||||
);
|
||||
}
|
||||
}
|
||||
Api::ParityPubSub => {
|
||||
if !for_generic_pubsub {
|
||||
let mut rpc = MetaIoHandler::default();
|
||||
let apis = ApiSet::List(apis.clone())
|
||||
.retain(ApiSet::PubSub)
|
||||
.list_apis();
|
||||
self.extend_api(&mut rpc, &apis, true);
|
||||
handler.extend_with(
|
||||
PubSubClient::new(rpc, self.executor.clone()).to_delegate(),
|
||||
);
|
||||
}
|
||||
}
|
||||
Api::ParityAccounts => {
|
||||
#[cfg(feature = "accounts")]
|
||||
handler.extend_with(ParityAccounts::to_delegate(ParityAccountsClient::new(
|
||||
&self.accounts,
|
||||
)));
|
||||
}
|
||||
Api::ParitySet => handler.extend_with(
|
||||
light::ParitySetClient::new(
|
||||
self.client.clone(),
|
||||
self.sync.clone(),
|
||||
self.fetch.clone(),
|
||||
)
|
||||
.to_delegate(),
|
||||
),
|
||||
Api::Traces => handler.extend_with(light::TracesClient.to_delegate()),
|
||||
Api::SecretStore => {
|
||||
#[cfg(feature = "accounts")]
|
||||
handler.extend_with(SecretStoreClient::new(&self.accounts).to_delegate());
|
||||
}
|
||||
Api::Private => {
|
||||
if let Some(ref tx_manager) = self.private_tx_service {
|
||||
let private_tx_service = Some(tx_manager.clone());
|
||||
handler.extend_with(PrivateClient::new(private_tx_service).to_delegate());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: LightChainClient + 'static> Dependencies for LightDependencies<T> {
|
||||
type Notifier = LightClientNotifier;
|
||||
|
||||
fn activity_notifier(&self) -> Self::Notifier {
|
||||
LightClientNotifier
|
||||
}
|
||||
|
||||
fn extend_with_set<S>(&self, handler: &mut MetaIoHandler<Metadata, S>, apis: &HashSet<Api>)
|
||||
where
|
||||
S: core::Middleware<Metadata>,
|
||||
{
|
||||
self.extend_api(handler, apis, false)
|
||||
}
|
||||
}
|
||||
|
||||
impl ApiSet {
|
||||
/// Retains only APIs in given set.
|
||||
pub fn retain(self, set: Self) -> Self {
|
||||
|
332
parity/run.rs
332
parity/run.rs
@ -34,7 +34,6 @@ use ethcore::{
|
||||
},
|
||||
miner::{self, stratum, Miner, MinerOptions, MinerService},
|
||||
snapshot::{self, SnapshotConfiguration},
|
||||
spec::{OptimizeFor, SpecParams},
|
||||
verification::queue::VerifierSettings,
|
||||
};
|
||||
use ethcore_logger::{Config as LogConfig, RotatingLogger};
|
||||
@ -44,11 +43,10 @@ use ethereum_types::Address;
|
||||
use futures::IntoFuture;
|
||||
use hash_fetch::{self, fetch};
|
||||
use helpers::{execute_upgrades, passwords_from_files, to_client_config};
|
||||
use informant::{FullNodeInformantData, Informant, LightNodeInformantData};
|
||||
use informant::{FullNodeInformantData, Informant};
|
||||
use ipfs;
|
||||
use journaldb::Algorithm;
|
||||
use jsonrpc_core;
|
||||
use light::Cache as LightDataCache;
|
||||
use miner::{external::ExternalMiner, work_notify::WorkPoster};
|
||||
use modules;
|
||||
use node_filter::NodeFilter;
|
||||
@ -77,16 +75,9 @@ const SNAPSHOT_PERIOD: u64 = 5000;
|
||||
// how many blocks to wait before starting a periodic snapshot.
|
||||
const SNAPSHOT_HISTORY: u64 = 100;
|
||||
|
||||
// Number of minutes before a given gas price corpus should expire.
|
||||
// Light client only.
|
||||
const GAS_CORPUS_EXPIRATION_MINUTES: u64 = 60 * 6;
|
||||
|
||||
// Full client number of DNS threads
|
||||
const FETCH_FULL_NUM_DNS_THREADS: usize = 4;
|
||||
|
||||
// Light client number of DNS threads
|
||||
const FETCH_LIGHT_NUM_DNS_THREADS: usize = 1;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct RunCmd {
|
||||
pub cache_config: CacheConfig,
|
||||
@ -132,16 +123,8 @@ pub struct RunCmd {
|
||||
pub allow_missing_blocks: bool,
|
||||
pub download_old_blocks: bool,
|
||||
pub verifier_settings: VerifierSettings,
|
||||
pub serve_light: bool,
|
||||
pub light: bool,
|
||||
pub no_persistent_txqueue: bool,
|
||||
pub no_hardcoded_sync: bool,
|
||||
pub max_round_blocks_to_import: usize,
|
||||
pub on_demand_response_time_window: Option<u64>,
|
||||
pub on_demand_request_backoff_start: Option<u64>,
|
||||
pub on_demand_request_backoff_max: Option<u64>,
|
||||
pub on_demand_request_backoff_rounds_max: Option<usize>,
|
||||
pub on_demand_request_consecutive_failures: Option<usize>,
|
||||
}
|
||||
|
||||
// node info fetcher for the local store.
|
||||
@ -169,252 +152,15 @@ impl ::local_store::NodeInfo for FullNodeInfo {
|
||||
}
|
||||
}
|
||||
|
||||
type LightClient = ::light::client::Client<::light_helpers::EpochFetch>;
|
||||
|
||||
// helper for light execution.
|
||||
fn execute_light_impl<Cr>(
|
||||
cmd: RunCmd,
|
||||
logger: Arc<RotatingLogger>,
|
||||
on_client_rq: Cr,
|
||||
) -> Result<RunningClient, String>
|
||||
where
|
||||
Cr: Fn(String) + 'static + Send,
|
||||
{
|
||||
use light::client as light_client;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use sync::{LightSync, LightSyncParams, ManageNetwork};
|
||||
|
||||
// load spec
|
||||
let spec = cmd.spec.spec(SpecParams::new(
|
||||
cmd.dirs.cache.as_ref(),
|
||||
OptimizeFor::Memory,
|
||||
))?;
|
||||
|
||||
// load genesis hash
|
||||
let genesis_hash = spec.genesis_header().hash();
|
||||
|
||||
// database paths
|
||||
let db_dirs = cmd.dirs.database(
|
||||
genesis_hash,
|
||||
cmd.spec.legacy_fork_name(),
|
||||
spec.data_dir.clone(),
|
||||
);
|
||||
|
||||
// user defaults path
|
||||
let user_defaults_path = db_dirs.user_defaults_path();
|
||||
|
||||
// load user defaults
|
||||
let user_defaults = UserDefaults::load(&user_defaults_path)?;
|
||||
|
||||
// select pruning algorithm
|
||||
let algorithm = cmd.pruning.to_algorithm(&user_defaults);
|
||||
|
||||
// execute upgrades
|
||||
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;
|
||||
|
||||
// create dirs used by parity
|
||||
cmd.dirs.create_dirs(
|
||||
cmd.acc_conf.unlocked_accounts.len() == 0,
|
||||
cmd.secretstore_conf.enabled,
|
||||
)?;
|
||||
|
||||
//print out running parity environment
|
||||
print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs);
|
||||
|
||||
info!(
|
||||
"Running in experimental {} mode.",
|
||||
Colour::Blue.bold().paint("Light Client")
|
||||
);
|
||||
|
||||
// TODO: configurable cache size.
|
||||
let cache = LightDataCache::new(
|
||||
Default::default(),
|
||||
Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES),
|
||||
);
|
||||
let cache = Arc::new(Mutex::new(cache));
|
||||
|
||||
// start client and create transaction queue.
|
||||
let mut config = light_client::Config {
|
||||
queue: Default::default(),
|
||||
chain_column: ::ethcore_db::COL_LIGHT_CHAIN,
|
||||
verify_full: true,
|
||||
check_seal: cmd.check_seal,
|
||||
no_hardcoded_sync: cmd.no_hardcoded_sync,
|
||||
};
|
||||
|
||||
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
|
||||
config.queue.verifier_settings = cmd.verifier_settings;
|
||||
|
||||
// start on_demand service.
|
||||
|
||||
let response_time_window = cmd
|
||||
.on_demand_response_time_window
|
||||
.map_or(::light::on_demand::DEFAULT_RESPONSE_TIME_TO_LIVE, |s| {
|
||||
Duration::from_secs(s)
|
||||
});
|
||||
|
||||
let request_backoff_start = cmd.on_demand_request_backoff_start.map_or(
|
||||
::light::on_demand::DEFAULT_REQUEST_MIN_BACKOFF_DURATION,
|
||||
|s| Duration::from_secs(s),
|
||||
);
|
||||
|
||||
let request_backoff_max = cmd.on_demand_request_backoff_max.map_or(
|
||||
::light::on_demand::DEFAULT_REQUEST_MAX_BACKOFF_DURATION,
|
||||
|s| Duration::from_secs(s),
|
||||
);
|
||||
|
||||
let on_demand = Arc::new({
|
||||
::light::on_demand::OnDemand::new(
|
||||
cache.clone(),
|
||||
response_time_window,
|
||||
request_backoff_start,
|
||||
request_backoff_max,
|
||||
cmd.on_demand_request_backoff_rounds_max
|
||||
.unwrap_or(::light::on_demand::DEFAULT_MAX_REQUEST_BACKOFF_ROUNDS),
|
||||
cmd.on_demand_request_consecutive_failures
|
||||
.unwrap_or(::light::on_demand::DEFAULT_NUM_CONSECUTIVE_FAILED_REQUESTS),
|
||||
)
|
||||
});
|
||||
|
||||
let sync_handle = Arc::new(RwLock::new(Weak::new()));
|
||||
let fetch = ::light_helpers::EpochFetch {
|
||||
on_demand: on_demand.clone(),
|
||||
sync: sync_handle.clone(),
|
||||
};
|
||||
|
||||
// initialize database.
|
||||
let db = db::open_db(
|
||||
&db_dirs
|
||||
.client_path(algorithm)
|
||||
.to_str()
|
||||
.expect("DB path could not be converted to string."),
|
||||
&cmd.cache_config,
|
||||
&cmd.compaction,
|
||||
)
|
||||
.map_err(|e| format!("Failed to open database {:?}", e))?;
|
||||
|
||||
let service = light_client::Service::start(config, &spec, fetch, db, cache.clone())
|
||||
.map_err(|e| format!("Error starting light client: {}", e))?;
|
||||
let client = service.client().clone();
|
||||
let txq = Arc::new(RwLock::new(
|
||||
::light::transaction_queue::TransactionQueue::default(),
|
||||
));
|
||||
let provider = ::light::provider::LightProvider::new(client.clone(), txq.clone());
|
||||
|
||||
// start network.
|
||||
// set up bootnodes
|
||||
let mut net_conf = cmd.net_conf;
|
||||
if !cmd.custom_bootnodes {
|
||||
net_conf.boot_nodes = spec.nodes.clone();
|
||||
}
|
||||
|
||||
// set network path.
|
||||
net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned());
|
||||
let sync_params = LightSyncParams {
|
||||
network_config: net_conf
|
||||
.into_basic()
|
||||
.map_err(|e| format!("Failed to produce network config: {}", e))?,
|
||||
client: Arc::new(provider),
|
||||
network_id: cmd.network_id.unwrap_or(spec.network_id()),
|
||||
subprotocol_name: sync::LIGHT_PROTOCOL,
|
||||
handlers: vec![on_demand.clone()],
|
||||
};
|
||||
let light_sync =
|
||||
LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?;
|
||||
let light_sync = Arc::new(light_sync);
|
||||
*sync_handle.write() = Arc::downgrade(&light_sync);
|
||||
|
||||
// spin up event loop
|
||||
let runtime = Runtime::with_default_thread_count();
|
||||
|
||||
// start the network.
|
||||
light_sync.start_network();
|
||||
|
||||
// fetch service
|
||||
let fetch = fetch::Client::new(FETCH_LIGHT_NUM_DNS_THREADS)
|
||||
.map_err(|e| format!("Error starting fetch client: {:?}", e))?;
|
||||
let passwords = passwords_from_files(&cmd.acc_conf.password_files)?;
|
||||
|
||||
// prepare account provider
|
||||
let account_provider = Arc::new(account_utils::prepare_account_provider(
|
||||
&cmd.spec,
|
||||
&cmd.dirs,
|
||||
&spec.data_dir,
|
||||
cmd.acc_conf,
|
||||
&passwords,
|
||||
)?);
|
||||
let rpc_stats = Arc::new(informant::RpcStats::default());
|
||||
|
||||
// the dapps server
|
||||
let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.logger_config));
|
||||
|
||||
// start RPCs
|
||||
let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies {
|
||||
signer_service: signer_service,
|
||||
client: client.clone(),
|
||||
sync: light_sync.clone(),
|
||||
net: light_sync.clone(),
|
||||
accounts: account_provider,
|
||||
logger: logger,
|
||||
settings: Arc::new(cmd.net_settings),
|
||||
on_demand: on_demand,
|
||||
cache: cache.clone(),
|
||||
transaction_queue: txq,
|
||||
ws_address: cmd.ws_conf.address(),
|
||||
fetch: fetch,
|
||||
experimental_rpcs: cmd.experimental_rpcs,
|
||||
executor: runtime.executor(),
|
||||
private_tx_service: None, //TODO: add this to client.
|
||||
gas_price_percentile: cmd.gas_price_percentile,
|
||||
poll_lifetime: cmd.poll_lifetime,
|
||||
});
|
||||
|
||||
let dependencies = rpc::Dependencies {
|
||||
apis: deps_for_rpc_apis.clone(),
|
||||
executor: runtime.executor(),
|
||||
stats: rpc_stats.clone(),
|
||||
};
|
||||
|
||||
// start rpc servers
|
||||
let rpc_direct = rpc::setup_apis(rpc_apis::ApiSet::All, &dependencies);
|
||||
let ws_server = rpc::new_ws(cmd.ws_conf, &dependencies)?;
|
||||
let http_server = rpc::new_http(
|
||||
"HTTP JSON-RPC",
|
||||
"jsonrpc",
|
||||
cmd.http_conf.clone(),
|
||||
&dependencies,
|
||||
)?;
|
||||
let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?;
|
||||
|
||||
// the informant
|
||||
let informant = Arc::new(Informant::new(
|
||||
LightNodeInformantData {
|
||||
client: client.clone(),
|
||||
sync: light_sync.clone(),
|
||||
cache: cache,
|
||||
},
|
||||
None,
|
||||
Some(rpc_stats),
|
||||
cmd.logger_config.color,
|
||||
));
|
||||
service.add_notify(informant.clone());
|
||||
service
|
||||
.register_handler(informant.clone())
|
||||
.map_err(|_| "Unable to register informant handler".to_owned())?;
|
||||
|
||||
client.set_exit_handler(on_client_rq);
|
||||
|
||||
Ok(RunningClient {
|
||||
inner: RunningClientInner::Light {
|
||||
rpc: rpc_direct,
|
||||
informant,
|
||||
client,
|
||||
keep_alive: Box::new((service, ws_server, http_server, ipc_server, runtime)),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
fn execute_impl<Cr, Rr>(
|
||||
/// Executes the given run command.
|
||||
///
|
||||
/// `on_client_rq` is the action to perform when the client receives an RPC request to be restarted
|
||||
/// with a different chain.
|
||||
///
|
||||
/// `on_updater_rq` is the action to perform when the updater has a new binary to execute.
|
||||
///
|
||||
/// On error, returns what to print on stderr.
|
||||
pub fn execute<Cr, Rr>(
|
||||
cmd: RunCmd,
|
||||
logger: Arc<RotatingLogger>,
|
||||
on_client_rq: Cr,
|
||||
@ -540,7 +286,6 @@ where
|
||||
_ => sync::WarpSync::Disabled,
|
||||
};
|
||||
sync_config.download_old_blocks = cmd.download_old_blocks;
|
||||
sync_config.serve_light = cmd.serve_light;
|
||||
|
||||
let passwords = passwords_from_files(&cmd.acc_conf.password_files)?;
|
||||
|
||||
@ -597,11 +342,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
// display warning if using --no-hardcoded-sync
|
||||
if cmd.no_hardcoded_sync {
|
||||
warn!("The --no-hardcoded-sync flag has no effect if you don't use --light");
|
||||
}
|
||||
|
||||
// create client config
|
||||
let mut client_config = to_client_config(
|
||||
&cmd.cache_config,
|
||||
@ -747,7 +487,6 @@ where
|
||||
client.clone(),
|
||||
snapshot_service.clone(),
|
||||
private_tx_sync,
|
||||
client.clone(),
|
||||
&cmd.logger_config,
|
||||
connection_filter
|
||||
.clone()
|
||||
@ -965,15 +704,6 @@ pub struct RunningClient {
|
||||
}
|
||||
|
||||
enum RunningClientInner {
|
||||
Light {
|
||||
rpc: jsonrpc_core::MetaIoHandler<
|
||||
Metadata,
|
||||
informant::Middleware<rpc_apis::LightClientNotifier>,
|
||||
>,
|
||||
informant: Arc<Informant<LightNodeInformantData>>,
|
||||
client: Arc<LightClient>,
|
||||
keep_alive: Box<dyn Any>,
|
||||
},
|
||||
Full {
|
||||
rpc:
|
||||
jsonrpc_core::MetaIoHandler<Metadata, informant::Middleware<informant::ClientNotifier>>,
|
||||
@ -998,7 +728,6 @@ impl RunningClient {
|
||||
};
|
||||
|
||||
match self.inner {
|
||||
RunningClientInner::Light { ref rpc, .. } => rpc.handle_request(request, metadata),
|
||||
RunningClientInner::Full { ref rpc, .. } => rpc.handle_request(request, metadata),
|
||||
}
|
||||
}
|
||||
@ -1006,22 +735,6 @@ impl RunningClient {
|
||||
/// Shuts down the client.
|
||||
pub fn shutdown(self) {
|
||||
match self.inner {
|
||||
RunningClientInner::Light {
|
||||
rpc,
|
||||
informant,
|
||||
client,
|
||||
keep_alive,
|
||||
} => {
|
||||
// Create a weak reference to the client so that we can wait on shutdown
|
||||
// until it is dropped
|
||||
let weak_client = Arc::downgrade(&client);
|
||||
drop(rpc);
|
||||
drop(keep_alive);
|
||||
informant.shutdown();
|
||||
drop(informant);
|
||||
drop(client);
|
||||
wait_for_drop(weak_client);
|
||||
}
|
||||
RunningClientInner::Full {
|
||||
rpc,
|
||||
informant,
|
||||
@ -1060,31 +773,6 @@ impl RunningClient {
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes the given run command.
|
||||
///
|
||||
/// `on_client_rq` is the action to perform when the client receives an RPC request to be restarted
|
||||
/// with a different chain.
|
||||
///
|
||||
/// `on_updater_rq` is the action to perform when the updater has a new binary to execute.
|
||||
///
|
||||
/// On error, returns what to print on stderr.
|
||||
pub fn execute<Cr, Rr>(
|
||||
cmd: RunCmd,
|
||||
logger: Arc<RotatingLogger>,
|
||||
on_client_rq: Cr,
|
||||
on_updater_rq: Rr,
|
||||
) -> Result<RunningClient, String>
|
||||
where
|
||||
Cr: Fn(String) + 'static + Send,
|
||||
Rr: Fn() + 'static + Send,
|
||||
{
|
||||
if cmd.light {
|
||||
execute_light_impl(cmd, logger, on_client_rq)
|
||||
} else {
|
||||
execute_impl(cmd, logger, on_client_rq, on_updater_rq)
|
||||
}
|
||||
}
|
||||
|
||||
fn print_running_environment(data_dir: &str, dirs: &Directories, db_dirs: &DatabaseDirectories) {
|
||||
info!("Starting {}", Colour::White.bold().paint(version()));
|
||||
info!(
|
||||
|
@ -38,14 +38,12 @@ common-types = { path = "../ethcore/types" }
|
||||
ethash = { path = "../ethash" }
|
||||
ethcore = { path = "../ethcore" }
|
||||
ethcore-accounts = { path = "../accounts", optional = true }
|
||||
ethcore-light = { path = "../ethcore/light" }
|
||||
ethcore-logger = { path = "../parity/logger" }
|
||||
ethcore-miner = { path = "../miner" }
|
||||
ethcore-network = { path = "../util/network" }
|
||||
ethcore-private-tx = { path = "../ethcore/private-tx" }
|
||||
ethcore-sync = { path = "../ethcore/sync" }
|
||||
ethereum-types = "0.4"
|
||||
fastmap = { path = "../util/fastmap" }
|
||||
parity-bytes = "0.1"
|
||||
parity-crypto = "0.3.0"
|
||||
|
||||
|
@ -64,7 +64,6 @@ extern crate common_types as types;
|
||||
extern crate eip_712;
|
||||
extern crate ethash;
|
||||
extern crate ethcore;
|
||||
extern crate ethcore_light as light;
|
||||
extern crate ethcore_logger;
|
||||
extern crate ethcore_miner as miner;
|
||||
extern crate ethcore_network as network;
|
||||
@ -73,7 +72,6 @@ extern crate ethcore_sync as sync;
|
||||
extern crate ethereum_types;
|
||||
extern crate ethkey;
|
||||
extern crate ethstore;
|
||||
extern crate fastmap;
|
||||
extern crate fetch;
|
||||
extern crate keccak_hash as hash;
|
||||
extern crate parity_bytes as bytes;
|
||||
|
@ -1,327 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use ethereum_types::{Address, H256, U256};
|
||||
use light::{
|
||||
cache::Cache as LightDataCache,
|
||||
client::LightChainClient,
|
||||
on_demand::{request, OnDemandRequester},
|
||||
TransactionQueue as LightTransactionQueue,
|
||||
};
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use stats::Corpus;
|
||||
use sync::{LightNetworkDispatcher, LightSyncProvider, ManageNetwork};
|
||||
use types::{
|
||||
basic_account::BasicAccount,
|
||||
ids::BlockId,
|
||||
transaction::{Error as TransactionError, PendingTransaction, SignedTransaction},
|
||||
};
|
||||
|
||||
use jsonrpc_core::{
|
||||
futures::{future, future::Either, Future, IntoFuture},
|
||||
BoxFuture, Result,
|
||||
};
|
||||
use v1::{
|
||||
helpers::{errors, nonce, FilledTransactionRequest, TransactionRequest},
|
||||
types::RichRawTransaction as RpcRichRawTransaction,
|
||||
};
|
||||
|
||||
use super::{Accounts, Dispatcher, PostSign, SignWith};
|
||||
|
||||
/// Dispatcher for light clients -- fetches default gas price, next nonce, etc. from network.
|
||||
pub struct LightDispatcher<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
/// Sync service.
|
||||
pub sync: Arc<S>,
|
||||
/// Header chain client.
|
||||
pub client: Arc<dyn LightChainClient>,
|
||||
/// On-demand request service.
|
||||
pub on_demand: Arc<OD>,
|
||||
/// Data cache.
|
||||
pub cache: Arc<Mutex<LightDataCache>>,
|
||||
/// Transaction queue.
|
||||
pub transaction_queue: Arc<RwLock<LightTransactionQueue>>,
|
||||
/// Nonce reservations
|
||||
pub nonces: Arc<Mutex<nonce::Reservations>>,
|
||||
/// Gas Price percentile value used as default gas price.
|
||||
pub gas_price_percentile: usize,
|
||||
}
|
||||
|
||||
impl<S, OD> LightDispatcher<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
/// Create a new `LightDispatcher` from its requisite parts.
|
||||
///
|
||||
/// For correct operation, the OnDemand service is assumed to be registered as a network handler,
|
||||
pub fn new(
|
||||
sync: Arc<S>,
|
||||
client: Arc<dyn LightChainClient>,
|
||||
on_demand: Arc<OD>,
|
||||
cache: Arc<Mutex<LightDataCache>>,
|
||||
transaction_queue: Arc<RwLock<LightTransactionQueue>>,
|
||||
nonces: Arc<Mutex<nonce::Reservations>>,
|
||||
gas_price_percentile: usize,
|
||||
) -> Self {
|
||||
LightDispatcher {
|
||||
sync,
|
||||
client,
|
||||
on_demand,
|
||||
cache,
|
||||
transaction_queue,
|
||||
nonces,
|
||||
gas_price_percentile,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a recent gas price corpus.
|
||||
// TODO: this could be `impl Trait`.
|
||||
pub fn gas_price_corpus(&self) -> BoxFuture<Corpus<U256>> {
|
||||
fetch_gas_price_corpus(
|
||||
self.sync.clone(),
|
||||
self.client.clone(),
|
||||
self.on_demand.clone(),
|
||||
self.cache.clone(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Get an account's state
|
||||
fn account(&self, addr: Address) -> BoxFuture<Option<BasicAccount>> {
|
||||
let best_header = self.client.best_block_header();
|
||||
let account_future = self.sync.with_context(|ctx| {
|
||||
self.on_demand
|
||||
.request(
|
||||
ctx,
|
||||
request::Account {
|
||||
header: best_header.into(),
|
||||
address: addr,
|
||||
},
|
||||
)
|
||||
.expect("no back-references; therefore all back-references valid; qed")
|
||||
});
|
||||
|
||||
match account_future {
|
||||
Some(response) => Box::new(response.map_err(|_| errors::no_light_peers())),
|
||||
None => Box::new(future::err(errors::network_disabled())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get an account's next nonce.
|
||||
pub fn next_nonce(&self, addr: Address) -> BoxFuture<U256> {
|
||||
let account_start_nonce = self
|
||||
.client
|
||||
.engine()
|
||||
.account_start_nonce(self.client.best_block_header().number());
|
||||
Box::new(self.account(addr).and_then(move |maybe_account| {
|
||||
future::ok(maybe_account.map_or(account_start_nonce, |account| account.nonce))
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, OD> Clone for LightDispatcher<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
sync: self.sync.clone(),
|
||||
client: self.client.clone(),
|
||||
on_demand: self.on_demand.clone(),
|
||||
cache: self.cache.clone(),
|
||||
transaction_queue: self.transaction_queue.clone(),
|
||||
nonces: self.nonces.clone(),
|
||||
gas_price_percentile: self.gas_price_percentile,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, OD> Dispatcher for LightDispatcher<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
// Ignore the `force_nonce` flag in order to always query the network when fetching the nonce and
|
||||
// the account state. If the nonce is specified in the transaction use that nonce instead but do the
|
||||
// network request anyway to the account state (balance)
|
||||
fn fill_optional_fields(
|
||||
&self,
|
||||
request: TransactionRequest,
|
||||
default_sender: Address,
|
||||
_force_nonce: bool,
|
||||
) -> BoxFuture<FilledTransactionRequest> {
|
||||
const DEFAULT_GAS_PRICE: U256 = U256([0, 0, 0, 21_000_000]);
|
||||
|
||||
let gas_limit = self.client.best_block_header().gas_limit();
|
||||
let request_gas_price = request.gas_price;
|
||||
let from = request.from.unwrap_or(default_sender);
|
||||
|
||||
let with_gas_price = move |gas_price| {
|
||||
let request = request;
|
||||
FilledTransactionRequest {
|
||||
from,
|
||||
used_default_from: request.from.is_none(),
|
||||
to: request.to,
|
||||
nonce: request.nonce,
|
||||
gas_price,
|
||||
gas: request.gas.unwrap_or_else(|| gas_limit / 3),
|
||||
value: request.value.unwrap_or_default(),
|
||||
data: request.data.unwrap_or_else(Vec::new),
|
||||
condition: request.condition,
|
||||
}
|
||||
};
|
||||
|
||||
// fast path for known gas price.
|
||||
let gas_price_percentile = self.gas_price_percentile;
|
||||
let gas_price = match request_gas_price {
|
||||
Some(gas_price) => Either::A(future::ok(with_gas_price(gas_price))),
|
||||
None => Either::B(
|
||||
fetch_gas_price_corpus(
|
||||
self.sync.clone(),
|
||||
self.client.clone(),
|
||||
self.on_demand.clone(),
|
||||
self.cache.clone(),
|
||||
)
|
||||
.and_then(move |corp| match corp.percentile(gas_price_percentile) {
|
||||
Some(percentile) => Ok(*percentile),
|
||||
None => Ok(DEFAULT_GAS_PRICE), // fall back to default on error.
|
||||
})
|
||||
.map(with_gas_price),
|
||||
),
|
||||
};
|
||||
|
||||
let future_account = self.account(from);
|
||||
|
||||
Box::new(gas_price.and_then(move |mut filled| {
|
||||
future_account.and_then(move |maybe_account| {
|
||||
let cost = filled
|
||||
.value
|
||||
.saturating_add(filled.gas.saturating_mul(filled.gas_price));
|
||||
match maybe_account {
|
||||
Some(ref account) if cost > account.balance => {
|
||||
Err(errors::transaction(TransactionError::InsufficientBalance {
|
||||
balance: account.balance,
|
||||
cost,
|
||||
}))
|
||||
}
|
||||
Some(account) => {
|
||||
if filled.nonce.is_none() {
|
||||
filled.nonce = Some(account.nonce);
|
||||
}
|
||||
Ok(filled)
|
||||
}
|
||||
None => Err(errors::account("Account not found", "")),
|
||||
}
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
fn sign<P>(
|
||||
&self,
|
||||
filled: FilledTransactionRequest,
|
||||
signer: &Arc<dyn Accounts>,
|
||||
password: SignWith,
|
||||
post_sign: P,
|
||||
) -> BoxFuture<P::Item>
|
||||
where
|
||||
P: PostSign + 'static,
|
||||
<P::Out as futures::future::IntoFuture>::Future: Send,
|
||||
{
|
||||
let chain_id = self.client.signing_chain_id();
|
||||
let nonce = filled.nonce.expect("nonce is always provided; qed");
|
||||
let future = signer
|
||||
.sign_transaction(filled, chain_id, nonce, password)
|
||||
.into_future()
|
||||
.and_then(move |signed| post_sign.execute(signed));
|
||||
Box::new(future)
|
||||
}
|
||||
|
||||
fn enrich(&self, signed_transaction: SignedTransaction) -> RpcRichRawTransaction {
|
||||
RpcRichRawTransaction::from_signed(signed_transaction)
|
||||
}
|
||||
|
||||
fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result<H256> {
|
||||
let hash = signed_transaction.transaction.hash();
|
||||
|
||||
self.transaction_queue
|
||||
.write()
|
||||
.import(signed_transaction)
|
||||
.map_err(errors::transaction)
|
||||
.map(|_| hash)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a recent gas price corpus.
|
||||
// TODO: this could be `impl Trait`.
|
||||
pub fn fetch_gas_price_corpus<S, OD>(
|
||||
sync: Arc<S>,
|
||||
client: Arc<dyn LightChainClient>,
|
||||
on_demand: Arc<OD>,
|
||||
cache: Arc<Mutex<LightDataCache>>,
|
||||
) -> BoxFuture<Corpus<U256>>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
const GAS_PRICE_SAMPLE_SIZE: usize = 100;
|
||||
|
||||
if let Some(cached) = { cache.lock().gas_price_corpus() } {
|
||||
return Box::new(future::ok(cached));
|
||||
}
|
||||
|
||||
let cache = cache.clone();
|
||||
let eventual_corpus = sync.with_context(|ctx| {
|
||||
// get some recent headers with gas used,
|
||||
// and request each of the blocks from the network.
|
||||
let block_requests = client
|
||||
.ancestry_iter(BlockId::Latest)
|
||||
.filter(|hdr| hdr.gas_used() != U256::default())
|
||||
.take(GAS_PRICE_SAMPLE_SIZE)
|
||||
.map(|hdr| request::Body(hdr.into()))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// when the blocks come in, collect gas prices into a vector
|
||||
on_demand
|
||||
.request(ctx, block_requests)
|
||||
.expect("no back-references; therefore all back-references are valid; qed")
|
||||
.map(|bodies| {
|
||||
bodies.into_iter().fold(Vec::new(), |mut v, block| {
|
||||
for t in block.transaction_views().iter() {
|
||||
v.push(t.gas_price())
|
||||
}
|
||||
v
|
||||
})
|
||||
})
|
||||
.map(move |prices| {
|
||||
// produce a corpus from the vector and cache it.
|
||||
// It's later used to get a percentile for default gas price.
|
||||
let corpus: ::stats::Corpus<_> = prices.into();
|
||||
cache.lock().set_gas_price_corpus(corpus.clone());
|
||||
corpus
|
||||
})
|
||||
});
|
||||
|
||||
match eventual_corpus {
|
||||
Some(corp) => Box::new(corp.map_err(|_| errors::no_light_peers())),
|
||||
None => Box::new(future::err(errors::network_disabled())),
|
||||
}
|
||||
}
|
@ -17,7 +17,6 @@
|
||||
//! Utilities and helpers for transaction dispatch.
|
||||
|
||||
mod full;
|
||||
pub(crate) mod light;
|
||||
mod prospective_signer;
|
||||
|
||||
#[cfg(any(test, feature = "accounts"))]
|
||||
@ -81,7 +80,7 @@ mod signing {
|
||||
}
|
||||
}
|
||||
|
||||
pub use self::{full::FullDispatcher, light::LightDispatcher, signing::Signer};
|
||||
pub use self::{full::FullDispatcher, signing::Signer};
|
||||
pub use v1::helpers::nonce::Reservations;
|
||||
|
||||
use std::{fmt::Debug, ops::Deref, sync::Arc};
|
||||
|
@ -23,8 +23,7 @@ use ethcore::{
|
||||
error::{CallError, Error as EthcoreError, ErrorKind},
|
||||
};
|
||||
use ethcore_private_tx::Error as PrivateTransactionError;
|
||||
use jsonrpc_core::{futures, Error, ErrorCode, Result as RpcResult, Value};
|
||||
use light::on_demand::error::{Error as OnDemandError, ErrorKind as OnDemandErrorKind};
|
||||
use jsonrpc_core::{Error, ErrorCode, Result as RpcResult, Value};
|
||||
use rlp::DecoderError;
|
||||
use types::{blockchain_info::BlockChainInfo, transaction::Error as TransactionError};
|
||||
use v1::{impls::EthClientOptions, types::BlockNumber};
|
||||
@ -55,7 +54,6 @@ mod codes {
|
||||
pub const ENCRYPTION_ERROR: i64 = -32055;
|
||||
pub const ENCODING_ERROR: i64 = -32058;
|
||||
pub const FETCH_ERROR: i64 = -32060;
|
||||
pub const NO_LIGHT_PEERS: i64 = -32065;
|
||||
pub const NO_PEERS: i64 = -32066;
|
||||
pub const DEPRECATED: i64 = -32070;
|
||||
pub const EXPERIMENTAL_RPC: i64 = -32071;
|
||||
@ -111,17 +109,6 @@ pub fn request_rejected_limit() -> Error {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn request_rejected_param_limit(limit: u64, items_desc: &str) -> Error {
|
||||
Error {
|
||||
code: ErrorCode::ServerError(codes::REQUEST_REJECTED_LIMIT),
|
||||
message: format!(
|
||||
"Requested data size exceeds limit of {} {}.",
|
||||
limit, items_desc
|
||||
),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn account<T: fmt::Debug>(error: &str, details: T) -> Error {
|
||||
Error {
|
||||
code: ErrorCode::ServerError(codes::ACCOUNT_ERROR),
|
||||
@ -522,14 +509,6 @@ pub fn unknown_block() -> Error {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn no_light_peers() -> Error {
|
||||
Error {
|
||||
code: ErrorCode::ServerError(codes::NO_LIGHT_PEERS),
|
||||
message: "No light peers who can serve data".into(),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deprecated<S: Into<String>, T: Into<Option<S>>>(message: T) -> Error {
|
||||
Error {
|
||||
code: ErrorCode::ServerError(codes::DEPRECATED),
|
||||
@ -559,44 +538,6 @@ pub fn filter_block_not_found(id: BlockId) -> Error {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn on_demand_error(err: OnDemandError) -> Error {
|
||||
match err {
|
||||
OnDemandError(OnDemandErrorKind::ChannelCanceled(e), _) => on_demand_cancel(e),
|
||||
OnDemandError(OnDemandErrorKind::RequestLimit, _) => timeout_new_peer(&err),
|
||||
OnDemandError(OnDemandErrorKind::BadResponse(_), _) => max_attempts_reached(&err),
|
||||
_ => on_demand_others(&err),
|
||||
}
|
||||
}
|
||||
|
||||
// on-demand sender cancelled.
|
||||
pub fn on_demand_cancel(_cancel: futures::sync::oneshot::Canceled) -> Error {
|
||||
internal("on-demand sender cancelled", "")
|
||||
}
|
||||
|
||||
pub fn max_attempts_reached(err: &OnDemandError) -> Error {
|
||||
Error {
|
||||
code: ErrorCode::ServerError(codes::REQUEST_NOT_FOUND),
|
||||
message: err.to_string(),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn timeout_new_peer(err: &OnDemandError) -> Error {
|
||||
Error {
|
||||
code: ErrorCode::ServerError(codes::NO_LIGHT_PEERS),
|
||||
message: err.to_string(),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn on_demand_others(err: &OnDemandError) -> Error {
|
||||
Error {
|
||||
code: ErrorCode::ServerError(codes::UNKNOWN_ERROR),
|
||||
message: err.to_string(),
|
||||
data: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn status_error(has_peers: bool) -> Error {
|
||||
if has_peers {
|
||||
no_work()
|
||||
|
@ -1,974 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Helpers for fetching blockchain data either from the light client or the network.
|
||||
|
||||
use std::{clone::Clone, cmp, collections::BTreeMap, sync::Arc};
|
||||
|
||||
use ethcore::executed::ExecutionError;
|
||||
use types::{
|
||||
basic_account::BasicAccount, encoded, filter::Filter as EthcoreFilter, ids::BlockId,
|
||||
receipt::Receipt,
|
||||
};
|
||||
|
||||
use jsonrpc_core::{
|
||||
futures::{future, future::Either, Future},
|
||||
Error, Result,
|
||||
};
|
||||
|
||||
use light::{
|
||||
cache::Cache,
|
||||
cht,
|
||||
client::LightChainClient,
|
||||
on_demand::{
|
||||
error::Error as OnDemandError, request, ExecutionResult, HeaderRef, OnDemandRequester,
|
||||
Request as OnDemandRequest, Response as OnDemandResponse,
|
||||
},
|
||||
request::Field,
|
||||
TransactionQueue, MAX_HEADERS_PER_REQUEST,
|
||||
};
|
||||
|
||||
use sync::{LightNetworkDispatcher, LightSyncProvider, ManageNetwork};
|
||||
|
||||
use ethereum_types::{Address, U256};
|
||||
use fastmap::H256FastMap;
|
||||
use hash::H256;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use types::transaction::{
|
||||
Action, LocalizedTransaction, PendingTransaction, SignedTransaction,
|
||||
Transaction as EthTransaction,
|
||||
};
|
||||
|
||||
use v1::{
|
||||
helpers::{dispatch, errors, CallRequest as CallRequestHelper},
|
||||
types::{BlockNumber, CallRequest, Log, Transaction},
|
||||
};
|
||||
|
||||
const NO_INVALID_BACK_REFS_PROOF: &str =
|
||||
"Fails only on invalid back-references; back-references here known to be valid; qed";
|
||||
const WRONG_RESPONSE_AMOUNT_TYPE_PROOF: &str =
|
||||
"responses correspond directly with requests in amount and type; qed";
|
||||
const DEFAULT_GAS_PRICE: u64 = 21_000;
|
||||
|
||||
pub fn light_all_transactions<S, OD>(
|
||||
dispatch: &Arc<dispatch::LightDispatcher<S, OD>>,
|
||||
) -> impl Iterator<Item = PendingTransaction>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
let txq = dispatch.transaction_queue.read();
|
||||
let chain_info = dispatch.client.chain_info();
|
||||
|
||||
let current = txq.ready_transactions(
|
||||
chain_info.best_block_number,
|
||||
chain_info.best_block_timestamp,
|
||||
);
|
||||
let future = txq.future_transactions(
|
||||
chain_info.best_block_number,
|
||||
chain_info.best_block_timestamp,
|
||||
);
|
||||
current.into_iter().chain(future.into_iter())
|
||||
}
|
||||
|
||||
/// Helper for fetching blockchain data either from the light client or the network
|
||||
/// as necessary.
|
||||
pub struct LightFetch<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
/// The light client.
|
||||
pub client: Arc<dyn LightChainClient>,
|
||||
/// The on-demand request service.
|
||||
pub on_demand: Arc<OD>,
|
||||
/// Handle to the network.
|
||||
pub sync: Arc<S>,
|
||||
/// The light data cache.
|
||||
pub cache: Arc<Mutex<Cache>>,
|
||||
/// Gas Price percentile
|
||||
pub gas_price_percentile: usize,
|
||||
}
|
||||
|
||||
impl<S, OD> Clone for LightFetch<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
client: self.client.clone(),
|
||||
on_demand: self.on_demand.clone(),
|
||||
sync: self.sync.clone(),
|
||||
cache: self.cache.clone(),
|
||||
gas_price_percentile: self.gas_price_percentile,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract a transaction at given index.
|
||||
pub fn extract_transaction_at_index(block: encoded::Block, index: usize) -> Option<Transaction> {
|
||||
block
|
||||
.transactions()
|
||||
.into_iter()
|
||||
.nth(index)
|
||||
// Verify if transaction signature is correct.
|
||||
.and_then(|tx| SignedTransaction::new(tx).ok())
|
||||
.map(|signed_tx| {
|
||||
let (signed, sender, _) = signed_tx.deconstruct();
|
||||
let block_hash = block.hash();
|
||||
let block_number = block.number();
|
||||
let transaction_index = index;
|
||||
let cached_sender = Some(sender);
|
||||
|
||||
LocalizedTransaction {
|
||||
signed,
|
||||
block_number,
|
||||
block_hash,
|
||||
transaction_index,
|
||||
cached_sender,
|
||||
}
|
||||
})
|
||||
.map(Transaction::from_localized)
|
||||
}
|
||||
|
||||
// extract the header indicated by the given `HeaderRef` from the given responses.
|
||||
// fails only if they do not correspond.
|
||||
fn extract_header(res: &[OnDemandResponse], header: HeaderRef) -> Option<encoded::Header> {
|
||||
match header {
|
||||
HeaderRef::Stored(hdr) => Some(hdr),
|
||||
HeaderRef::Unresolved(idx, _) => match res.get(idx) {
|
||||
Some(&OnDemandResponse::HeaderByHash(ref hdr)) => Some(hdr.clone()),
|
||||
_ => None,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, OD> LightFetch<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
// push the necessary requests onto the request chain to get the header by the given ID.
|
||||
// yield a header reference which other requests can use.
|
||||
fn make_header_requests(
|
||||
&self,
|
||||
id: BlockId,
|
||||
reqs: &mut Vec<OnDemandRequest>,
|
||||
) -> Result<HeaderRef> {
|
||||
if let Some(h) = self.client.block_header(id) {
|
||||
return Ok(h.into());
|
||||
}
|
||||
|
||||
match id {
|
||||
BlockId::Number(n) => {
|
||||
let cht_root =
|
||||
cht::block_to_cht_number(n).and_then(|cn| self.client.cht_root(cn as usize));
|
||||
match cht_root {
|
||||
None => Err(errors::unknown_block()),
|
||||
Some(root) => {
|
||||
let req = request::HeaderProof::new(n, root)
|
||||
.expect("only fails for 0; client always stores genesis; client already queried; qed");
|
||||
|
||||
let idx = reqs.len();
|
||||
let hash_ref = Field::back_ref(idx, 0);
|
||||
reqs.push(req.into());
|
||||
reqs.push(request::HeaderByHash(hash_ref).into());
|
||||
|
||||
Ok(HeaderRef::Unresolved(idx + 1, hash_ref))
|
||||
}
|
||||
}
|
||||
}
|
||||
BlockId::Hash(h) => {
|
||||
let idx = reqs.len();
|
||||
reqs.push(request::HeaderByHash(h.into()).into());
|
||||
Ok(HeaderRef::Unresolved(idx, h.into()))
|
||||
}
|
||||
_ => Err(errors::unknown_block()), // latest, earliest, and pending will have all already returned.
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a block header from the on demand service or client, or error.
|
||||
pub fn header(&self, id: BlockId) -> impl Future<Item = encoded::Header, Error = Error> + Send {
|
||||
let mut reqs = Vec::new();
|
||||
let header_ref = match self.make_header_requests(id, &mut reqs) {
|
||||
Ok(r) => r,
|
||||
Err(e) => return Either::A(future::err(e)),
|
||||
};
|
||||
|
||||
Either::B(self.send_requests(reqs, |res| {
|
||||
extract_header(&res, header_ref).expect(
|
||||
"these responses correspond to requests that header_ref belongs to \
|
||||
therefore it will not fail; qed",
|
||||
)
|
||||
}))
|
||||
}
|
||||
|
||||
/// Helper for getting contract code at a given block.
|
||||
pub fn code(
|
||||
&self,
|
||||
address: Address,
|
||||
id: BlockId,
|
||||
) -> impl Future<Item = Vec<u8>, Error = Error> + Send {
|
||||
let mut reqs = Vec::new();
|
||||
let header_ref = match self.make_header_requests(id, &mut reqs) {
|
||||
Ok(r) => r,
|
||||
Err(e) => return Either::A(future::err(e)),
|
||||
};
|
||||
|
||||
reqs.push(
|
||||
request::Account {
|
||||
header: header_ref.clone(),
|
||||
address,
|
||||
}
|
||||
.into(),
|
||||
);
|
||||
let account_idx = reqs.len() - 1;
|
||||
reqs.push(
|
||||
request::Code {
|
||||
header: header_ref,
|
||||
code_hash: Field::back_ref(account_idx, 0),
|
||||
}
|
||||
.into(),
|
||||
);
|
||||
|
||||
Either::B(self.send_requests(reqs, |mut res| match res.pop() {
|
||||
Some(OnDemandResponse::Code(code)) => code,
|
||||
_ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Helper for getting account info at a given block.
|
||||
/// `None` indicates the account doesn't exist at the given block.
|
||||
pub fn account(
|
||||
&self,
|
||||
address: Address,
|
||||
id: BlockId,
|
||||
tx_queue: Arc<RwLock<TransactionQueue>>,
|
||||
) -> impl Future<Item = Option<BasicAccount>, Error = Error> + Send {
|
||||
let mut reqs = Vec::new();
|
||||
let header_ref = match self.make_header_requests(id, &mut reqs) {
|
||||
Ok(r) => r,
|
||||
Err(e) => return Either::A(future::err(e)),
|
||||
};
|
||||
|
||||
reqs.push(
|
||||
request::Account {
|
||||
header: header_ref,
|
||||
address,
|
||||
}
|
||||
.into(),
|
||||
);
|
||||
|
||||
Either::B(self.send_requests(reqs, move |mut res| match res.pop() {
|
||||
Some(OnDemandResponse::Account(maybe_account)) => {
|
||||
if let Some(ref acc) = maybe_account {
|
||||
let mut txq = tx_queue.write();
|
||||
txq.cull(address, acc.nonce);
|
||||
}
|
||||
maybe_account
|
||||
}
|
||||
_ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Helper for getting proved execution.
|
||||
pub fn proved_read_only_execution(
|
||||
&self,
|
||||
req: CallRequest,
|
||||
num: Option<BlockNumber>,
|
||||
txq: Arc<RwLock<TransactionQueue>>,
|
||||
) -> impl Future<Item = ExecutionResult, Error = Error> + Send {
|
||||
// (21000 G_transaction + 32000 G_create + some marginal to allow a few operations)
|
||||
const START_GAS: u64 = 60_000;
|
||||
|
||||
let (sync, on_demand, client) = (
|
||||
self.sync.clone(),
|
||||
self.on_demand.clone(),
|
||||
self.client.clone(),
|
||||
);
|
||||
let req: CallRequestHelper = req.into();
|
||||
|
||||
// Note: Here we treat `Pending` as `Latest`.
|
||||
// Since light clients don't produce pending blocks
|
||||
// (they don't have state) we can safely fallback to `Latest`.
|
||||
let id = match num.unwrap_or_default() {
|
||||
BlockNumber::Num(n) => BlockId::Number(n),
|
||||
BlockNumber::Earliest => BlockId::Earliest,
|
||||
BlockNumber::Latest => BlockId::Latest,
|
||||
BlockNumber::Pending => {
|
||||
warn!("`Pending` is deprecated and may be removed in future versions. Falling back to `Latest`");
|
||||
BlockId::Latest
|
||||
}
|
||||
};
|
||||
|
||||
let from = req.from.unwrap_or_default();
|
||||
let nonce_fut = match req.nonce {
|
||||
Some(nonce) => Either::A(future::ok(Some(nonce))),
|
||||
None => Either::B(self.account(from, id, txq).map(|acc| acc.map(|a| a.nonce))),
|
||||
};
|
||||
|
||||
let gas_price_fut = match req.gas_price {
|
||||
Some(price) => Either::A(future::ok(price)),
|
||||
None => Either::B(self.gas_price()),
|
||||
};
|
||||
|
||||
// if nonce resolves, this should too since it'll be in the LRU-cache.
|
||||
let header_fut = self.header(id);
|
||||
|
||||
// fetch missing transaction fields from the network.
|
||||
Box::new(
|
||||
nonce_fut
|
||||
.join(gas_price_fut)
|
||||
.and_then(move |(nonce, gas_price)| {
|
||||
future::done(Ok((
|
||||
req.gas.is_some(),
|
||||
EthTransaction {
|
||||
nonce: nonce.unwrap_or_default(),
|
||||
action: req.to.map_or(Action::Create, Action::Call),
|
||||
gas: req.gas.unwrap_or_else(|| START_GAS.into()),
|
||||
gas_price,
|
||||
value: req.value.unwrap_or_default(),
|
||||
data: req.data.unwrap_or_default(),
|
||||
},
|
||||
)))
|
||||
})
|
||||
.join(header_fut)
|
||||
.and_then(move |((gas_known, tx), hdr)| {
|
||||
// then request proved execution.
|
||||
// TODO: get last-hashes from network.
|
||||
let hash = hdr.hash();
|
||||
let env_info = match client.env_info(BlockId::Hash(hash)) {
|
||||
Some(env_info) => env_info,
|
||||
_ => return Either::A(future::err(errors::unknown_block())),
|
||||
};
|
||||
|
||||
Either::B(execute_read_only_tx(
|
||||
gas_known,
|
||||
ExecuteParams {
|
||||
from,
|
||||
tx,
|
||||
hdr,
|
||||
env_info,
|
||||
engine: client.engine().clone(),
|
||||
on_demand,
|
||||
sync,
|
||||
},
|
||||
))
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
/// Helper to fetch the corpus gas price from 1) the cache 2) the network then it tries to estimate the percentile
|
||||
/// using `gas_price_percentile` if the estimated percentile is zero the `DEFAULT_GAS_PRICE` is returned
|
||||
pub fn gas_price(&self) -> impl Future<Item = U256, Error = Error> + Send {
|
||||
let gas_price_percentile = self.gas_price_percentile;
|
||||
|
||||
dispatch::light::fetch_gas_price_corpus(
|
||||
self.sync.clone(),
|
||||
self.client.clone(),
|
||||
self.on_demand.clone(),
|
||||
self.cache.clone(),
|
||||
)
|
||||
.map(move |corp| {
|
||||
corp.percentile(gas_price_percentile)
|
||||
.map_or_else(|| DEFAULT_GAS_PRICE.into(), |percentile| *percentile)
|
||||
})
|
||||
}
|
||||
|
||||
/// Get a block itself. Fails on unknown block ID.
|
||||
pub fn block(&self, id: BlockId) -> impl Future<Item = encoded::Block, Error = Error> + Send {
|
||||
let mut reqs = Vec::new();
|
||||
let header_ref = match self.make_header_requests(id, &mut reqs) {
|
||||
Ok(r) => r,
|
||||
Err(e) => return Either::A(future::err(e)),
|
||||
};
|
||||
|
||||
reqs.push(request::Body(header_ref).into());
|
||||
|
||||
Either::B(self.send_requests(reqs, |mut res| match res.pop() {
|
||||
Some(OnDemandResponse::Body(b)) => b,
|
||||
_ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Get the block receipts. Fails on unknown block ID.
|
||||
pub fn receipts(&self, id: BlockId) -> impl Future<Item = Vec<Receipt>, Error = Error> + Send {
|
||||
let mut reqs = Vec::new();
|
||||
let header_ref = match self.make_header_requests(id, &mut reqs) {
|
||||
Ok(r) => r,
|
||||
Err(e) => return Either::A(future::err(e)),
|
||||
};
|
||||
|
||||
reqs.push(request::BlockReceipts(header_ref).into());
|
||||
|
||||
Either::B(self.send_requests(reqs, |mut res| match res.pop() {
|
||||
Some(OnDemandResponse::Receipts(b)) => b,
|
||||
_ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF),
|
||||
}))
|
||||
}
|
||||
|
||||
pub fn logs_no_tx_hash(
|
||||
&self,
|
||||
filter: EthcoreFilter,
|
||||
) -> impl Future<Item = Vec<Log>, Error = Error> + Send {
|
||||
use jsonrpc_core::futures::stream::{self, Stream};
|
||||
|
||||
const MAX_BLOCK_RANGE: u64 = 1000;
|
||||
|
||||
let fetcher = self.clone();
|
||||
self.headers_range_by_block_id(filter.from_block, filter.to_block, MAX_BLOCK_RANGE)
|
||||
.and_then(move |mut headers| {
|
||||
if headers.is_empty() {
|
||||
return Either::A(future::ok(Vec::new()));
|
||||
}
|
||||
|
||||
let on_demand = &fetcher.on_demand;
|
||||
|
||||
let maybe_future = fetcher.sync.with_context(move |ctx| {
|
||||
// find all headers which match the filter, and fetch the receipts for each one.
|
||||
// match them with their numbers for easy sorting later.
|
||||
let bit_combos = filter.bloom_possibilities();
|
||||
let receipts_futures: Vec<_> = headers
|
||||
.drain(..)
|
||||
.filter(|ref hdr| {
|
||||
let hdr_bloom = hdr.log_bloom();
|
||||
bit_combos
|
||||
.iter()
|
||||
.any(|bloom| hdr_bloom.contains_bloom(bloom))
|
||||
})
|
||||
.map(|hdr| (hdr.number(), hdr.hash(), request::BlockReceipts(hdr.into())))
|
||||
.map(|(num, hash, req)| {
|
||||
on_demand
|
||||
.request(ctx, req)
|
||||
.expect(NO_INVALID_BACK_REFS_PROOF)
|
||||
.map(move |x| (num, hash, x))
|
||||
})
|
||||
.collect();
|
||||
|
||||
// as the receipts come in, find logs within them which match the filter.
|
||||
// insert them into a BTreeMap to maintain order by number and block index.
|
||||
stream::futures_unordered(receipts_futures)
|
||||
.fold(
|
||||
BTreeMap::new(),
|
||||
move |mut matches, (num, hash, receipts)| {
|
||||
let mut block_index: usize = 0;
|
||||
for (transaction_index, receipt) in receipts.into_iter().enumerate()
|
||||
{
|
||||
for (transaction_log_index, log) in
|
||||
receipt.logs.into_iter().enumerate()
|
||||
{
|
||||
if filter.matches(&log) {
|
||||
matches.insert(
|
||||
(num, block_index),
|
||||
Log {
|
||||
address: log.address,
|
||||
topics: log
|
||||
.topics
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.collect(),
|
||||
data: log.data.into(),
|
||||
block_hash: Some(hash),
|
||||
block_number: Some(num.into()),
|
||||
// No way to easily retrieve transaction hash, so let's just skip it.
|
||||
transaction_hash: None,
|
||||
transaction_index: Some(
|
||||
transaction_index.into(),
|
||||
),
|
||||
log_index: Some(block_index.into()),
|
||||
transaction_log_index: Some(
|
||||
transaction_log_index.into(),
|
||||
),
|
||||
log_type: "mined".into(),
|
||||
removed: false,
|
||||
},
|
||||
);
|
||||
}
|
||||
block_index += 1;
|
||||
}
|
||||
}
|
||||
future::ok::<_, OnDemandError>(matches)
|
||||
},
|
||||
)
|
||||
.map_err(errors::on_demand_error)
|
||||
.map(|matches| matches.into_iter().map(|(_, v)| v).collect())
|
||||
});
|
||||
|
||||
match maybe_future {
|
||||
Some(fut) => Either::B(Either::A(fut)),
|
||||
None => Either::B(Either::B(future::err(errors::network_disabled()))),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Get transaction logs
|
||||
pub fn logs(
|
||||
&self,
|
||||
filter: EthcoreFilter,
|
||||
) -> impl Future<Item = Vec<Log>, Error = Error> + Send {
|
||||
use jsonrpc_core::futures::stream::{self, Stream};
|
||||
let fetcher_block = self.clone();
|
||||
self.logs_no_tx_hash(filter)
|
||||
// retrieve transaction hash.
|
||||
.and_then(move |mut result| {
|
||||
let mut blocks = BTreeMap::new();
|
||||
for log in result.iter() {
|
||||
let block_hash = log
|
||||
.block_hash
|
||||
.as_ref()
|
||||
.expect("Previously initialized with value; qed");
|
||||
blocks
|
||||
.entry(*block_hash)
|
||||
.or_insert_with(|| fetcher_block.block(BlockId::Hash(*block_hash)));
|
||||
}
|
||||
// future get blocks (unordered it)
|
||||
stream::futures_unordered(blocks.into_iter().map(|(_, v)| v))
|
||||
.collect()
|
||||
.map(move |blocks| {
|
||||
let transactions_per_block: BTreeMap<_, _> = blocks
|
||||
.iter()
|
||||
.map(|block| (block.hash(), block.transactions()))
|
||||
.collect();
|
||||
for log in result.iter_mut() {
|
||||
let log_index = log
|
||||
.transaction_index
|
||||
.expect("Previously initialized with value; qed");
|
||||
let block_hash = log
|
||||
.block_hash
|
||||
.expect("Previously initialized with value; qed");
|
||||
let tx_hash = transactions_per_block
|
||||
.get(&block_hash)
|
||||
// transaction index is from an enumerate call in log common so not need to check value
|
||||
.and_then(|txs| txs.get(log_index.as_usize()))
|
||||
.map(types::transaction::UnverifiedTransaction::hash);
|
||||
log.transaction_hash = tx_hash;
|
||||
}
|
||||
result
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Get a transaction by hash. also returns the index in the block.
|
||||
// Only returns transactions in the canonical chain.
|
||||
pub fn transaction_by_hash(
|
||||
&self,
|
||||
tx_hash: H256,
|
||||
) -> impl Future<Item = Option<(Transaction, usize)>, Error = Error> + Send {
|
||||
let params = (self.sync.clone(), self.on_demand.clone());
|
||||
let fetcher: Self = self.clone();
|
||||
|
||||
Box::new(future::loop_fn(params, move |(sync, on_demand)| {
|
||||
let maybe_future = sync.with_context(|ctx| {
|
||||
let req = request::TransactionIndex(tx_hash.into());
|
||||
on_demand.request(ctx, req)
|
||||
});
|
||||
|
||||
let eventual_index = match maybe_future {
|
||||
Some(e) => e
|
||||
.expect(NO_INVALID_BACK_REFS_PROOF)
|
||||
.map_err(errors::on_demand_error),
|
||||
None => return Either::A(future::err(errors::network_disabled())),
|
||||
};
|
||||
|
||||
let fetcher = fetcher.clone();
|
||||
let extract_transaction = eventual_index.and_then(move |index| {
|
||||
// check that the block is known by number.
|
||||
// that ensures that it is within the chain that we are aware of.
|
||||
fetcher
|
||||
.block(BlockId::Number(index.num))
|
||||
.then(move |blk| match blk {
|
||||
Ok(blk) => {
|
||||
// if the block is known by number, make sure the
|
||||
// index from earlier isn't garbage.
|
||||
|
||||
if blk.hash() != index.hash {
|
||||
// index is on a different chain from us.
|
||||
return Ok(future::Loop::Continue((sync, on_demand)));
|
||||
}
|
||||
|
||||
let index = index.index as usize;
|
||||
let transaction = extract_transaction_at_index(blk, index);
|
||||
|
||||
if transaction.as_ref().map_or(true, |tx| tx.hash != tx_hash) {
|
||||
// index is actively wrong: indicated block has
|
||||
// fewer transactions than necessary or the transaction
|
||||
// at that index had a different hash.
|
||||
// TODO: punish peer/move into OnDemand somehow?
|
||||
Ok(future::Loop::Continue((sync, on_demand)))
|
||||
} else {
|
||||
let transaction = transaction.map(move |tx| (tx, index));
|
||||
Ok(future::Loop::Break(transaction))
|
||||
}
|
||||
}
|
||||
Err(ref e) if e == &errors::unknown_block() => {
|
||||
// block by number not in the canonical chain.
|
||||
Ok(future::Loop::Break(None))
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
})
|
||||
});
|
||||
|
||||
Either::B(extract_transaction)
|
||||
}))
|
||||
}
|
||||
|
||||
/// Helper to cull the `light` transaction queue of mined transactions
|
||||
pub fn light_cull(
|
||||
&self,
|
||||
txq: Arc<RwLock<TransactionQueue>>,
|
||||
) -> impl Future<Item = (), Error = Error> + Send {
|
||||
let senders = txq.read().queued_senders();
|
||||
if senders.is_empty() {
|
||||
return Either::B(future::err(errors::internal(
|
||||
"No pending local transactions",
|
||||
"",
|
||||
)));
|
||||
}
|
||||
|
||||
let sync = self.sync.clone();
|
||||
let on_demand = self.on_demand.clone();
|
||||
let best_header = self.client.best_block_header();
|
||||
let start_nonce = self
|
||||
.client
|
||||
.engine()
|
||||
.account_start_nonce(best_header.number());
|
||||
|
||||
let account_request = sync.with_context(move |ctx| {
|
||||
// fetch the nonce of each sender in the queue.
|
||||
let nonce_reqs = senders
|
||||
.iter()
|
||||
.map(|&address| request::Account {
|
||||
header: best_header.clone().into(),
|
||||
address,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// when they come in, update each sender to the new nonce.
|
||||
on_demand
|
||||
.request(ctx, nonce_reqs)
|
||||
.expect(NO_INVALID_BACK_REFS_PROOF)
|
||||
.map(move |accs| {
|
||||
let mut txq = txq.write();
|
||||
accs.into_iter()
|
||||
.map(|maybe_acc| maybe_acc.map_or(start_nonce, |acc| acc.nonce))
|
||||
.zip(senders)
|
||||
.for_each(|(nonce, addr)| {
|
||||
txq.cull(addr, nonce);
|
||||
});
|
||||
})
|
||||
.map_err(errors::on_demand_error)
|
||||
});
|
||||
|
||||
if let Some(fut) = account_request {
|
||||
Either::A(fut)
|
||||
} else {
|
||||
Either::B(future::err(errors::network_disabled()))
|
||||
}
|
||||
}
|
||||
|
||||
fn send_requests<T, F>(
|
||||
&self,
|
||||
reqs: Vec<OnDemandRequest>,
|
||||
parse_response: F,
|
||||
) -> impl Future<Item = T, Error = Error> + Send
|
||||
where
|
||||
F: FnOnce(Vec<OnDemandResponse>) -> T + Send + 'static,
|
||||
T: Send + 'static,
|
||||
{
|
||||
let maybe_future = self.sync.with_context(move |ctx| {
|
||||
Box::new(
|
||||
self.on_demand
|
||||
.request_raw(ctx, reqs)
|
||||
.expect(NO_INVALID_BACK_REFS_PROOF)
|
||||
.map_err(errors::on_demand_cancel)
|
||||
.and_then(|responses| match responses {
|
||||
Ok(responses) => Ok(parse_response(responses)),
|
||||
Err(e) => Err(errors::on_demand_error(e)),
|
||||
}),
|
||||
)
|
||||
});
|
||||
|
||||
match maybe_future {
|
||||
Some(recv) => recv,
|
||||
None => Box::new(future::err(errors::network_disabled()))
|
||||
as Box<dyn Future<Item = _, Error = _> + Send>,
|
||||
}
|
||||
}
|
||||
|
||||
fn headers_range_by_block_id(
|
||||
&self,
|
||||
from_block: BlockId,
|
||||
to_block: BlockId,
|
||||
max: u64,
|
||||
) -> impl Future<Item = Vec<encoded::Header>, Error = Error> {
|
||||
let fetch_hashes = [from_block, to_block]
|
||||
.iter()
|
||||
.filter_map(|block_id| match block_id {
|
||||
BlockId::Hash(hash) => Some(*hash),
|
||||
_ => None,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let best_number = self.client.chain_info().best_block_number;
|
||||
|
||||
let fetcher = self.clone();
|
||||
self.headers_by_hash(&fetch_hashes[..]).and_then(move |mut header_map| {
|
||||
let (from_block_num, to_block_num) = {
|
||||
let block_number = |id| match id {
|
||||
BlockId::Earliest => 0,
|
||||
BlockId::Latest => best_number,
|
||||
BlockId::Hash(ref h) =>
|
||||
header_map.get(h).map(types::encoded::Header::number)
|
||||
.expect("from_block and to_block headers are fetched by hash; this closure is only called on from_block and to_block; qed"),
|
||||
BlockId::Number(x) => x,
|
||||
};
|
||||
(block_number(from_block), block_number(to_block))
|
||||
};
|
||||
|
||||
if to_block_num < from_block_num {
|
||||
// early exit for "to" block before "from" block.
|
||||
return Either::A(future::err(errors::filter_block_not_found(to_block)));
|
||||
} else if to_block_num - from_block_num >= max {
|
||||
return Either::A(future::err(errors::request_rejected_param_limit(max, "blocks")));
|
||||
}
|
||||
|
||||
let to_header_hint = match to_block {
|
||||
BlockId::Hash(ref h) => header_map.remove(h),
|
||||
_ => None,
|
||||
};
|
||||
let headers_fut = fetcher.headers_range(from_block_num, to_block_num, to_header_hint);
|
||||
Either::B(headers_fut.map(move |headers| {
|
||||
// Validate from_block if it's a hash
|
||||
let last_hash = headers.last().map(types::encoded::Header::hash);
|
||||
match (last_hash, from_block) {
|
||||
(Some(h1), BlockId::Hash(h2)) if h1 != h2 => Vec::new(),
|
||||
_ => headers,
|
||||
}
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
||||
fn headers_by_hash(
|
||||
&self,
|
||||
hashes: &[H256],
|
||||
) -> impl Future<Item = H256FastMap<encoded::Header>, Error = Error> {
|
||||
let mut refs = H256FastMap::with_capacity_and_hasher(hashes.len(), Default::default());
|
||||
let mut reqs = Vec::with_capacity(hashes.len());
|
||||
|
||||
for hash in hashes {
|
||||
refs.entry(*hash).or_insert_with(|| {
|
||||
self.make_header_requests(BlockId::Hash(*hash), &mut reqs)
|
||||
.expect("make_header_requests never fails for BlockId::Hash; qed")
|
||||
});
|
||||
}
|
||||
|
||||
self.send_requests(reqs, move |res| {
|
||||
refs.into_iter()
|
||||
.map(|(hash, header_ref)| {
|
||||
let hdr = extract_header(&res, header_ref).expect(
|
||||
"these responses correspond to requests that header_ref belongs to; \
|
||||
qed",
|
||||
);
|
||||
(hash, hdr)
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
fn headers_range(
|
||||
&self,
|
||||
from_number: u64,
|
||||
to_number: u64,
|
||||
to_header_hint: Option<encoded::Header>,
|
||||
) -> impl Future<Item = Vec<encoded::Header>, Error = Error> {
|
||||
let range_length = (to_number - from_number + 1) as usize;
|
||||
let mut headers: Vec<encoded::Header> = Vec::with_capacity(range_length);
|
||||
|
||||
let iter_start = match to_header_hint {
|
||||
Some(hdr) => {
|
||||
let block_id = BlockId::Hash(hdr.parent_hash());
|
||||
headers.push(hdr);
|
||||
block_id
|
||||
}
|
||||
None => BlockId::Number(to_number),
|
||||
};
|
||||
headers.extend(
|
||||
self.client
|
||||
.ancestry_iter(iter_start)
|
||||
.take_while(|hdr| hdr.number() >= from_number),
|
||||
);
|
||||
|
||||
let fetcher = self.clone();
|
||||
future::loop_fn(headers, move |mut headers| {
|
||||
let remaining = range_length - headers.len();
|
||||
if remaining == 0 {
|
||||
return Either::A(future::ok(future::Loop::Break(headers)));
|
||||
}
|
||||
|
||||
let mut reqs: Vec<request::Request> = Vec::with_capacity(2);
|
||||
|
||||
let start_hash = if let Some(hdr) = headers.last() {
|
||||
hdr.parent_hash().into()
|
||||
} else {
|
||||
let cht_root = cht::block_to_cht_number(to_number)
|
||||
.and_then(|cht_num| fetcher.client.cht_root(cht_num as usize));
|
||||
|
||||
let cht_root = match cht_root {
|
||||
Some(cht_root) => cht_root,
|
||||
None => return Either::A(future::err(errors::unknown_block())),
|
||||
};
|
||||
|
||||
let header_proof = request::HeaderProof::new(to_number, cht_root).expect(
|
||||
"HeaderProof::new is Some(_) if cht::block_to_cht_number() is Some(_); \
|
||||
this would return above if block_to_cht_number returned None; qed",
|
||||
);
|
||||
|
||||
let idx = reqs.len();
|
||||
let hash_ref = Field::back_ref(idx, 0);
|
||||
reqs.push(header_proof.into());
|
||||
|
||||
hash_ref
|
||||
};
|
||||
|
||||
let max = cmp::min(remaining as u64, MAX_HEADERS_PER_REQUEST);
|
||||
reqs.push(
|
||||
request::HeaderWithAncestors {
|
||||
block_hash: start_hash,
|
||||
ancestor_count: max - 1,
|
||||
}
|
||||
.into(),
|
||||
);
|
||||
|
||||
Either::B(fetcher.send_requests(reqs, |mut res| {
|
||||
match res.last_mut() {
|
||||
Some(&mut OnDemandResponse::HeaderWithAncestors(ref mut res_headers)) => {
|
||||
headers.extend(res_headers.drain(..))
|
||||
}
|
||||
_ => {
|
||||
panic!("reqs has at least one entry; each request maps to a response; qed")
|
||||
}
|
||||
};
|
||||
future::Loop::Continue(headers)
|
||||
}))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct ExecuteParams<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
from: Address,
|
||||
tx: EthTransaction,
|
||||
hdr: encoded::Header,
|
||||
env_info: ::vm::EnvInfo,
|
||||
engine: Arc<dyn crate::ethcore::engines::EthEngine>,
|
||||
on_demand: Arc<OD>,
|
||||
sync: Arc<S>,
|
||||
}
|
||||
|
||||
impl<S, OD> Clone for ExecuteParams<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
from: self.from,
|
||||
tx: self.tx.clone(),
|
||||
hdr: self.hdr.clone(),
|
||||
env_info: self.env_info.clone(),
|
||||
engine: self.engine.clone(),
|
||||
on_demand: self.on_demand.clone(),
|
||||
sync: self.sync.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Has a peer execute the transaction with given params. If `gas_known` is false, this will set the `gas value` to the
|
||||
// `required gas value` unless it exceeds the block gas limit
|
||||
fn execute_read_only_tx<S, OD>(
|
||||
gas_known: bool,
|
||||
params: ExecuteParams<S, OD>,
|
||||
) -> impl Future<Item = ExecutionResult, Error = Error> + Send
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
if !gas_known {
|
||||
Box::new(future::loop_fn(params, |mut params| {
|
||||
execute_read_only_tx(true, params.clone()).and_then(move |res| {
|
||||
match res {
|
||||
Ok(executed) => {
|
||||
// `OutOfGas` exception, try double the gas
|
||||
if let Some(::vm::Error::OutOfGas) = executed.exception {
|
||||
// block gas limit already tried, regard as an error and don't retry
|
||||
if params.tx.gas >= params.hdr.gas_limit() {
|
||||
trace!(target: "light_fetch", "OutOutGas exception received, gas increase: failed");
|
||||
} else {
|
||||
params.tx.gas = cmp::min(params.tx.gas * 2_u32, params.hdr.gas_limit());
|
||||
trace!(target: "light_fetch", "OutOutGas exception received, gas increased to {}",
|
||||
params.tx.gas);
|
||||
return Ok(future::Loop::Continue(params))
|
||||
}
|
||||
}
|
||||
Ok(future::Loop::Break(Ok(executed)))
|
||||
}
|
||||
Err(ExecutionError::NotEnoughBaseGas { required, got }) => {
|
||||
trace!(target: "light_fetch", "Not enough start gas provided required: {}, got: {}",
|
||||
required, got);
|
||||
if required <= params.hdr.gas_limit() {
|
||||
params.tx.gas = required;
|
||||
Ok(future::Loop::Continue(params))
|
||||
} else {
|
||||
warn!(target: "light_fetch",
|
||||
"Required gas is bigger than block header's gas dropping the request");
|
||||
Ok(future::Loop::Break(Err(ExecutionError::NotEnoughBaseGas { required, got })))
|
||||
}
|
||||
}
|
||||
// Non-recoverable execution error
|
||||
failed => Ok(future::Loop::Break(failed)),
|
||||
}
|
||||
})
|
||||
})) as Box<dyn Future<Item = _, Error = _> + Send>
|
||||
} else {
|
||||
trace!(target: "light_fetch", "Placing execution request for {} gas in on_demand",
|
||||
params.tx.gas);
|
||||
|
||||
let request = request::TransactionProof {
|
||||
tx: params.tx.fake_sign(params.from),
|
||||
header: params.hdr.into(),
|
||||
env_info: params.env_info,
|
||||
engine: params.engine,
|
||||
};
|
||||
|
||||
let on_demand = params.on_demand;
|
||||
let proved_future = params.sync.with_context(move |ctx| {
|
||||
on_demand
|
||||
.request(ctx, request)
|
||||
.expect("no back-references; therefore all back-refs valid; qed")
|
||||
.map_err(errors::on_demand_error)
|
||||
});
|
||||
|
||||
match proved_future {
|
||||
Some(fut) => Box::new(fut) as Box<dyn Future<Item = _, Error = _> + Send>,
|
||||
None => Box::new(future::err(errors::network_disabled()))
|
||||
as Box<dyn Future<Item = _, Error = _> + Send>,
|
||||
}
|
||||
}
|
||||
}
|
@ -27,7 +27,6 @@ pub mod engine_signer;
|
||||
pub mod external_signer;
|
||||
pub mod fake_sign;
|
||||
pub mod ipfs;
|
||||
pub mod light_fetch;
|
||||
pub mod nonce;
|
||||
#[cfg(any(test, feature = "accounts"))]
|
||||
pub mod secretstore;
|
||||
@ -42,7 +41,7 @@ mod subscription_manager;
|
||||
mod work;
|
||||
|
||||
pub use self::{
|
||||
dispatch::{Dispatcher, FullDispatcher, LightDispatcher},
|
||||
dispatch::{Dispatcher, FullDispatcher},
|
||||
network_settings::NetworkSettings,
|
||||
poll_filter::{limit_logs, PollFilter, SyncPollFilter},
|
||||
poll_manager::PollManager,
|
||||
|
@ -155,36 +155,6 @@ enum PendingTransactionId {
|
||||
Location(PendingOrBlock, usize),
|
||||
}
|
||||
|
||||
pub fn base_logs<C, M, T: StateInfo + 'static>(
|
||||
client: &C,
|
||||
miner: &M,
|
||||
filter: Filter,
|
||||
) -> BoxFuture<Vec<Log>>
|
||||
where
|
||||
C: miner::BlockChainClient + BlockChainClient + StateClient<State = T> + Call<State = T>,
|
||||
M: MinerService<State = T>,
|
||||
{
|
||||
let include_pending = filter.to_block == Some(BlockNumber::Pending);
|
||||
let filter: EthcoreFilter = match filter.try_into() {
|
||||
Ok(value) => value,
|
||||
Err(err) => return Box::new(future::err(err)),
|
||||
};
|
||||
let mut logs = match client.logs(filter.clone()) {
|
||||
Ok(logs) => logs.into_iter().map(From::from).collect::<Vec<Log>>(),
|
||||
Err(id) => return Box::new(future::err(errors::filter_block_not_found(id))),
|
||||
};
|
||||
|
||||
if include_pending {
|
||||
let best_block = client.chain_info().best_block_number;
|
||||
let pending = pending_logs(&*miner, best_block, &filter);
|
||||
logs.extend(pending);
|
||||
}
|
||||
|
||||
let logs = limit_logs(logs, filter.limit);
|
||||
|
||||
Box::new(future::ok(logs))
|
||||
}
|
||||
|
||||
impl<C, SN: ?Sized, S: ?Sized, M, EM, T: StateInfo + 'static> EthClient<C, SN, S, M, EM>
|
||||
where
|
||||
C: miner::BlockChainClient
|
||||
@ -1009,7 +979,25 @@ where
|
||||
}
|
||||
|
||||
fn logs(&self, filter: Filter) -> BoxFuture<Vec<Log>> {
|
||||
base_logs(&*self.client, &*self.miner, filter)
|
||||
let include_pending = filter.to_block == Some(BlockNumber::Pending);
|
||||
let filter: EthcoreFilter = match filter.try_into() {
|
||||
Ok(value) => value,
|
||||
Err(err) => return Box::new(future::err(err)),
|
||||
};
|
||||
let mut logs = match self.client.logs(filter.clone()) {
|
||||
Ok(logs) => logs.into_iter().map(From::from).collect::<Vec<Log>>(),
|
||||
Err(id) => return Box::new(future::err(errors::filter_block_not_found(id))),
|
||||
};
|
||||
|
||||
if include_pending {
|
||||
let best_block = self.client.chain_info().best_block_number;
|
||||
let pending = pending_logs(&*self.miner, best_block, &filter);
|
||||
logs.extend(pending);
|
||||
}
|
||||
|
||||
let logs = limit_logs(logs, filter.limit);
|
||||
|
||||
Box::new(future::ok(logs))
|
||||
}
|
||||
|
||||
fn work(&self, no_new_work_timeout: Option<u64>) -> Result<Work> {
|
||||
|
@ -23,7 +23,7 @@ use std::{
|
||||
|
||||
use jsonrpc_core::{
|
||||
futures::{self, Future, IntoFuture},
|
||||
BoxFuture, Error, Result,
|
||||
Error, Result,
|
||||
};
|
||||
use jsonrpc_pubsub::{
|
||||
typed::{Sink, Subscriber},
|
||||
@ -31,7 +31,7 @@ use jsonrpc_pubsub::{
|
||||
};
|
||||
|
||||
use v1::{
|
||||
helpers::{errors, light_fetch::LightFetch, limit_logs, Subscribers},
|
||||
helpers::{errors, limit_logs, Subscribers},
|
||||
metadata::Metadata,
|
||||
traits::EthPubSub,
|
||||
types::{pubsub, Log, RichHeader},
|
||||
@ -39,15 +39,8 @@ use v1::{
|
||||
|
||||
use ethcore::client::{BlockChainClient, BlockId, ChainNotify, ChainRouteType, NewBlocks};
|
||||
use ethereum_types::H256;
|
||||
use light::{
|
||||
cache::Cache,
|
||||
client::{LightChainClient, LightChainNotify},
|
||||
on_demand::OnDemandRequester,
|
||||
};
|
||||
use parity_runtime::Executor;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
|
||||
use sync::{LightNetworkDispatcher, LightSyncProvider, ManageNetwork};
|
||||
use parking_lot::RwLock;
|
||||
|
||||
use types::{encoded, filter::Filter as EthFilter};
|
||||
|
||||
@ -98,31 +91,6 @@ impl<C> EthPubSubClient<C> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, OD> EthPubSubClient<LightFetch<S, OD>>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
/// Creates a new `EthPubSubClient` for `LightClient`.
|
||||
pub fn light(
|
||||
client: Arc<dyn LightChainClient>,
|
||||
on_demand: Arc<OD>,
|
||||
sync: Arc<S>,
|
||||
cache: Arc<Mutex<Cache>>,
|
||||
executor: Executor,
|
||||
gas_price_percentile: usize,
|
||||
) -> Self {
|
||||
let fetch = LightFetch {
|
||||
client,
|
||||
on_demand,
|
||||
sync,
|
||||
cache,
|
||||
gas_price_percentile,
|
||||
};
|
||||
EthPubSubClient::new(Arc::new(fetch), executor)
|
||||
}
|
||||
}
|
||||
|
||||
/// PubSub Notification handler.
|
||||
pub struct ChainNotificationHandler<C> {
|
||||
client: Arc<C>,
|
||||
@ -206,45 +174,6 @@ impl<C> ChainNotificationHandler<C> {
|
||||
}
|
||||
}
|
||||
|
||||
/// A light client wrapper struct.
|
||||
pub trait LightClient: Send + Sync {
|
||||
/// Get a recent block header.
|
||||
fn block_header(&self, id: BlockId) -> Option<encoded::Header>;
|
||||
|
||||
/// Fetch logs.
|
||||
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>>;
|
||||
}
|
||||
|
||||
impl<S, OD> LightClient for LightFetch<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
|
||||
self.client.block_header(id)
|
||||
}
|
||||
|
||||
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>> {
|
||||
Box::new(LightFetch::logs(self, filter)) as BoxFuture<_>
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: LightClient> LightChainNotify for ChainNotificationHandler<C> {
|
||||
fn new_headers(&self, enacted: &[H256]) {
|
||||
let headers = enacted
|
||||
.iter()
|
||||
.filter_map(|hash| self.client.block_header(BlockId::Hash(*hash)))
|
||||
.map(|header| (header, Default::default()))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.notify_heads(&headers);
|
||||
self.notify_logs(
|
||||
&enacted.iter().map(|h| (*h, ())).collect::<Vec<_>>(),
|
||||
|filter, _| self.client.logs(filter),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> {
|
||||
fn new_blocks(&self, new_blocks: NewBlocks) {
|
||||
if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() {
|
||||
|
@ -1,744 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Eth RPC interface for the light client.
|
||||
|
||||
use std::{collections::BTreeSet, sync::Arc};
|
||||
|
||||
use jsonrpc_core::{
|
||||
futures::{future, future::Either, Future},
|
||||
BoxFuture, Result,
|
||||
};
|
||||
|
||||
use light::{
|
||||
cache::Cache as LightDataCache,
|
||||
cht,
|
||||
client::LightChainClient,
|
||||
on_demand::{request, OnDemandRequester},
|
||||
TransactionQueue,
|
||||
};
|
||||
|
||||
use ethereum_types::{Address, H160, H256, H64, U256, U64};
|
||||
use hash::{KECCAK_EMPTY_LIST_RLP, KECCAK_NULL_RLP};
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use rlp::Rlp;
|
||||
use types::{
|
||||
encoded, filter::Filter as EthcoreFilter, ids::BlockId, transaction::SignedTransaction,
|
||||
};
|
||||
|
||||
use v1::{
|
||||
helpers::{
|
||||
deprecated::{self, DeprecationNotice},
|
||||
errors,
|
||||
light_fetch::{self, LightFetch},
|
||||
limit_logs, PollManager, SyncPollFilter,
|
||||
},
|
||||
impls::eth_filter::Filterable,
|
||||
metadata::Metadata,
|
||||
traits::Eth,
|
||||
types::{
|
||||
Block, BlockNumber, BlockTransactions, Bytes, CallRequest, EthAccount, Filter, Index,
|
||||
LightBlockNumber, Log, Receipt, RichBlock, SyncInfo as RpcSyncInfo,
|
||||
SyncStatus as RpcSyncStatus, Transaction, Work,
|
||||
},
|
||||
};
|
||||
|
||||
use sync::{LightNetworkDispatcher, LightSyncInfo, LightSyncProvider, ManageNetwork};
|
||||
|
||||
const NO_INVALID_BACK_REFS: &str =
|
||||
"Fails only on invalid back-references; back-references here known to be valid; qed";
|
||||
|
||||
/// Light client `ETH` (and filter) RPC.
|
||||
pub struct EthClient<
|
||||
C,
|
||||
S: LightSyncProvider + LightNetworkDispatcher + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
> {
|
||||
sync: Arc<S>,
|
||||
client: Arc<C>,
|
||||
on_demand: Arc<OD>,
|
||||
transaction_queue: Arc<RwLock<TransactionQueue>>,
|
||||
accounts: Arc<dyn Fn() -> Vec<Address> + Send + Sync>,
|
||||
cache: Arc<Mutex<LightDataCache>>,
|
||||
polls: Mutex<PollManager<SyncPollFilter>>,
|
||||
poll_lifetime: u32,
|
||||
gas_price_percentile: usize,
|
||||
deprecation_notice: DeprecationNotice,
|
||||
}
|
||||
|
||||
impl<C, S, OD> Clone for EthClient<C, S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
// each instance should have its own poll manager.
|
||||
EthClient {
|
||||
sync: self.sync.clone(),
|
||||
client: self.client.clone(),
|
||||
on_demand: self.on_demand.clone(),
|
||||
transaction_queue: self.transaction_queue.clone(),
|
||||
accounts: self.accounts.clone(),
|
||||
cache: self.cache.clone(),
|
||||
polls: Mutex::new(PollManager::new(self.poll_lifetime)),
|
||||
poll_lifetime: self.poll_lifetime,
|
||||
gas_price_percentile: self.gas_price_percentile,
|
||||
deprecation_notice: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<C, S, OD> EthClient<C, S, OD>
|
||||
where
|
||||
C: LightChainClient + 'static,
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
/// Create a new `EthClient` with a handle to the light sync instance, client,
|
||||
/// and on-demand request service, which is assumed to be attached as a handler.
|
||||
pub fn new(
|
||||
sync: Arc<S>,
|
||||
client: Arc<C>,
|
||||
on_demand: Arc<OD>,
|
||||
transaction_queue: Arc<RwLock<TransactionQueue>>,
|
||||
accounts: Arc<dyn Fn() -> Vec<Address> + Send + Sync>,
|
||||
cache: Arc<Mutex<LightDataCache>>,
|
||||
gas_price_percentile: usize,
|
||||
poll_lifetime: u32,
|
||||
) -> Self {
|
||||
EthClient {
|
||||
sync,
|
||||
client,
|
||||
on_demand,
|
||||
transaction_queue,
|
||||
accounts,
|
||||
cache,
|
||||
polls: Mutex::new(PollManager::new(poll_lifetime)),
|
||||
poll_lifetime,
|
||||
gas_price_percentile,
|
||||
deprecation_notice: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a light data fetcher instance.
|
||||
fn fetcher(&self) -> LightFetch<S, OD> {
|
||||
LightFetch {
|
||||
client: self.client.clone(),
|
||||
on_demand: self.on_demand.clone(),
|
||||
sync: self.sync.clone(),
|
||||
cache: self.cache.clone(),
|
||||
gas_price_percentile: self.gas_price_percentile,
|
||||
}
|
||||
}
|
||||
|
||||
// get a "rich" block structure. Fails on unknown block.
|
||||
fn rich_block(&self, id: BlockId, include_txs: bool) -> BoxFuture<RichBlock> {
|
||||
let (on_demand, sync) = (self.on_demand.clone(), self.sync.clone());
|
||||
let (client, engine) = (self.client.clone(), self.client.engine().clone());
|
||||
|
||||
// helper for filling out a rich block once we've got a block and a score.
|
||||
let fill_rich = move |block: encoded::Block, score: Option<U256>| {
|
||||
let header = block.decode_header();
|
||||
let extra_info = engine.extra_info(&header);
|
||||
RichBlock {
|
||||
inner: Block {
|
||||
hash: Some(header.hash()),
|
||||
size: Some(block.rlp().as_raw().len().into()),
|
||||
parent_hash: *header.parent_hash(),
|
||||
uncles_hash: *header.uncles_hash(),
|
||||
author: *header.author(),
|
||||
miner: *header.author(),
|
||||
state_root: *header.state_root(),
|
||||
transactions_root: *header.transactions_root(),
|
||||
receipts_root: *header.receipts_root(),
|
||||
number: Some(header.number().into()),
|
||||
gas_used: *header.gas_used(),
|
||||
gas_limit: *header.gas_limit(),
|
||||
logs_bloom: Some(*header.log_bloom()),
|
||||
timestamp: header.timestamp().into(),
|
||||
difficulty: *header.difficulty(),
|
||||
total_difficulty: score.map(Into::into),
|
||||
seal_fields: header.seal().iter().cloned().map(Into::into).collect(),
|
||||
uncles: block.uncle_hashes().into_iter().map(Into::into).collect(),
|
||||
transactions: match include_txs {
|
||||
true => BlockTransactions::Full(
|
||||
block
|
||||
.view()
|
||||
.localized_transactions()
|
||||
.into_iter()
|
||||
.map(Transaction::from_localized)
|
||||
.collect(),
|
||||
),
|
||||
_ => BlockTransactions::Hashes(
|
||||
block
|
||||
.transaction_hashes()
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.collect(),
|
||||
),
|
||||
},
|
||||
extra_data: Bytes::new(header.extra_data().clone()),
|
||||
},
|
||||
extra_info,
|
||||
}
|
||||
};
|
||||
|
||||
// get the block itself.
|
||||
Box::new(self.fetcher().block(id).and_then(move |block| {
|
||||
// then fetch the total difficulty (this is much easier after getting the block).
|
||||
match client.score(id) {
|
||||
Some(score) => Either::A(future::ok(fill_rich(block, Some(score)))),
|
||||
None => {
|
||||
// make a CHT request to fetch the chain score.
|
||||
let req = cht::block_to_cht_number(block.number())
|
||||
.and_then(|num| client.cht_root(num as usize))
|
||||
.and_then(|root| request::HeaderProof::new(block.number(), root));
|
||||
|
||||
let req = match req {
|
||||
Some(req) => req,
|
||||
None => {
|
||||
// somehow the genesis block slipped past other checks.
|
||||
// return it now.
|
||||
let score = client
|
||||
.block_header(BlockId::Number(0))
|
||||
.expect("genesis always stored; qed")
|
||||
.difficulty();
|
||||
|
||||
return Either::A(future::ok(fill_rich(block, Some(score))));
|
||||
}
|
||||
};
|
||||
|
||||
// three possible outcomes:
|
||||
// - network is down.
|
||||
// - we get a score, but our hash is non-canonical.
|
||||
// - we get a score, and our hash is canonical.
|
||||
let maybe_fut = sync.with_context(move |ctx| {
|
||||
on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS)
|
||||
});
|
||||
match maybe_fut {
|
||||
Some(fut) => Either::B(
|
||||
fut.map(move |(hash, score)| {
|
||||
let score = if hash == block.hash() {
|
||||
Some(score)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
fill_rich(block, score)
|
||||
})
|
||||
.map_err(errors::on_demand_error),
|
||||
),
|
||||
None => Either::A(future::err(errors::network_disabled())),
|
||||
}
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
impl<C, S, OD> Eth for EthClient<C, S, OD>
|
||||
where
|
||||
C: LightChainClient + 'static,
|
||||
S: LightSyncInfo + LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
type Metadata = Metadata;
|
||||
|
||||
fn protocol_version(&self) -> Result<String> {
|
||||
Ok(format!("{}", ::light::net::MAX_PROTOCOL_VERSION))
|
||||
}
|
||||
|
||||
fn syncing(&self) -> Result<RpcSyncStatus> {
|
||||
if self.sync.is_major_importing() {
|
||||
let chain_info = self.client.chain_info();
|
||||
let current_block = U256::from(chain_info.best_block_number);
|
||||
let highest_block = self
|
||||
.sync
|
||||
.highest_block()
|
||||
.map(U256::from)
|
||||
.unwrap_or_else(|| current_block);
|
||||
|
||||
Ok(RpcSyncStatus::Info(RpcSyncInfo {
|
||||
starting_block: U256::from(self.sync.start_block()),
|
||||
current_block,
|
||||
highest_block,
|
||||
warp_chunks_amount: None,
|
||||
warp_chunks_processed: None,
|
||||
}))
|
||||
} else {
|
||||
Ok(RpcSyncStatus::None)
|
||||
}
|
||||
}
|
||||
|
||||
fn author(&self) -> Result<H160> {
|
||||
(self.accounts)()
|
||||
.first()
|
||||
.cloned()
|
||||
.map(From::from)
|
||||
.ok_or_else(|| errors::account("No accounts were found", ""))
|
||||
}
|
||||
|
||||
fn is_mining(&self) -> Result<bool> {
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
fn chain_id(&self) -> Result<Option<U64>> {
|
||||
Ok(self.client.signing_chain_id().map(U64::from))
|
||||
}
|
||||
|
||||
fn hashrate(&self) -> Result<U256> {
|
||||
Ok(Default::default())
|
||||
}
|
||||
|
||||
fn gas_price(&self) -> BoxFuture<U256> {
|
||||
Box::new(self.fetcher().gas_price())
|
||||
}
|
||||
|
||||
fn accounts(&self) -> Result<Vec<H160>> {
|
||||
self.deprecation_notice
|
||||
.print("eth_accounts", deprecated::msgs::ACCOUNTS);
|
||||
|
||||
Ok((self.accounts)().into_iter().map(Into::into).collect())
|
||||
}
|
||||
|
||||
fn block_number(&self) -> Result<U256> {
|
||||
Ok(self.client.chain_info().best_block_number.into())
|
||||
}
|
||||
|
||||
fn balance(&self, address: H160, num: Option<BlockNumber>) -> BoxFuture<U256> {
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.account(
|
||||
address,
|
||||
num.unwrap_or_default().to_block_id(),
|
||||
self.transaction_queue.clone(),
|
||||
)
|
||||
.map(|acc| acc.map_or(0.into(), |a| a.balance)),
|
||||
)
|
||||
}
|
||||
|
||||
fn storage_at(&self, _address: H160, _key: U256, _num: Option<BlockNumber>) -> BoxFuture<H256> {
|
||||
Box::new(future::err(errors::unimplemented(None)))
|
||||
}
|
||||
|
||||
fn block_by_hash(&self, hash: H256, include_txs: bool) -> BoxFuture<Option<RichBlock>> {
|
||||
Box::new(self.rich_block(BlockId::Hash(hash), include_txs).map(Some))
|
||||
}
|
||||
|
||||
fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture<Option<RichBlock>> {
|
||||
Box::new(self.rich_block(num.to_block_id(), include_txs).map(Some))
|
||||
}
|
||||
|
||||
fn transaction_count(&self, address: H160, num: Option<BlockNumber>) -> BoxFuture<U256> {
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.account(
|
||||
address,
|
||||
num.unwrap_or_default().to_block_id(),
|
||||
self.transaction_queue.clone(),
|
||||
)
|
||||
.map(|acc| acc.map_or(0.into(), |a| a.nonce)),
|
||||
)
|
||||
}
|
||||
|
||||
fn block_transaction_count_by_hash(&self, hash: H256) -> BoxFuture<Option<U256>> {
|
||||
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
|
||||
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.header(BlockId::Hash(hash))
|
||||
.and_then(move |hdr| {
|
||||
if hdr.transactions_root() == KECCAK_NULL_RLP {
|
||||
Either::A(future::ok(Some(U256::from(0))))
|
||||
} else {
|
||||
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
|
||||
.map(|x| x.expect(NO_INVALID_BACK_REFS))
|
||||
.map(|x| x.map(|b| Some(U256::from(b.transactions_count()))))
|
||||
.map(|x| Either::B(x.map_err(errors::on_demand_error)))
|
||||
.unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
|
||||
}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<U256>> {
|
||||
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
|
||||
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.header(num.to_block_id())
|
||||
.and_then(move |hdr| {
|
||||
if hdr.transactions_root() == KECCAK_NULL_RLP {
|
||||
Either::A(future::ok(Some(U256::from(0))))
|
||||
} else {
|
||||
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
|
||||
.map(|x| x.expect(NO_INVALID_BACK_REFS))
|
||||
.map(|x| x.map(|b| Some(U256::from(b.transactions_count()))))
|
||||
.map(|x| Either::B(x.map_err(errors::on_demand_error)))
|
||||
.unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
|
||||
}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
fn block_uncles_count_by_hash(&self, hash: H256) -> BoxFuture<Option<U256>> {
|
||||
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
|
||||
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.header(BlockId::Hash(hash))
|
||||
.and_then(move |hdr| {
|
||||
if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP {
|
||||
Either::A(future::ok(Some(U256::from(0))))
|
||||
} else {
|
||||
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
|
||||
.map(|x| x.expect(NO_INVALID_BACK_REFS))
|
||||
.map(|x| x.map(|b| Some(U256::from(b.uncles_count()))))
|
||||
.map(|x| Either::B(x.map_err(errors::on_demand_error)))
|
||||
.unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
|
||||
}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<U256>> {
|
||||
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
|
||||
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.header(num.to_block_id())
|
||||
.and_then(move |hdr| {
|
||||
if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP {
|
||||
Either::B(future::ok(Some(U256::from(0))))
|
||||
} else {
|
||||
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
|
||||
.map(|x| x.expect(NO_INVALID_BACK_REFS))
|
||||
.map(|x| x.map(|b| Some(U256::from(b.uncles_count()))))
|
||||
.map(|x| Either::A(x.map_err(errors::on_demand_error)))
|
||||
.unwrap_or_else(|| Either::B(future::err(errors::network_disabled())))
|
||||
}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
fn code_at(&self, address: H160, num: Option<BlockNumber>) -> BoxFuture<Bytes> {
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.code(address, num.unwrap_or_default().to_block_id())
|
||||
.map(Into::into),
|
||||
)
|
||||
}
|
||||
|
||||
fn send_raw_transaction(&self, raw: Bytes) -> Result<H256> {
|
||||
let best_header = self
|
||||
.client
|
||||
.best_block_header()
|
||||
.decode()
|
||||
.map_err(errors::decode)?;
|
||||
|
||||
Rlp::new(&raw.into_vec())
|
||||
.as_val()
|
||||
.map_err(errors::rlp)
|
||||
.and_then(|tx| {
|
||||
self.client
|
||||
.engine()
|
||||
.verify_transaction_basic(&tx, &best_header)
|
||||
.map_err(errors::transaction)?;
|
||||
|
||||
let signed = SignedTransaction::new(tx).map_err(errors::transaction)?;
|
||||
let hash = signed.hash();
|
||||
|
||||
self.transaction_queue
|
||||
.write()
|
||||
.import(signed.into())
|
||||
.map(|_| hash)
|
||||
.map_err(errors::transaction)
|
||||
})
|
||||
.map(Into::into)
|
||||
}
|
||||
|
||||
fn submit_transaction(&self, raw: Bytes) -> Result<H256> {
|
||||
self.send_raw_transaction(raw)
|
||||
}
|
||||
|
||||
fn call(&self, req: CallRequest, num: Option<BlockNumber>) -> BoxFuture<Bytes> {
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.proved_read_only_execution(req, num, self.transaction_queue.clone())
|
||||
.and_then(|res| match res {
|
||||
Ok(exec) => Ok(exec.output.into()),
|
||||
Err(e) => Err(errors::execution(e)),
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
fn estimate_gas(&self, req: CallRequest, num: Option<BlockNumber>) -> BoxFuture<U256> {
|
||||
// TODO: binary chop for more accurate estimates.
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.proved_read_only_execution(req, num, self.transaction_queue.clone())
|
||||
.and_then(|res| match res {
|
||||
Ok(exec) => Ok(exec.refunded + exec.gas_used),
|
||||
Err(e) => Err(errors::execution(e)),
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
fn transaction_by_hash(&self, hash: H256) -> BoxFuture<Option<Transaction>> {
|
||||
let in_txqueue = self.transaction_queue.read().get(&hash).is_some();
|
||||
|
||||
// The transaction is in the `local txqueue` then fetch the latest state from the network and attempt
|
||||
// to cull the transaction queue.
|
||||
if in_txqueue {
|
||||
// Note, this will block (relies on HTTP timeout) to make sure `cull` will finish to avoid having to call
|
||||
// `eth_getTransactionByHash` more than once to ensure the `txqueue` is up to `date` when it is called
|
||||
if let Err(e) = self
|
||||
.fetcher()
|
||||
.light_cull(self.transaction_queue.clone())
|
||||
.wait()
|
||||
{
|
||||
debug!(target: "cull", "failed because of: {:?}", e);
|
||||
}
|
||||
if let Some(tx) = self.transaction_queue.read().get(&hash) {
|
||||
return Box::new(future::ok(Some(Transaction::from_pending(tx.clone()))));
|
||||
}
|
||||
}
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.transaction_by_hash(hash)
|
||||
.map(|x| x.map(|(tx, _)| tx)),
|
||||
)
|
||||
}
|
||||
|
||||
fn transaction_by_block_hash_and_index(
|
||||
&self,
|
||||
hash: H256,
|
||||
idx: Index,
|
||||
) -> BoxFuture<Option<Transaction>> {
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.block(BlockId::Hash(hash))
|
||||
.map(move |block| light_fetch::extract_transaction_at_index(block, idx.value())),
|
||||
)
|
||||
}
|
||||
|
||||
fn transaction_by_block_number_and_index(
|
||||
&self,
|
||||
num: BlockNumber,
|
||||
idx: Index,
|
||||
) -> BoxFuture<Option<Transaction>> {
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.block(num.to_block_id())
|
||||
.map(move |block| light_fetch::extract_transaction_at_index(block, idx.value())),
|
||||
)
|
||||
}
|
||||
|
||||
fn transaction_receipt(&self, hash: H256) -> BoxFuture<Option<Receipt>> {
|
||||
let fetcher = self.fetcher();
|
||||
Box::new(fetcher.transaction_by_hash(hash).and_then(move |tx| {
|
||||
// the block hash included in the transaction object here has
|
||||
// already been checked for canonicality and whether it contains
|
||||
// the transaction.
|
||||
match tx {
|
||||
Some((tx, index)) => match tx.block_hash {
|
||||
Some(block_hash) => {
|
||||
let extract_receipt = fetcher
|
||||
.receipts(BlockId::Hash(block_hash))
|
||||
.and_then(move |mut receipts| future::ok(receipts.swap_remove(index)))
|
||||
.map(Receipt::from)
|
||||
.map(move |mut receipt| {
|
||||
receipt.transaction_hash = Some(hash);
|
||||
receipt.transaction_index = Some(index.into());
|
||||
receipt.block_hash = Some(block_hash);
|
||||
receipt.block_number = tx.block_number;
|
||||
receipt
|
||||
})
|
||||
.map(Some);
|
||||
|
||||
Either::B(extract_receipt)
|
||||
}
|
||||
None => Either::A(future::err(errors::unknown_block())),
|
||||
},
|
||||
None => Either::A(future::ok(None)),
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
fn uncle_by_block_hash_and_index(
|
||||
&self,
|
||||
hash: H256,
|
||||
idx: Index,
|
||||
) -> BoxFuture<Option<RichBlock>> {
|
||||
let client = self.client.clone();
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.block(BlockId::Hash(hash))
|
||||
.map(move |block| extract_uncle_at_index(block, idx, client)),
|
||||
)
|
||||
}
|
||||
|
||||
fn uncle_by_block_number_and_index(
|
||||
&self,
|
||||
num: BlockNumber,
|
||||
idx: Index,
|
||||
) -> BoxFuture<Option<RichBlock>> {
|
||||
let client = self.client.clone();
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.block(num.to_block_id())
|
||||
.map(move |block| extract_uncle_at_index(block, idx, client)),
|
||||
)
|
||||
}
|
||||
|
||||
fn proof(
|
||||
&self,
|
||||
_address: H160,
|
||||
_values: Vec<H256>,
|
||||
_num: Option<BlockNumber>,
|
||||
) -> BoxFuture<EthAccount> {
|
||||
Box::new(future::err(errors::unimplemented(None)))
|
||||
}
|
||||
|
||||
fn compilers(&self) -> Result<Vec<String>> {
|
||||
Err(errors::deprecated(
|
||||
"Compilation functionality is deprecated.".to_string(),
|
||||
))
|
||||
}
|
||||
|
||||
fn compile_lll(&self, _: String) -> Result<Bytes> {
|
||||
Err(errors::deprecated(
|
||||
"Compilation of LLL via RPC is deprecated".to_string(),
|
||||
))
|
||||
}
|
||||
|
||||
fn compile_serpent(&self, _: String) -> Result<Bytes> {
|
||||
Err(errors::deprecated(
|
||||
"Compilation of Serpent via RPC is deprecated".to_string(),
|
||||
))
|
||||
}
|
||||
|
||||
fn compile_solidity(&self, _: String) -> Result<Bytes> {
|
||||
Err(errors::deprecated(
|
||||
"Compilation of Solidity via RPC is deprecated".to_string(),
|
||||
))
|
||||
}
|
||||
|
||||
fn logs(&self, filter: Filter) -> BoxFuture<Vec<Log>> {
|
||||
let limit = filter.limit;
|
||||
|
||||
Box::new(
|
||||
Filterable::logs(
|
||||
self,
|
||||
match filter.try_into() {
|
||||
Ok(value) => value,
|
||||
Err(err) => return Box::new(future::err(err)),
|
||||
},
|
||||
)
|
||||
.map(move |logs| limit_logs(logs, limit)),
|
||||
)
|
||||
}
|
||||
|
||||
fn work(&self, _timeout: Option<u64>) -> Result<Work> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn submit_work(&self, _nonce: H64, _pow_hash: H256, _mix_hash: H256) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn submit_hashrate(&self, _rate: U256, _id: H256) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
}
|
||||
|
||||
// This trait implementation triggers a blanked impl of `EthFilter`.
|
||||
impl<C, S, OD> Filterable for EthClient<C, S, OD>
|
||||
where
|
||||
C: LightChainClient + 'static,
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
fn best_block_number(&self) -> u64 {
|
||||
self.client.chain_info().best_block_number
|
||||
}
|
||||
|
||||
fn block_hash(&self, id: BlockId) -> Option<H256> {
|
||||
self.client.block_hash(id)
|
||||
}
|
||||
|
||||
fn pending_transaction_hashes(&self) -> BTreeSet<H256> {
|
||||
BTreeSet::new()
|
||||
}
|
||||
|
||||
fn logs(&self, filter: EthcoreFilter) -> BoxFuture<Vec<Log>> {
|
||||
Box::new(self.fetcher().logs(filter)) as BoxFuture<_>
|
||||
}
|
||||
|
||||
fn pending_logs(&self, _block_number: u64, _filter: &EthcoreFilter) -> Vec<Log> {
|
||||
Vec::new() // light clients don't mine.
|
||||
}
|
||||
|
||||
fn polls(&self) -> &Mutex<PollManager<SyncPollFilter>> {
|
||||
&self.polls
|
||||
}
|
||||
|
||||
fn removed_logs(
|
||||
&self,
|
||||
_block_hash: ::ethereum_types::H256,
|
||||
_filter: &EthcoreFilter,
|
||||
) -> (Vec<Log>, u64) {
|
||||
(Default::default(), 0)
|
||||
}
|
||||
}
|
||||
|
||||
fn extract_uncle_at_index<T: LightChainClient>(
|
||||
block: encoded::Block,
|
||||
index: Index,
|
||||
client: Arc<T>,
|
||||
) -> Option<RichBlock> {
|
||||
let uncle = match block.uncles().into_iter().nth(index.value()) {
|
||||
Some(u) => u,
|
||||
None => return None,
|
||||
};
|
||||
|
||||
let extra_info = client.engine().extra_info(&uncle);
|
||||
Some(RichBlock {
|
||||
inner: Block {
|
||||
hash: Some(uncle.hash()),
|
||||
size: None,
|
||||
parent_hash: *uncle.parent_hash(),
|
||||
uncles_hash: *uncle.uncles_hash(),
|
||||
author: *uncle.author(),
|
||||
miner: *uncle.author(),
|
||||
state_root: *uncle.state_root(),
|
||||
transactions_root: *uncle.transactions_root(),
|
||||
number: Some(uncle.number().into()),
|
||||
gas_used: *uncle.gas_used(),
|
||||
gas_limit: *uncle.gas_limit(),
|
||||
logs_bloom: Some(*uncle.log_bloom()),
|
||||
timestamp: uncle.timestamp().into(),
|
||||
difficulty: *uncle.difficulty(),
|
||||
total_difficulty: None,
|
||||
receipts_root: *uncle.receipts_root(),
|
||||
extra_data: uncle.extra_data().clone().into(),
|
||||
seal_fields: uncle.seal().iter().cloned().map(Into::into).collect(),
|
||||
uncles: vec![],
|
||||
transactions: BlockTransactions::Hashes(vec![]),
|
||||
},
|
||||
extra_info,
|
||||
})
|
||||
}
|
@ -1,31 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! RPC implementations for the light client.
|
||||
//!
|
||||
//! This doesn't re-implement all of the RPC APIs, just those which aren't
|
||||
//! significantly generic to be reused.
|
||||
|
||||
pub mod eth;
|
||||
pub mod net;
|
||||
pub mod parity;
|
||||
pub mod parity_set;
|
||||
pub mod trace;
|
||||
|
||||
pub use self::{
|
||||
eth::EthClient, net::NetClient, parity::ParityClient, parity_set::ParitySetClient,
|
||||
trace::TracesClient,
|
||||
};
|
@ -1,53 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Net rpc implementation.
|
||||
use jsonrpc_core::Result;
|
||||
use std::sync::Arc;
|
||||
use sync::LightSyncProvider;
|
||||
use v1::traits::Net;
|
||||
|
||||
/// Net rpc implementation.
|
||||
pub struct NetClient<S: ?Sized> {
|
||||
sync: Arc<S>,
|
||||
}
|
||||
|
||||
impl<S: ?Sized> NetClient<S>
|
||||
where
|
||||
S: LightSyncProvider,
|
||||
{
|
||||
/// Creates new NetClient.
|
||||
pub fn new(sync: Arc<S>) -> Self {
|
||||
NetClient { sync }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ?Sized + Sync + Send + 'static> Net for NetClient<S>
|
||||
where
|
||||
S: LightSyncProvider,
|
||||
{
|
||||
fn version(&self) -> Result<String> {
|
||||
Ok(format!("{}", self.sync.network_id()).to_owned())
|
||||
}
|
||||
|
||||
fn peer_count(&self) -> Result<String> {
|
||||
Ok(format!("0x{:x}", self.sync.peer_numbers().connected as u64).to_owned())
|
||||
}
|
||||
|
||||
fn is_listening(&self) -> Result<bool> {
|
||||
Ok(true)
|
||||
}
|
||||
}
|
@ -1,464 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Parity-specific rpc implementation.
|
||||
use std::{collections::BTreeMap, sync::Arc};
|
||||
|
||||
use version::version_data;
|
||||
|
||||
use crypto::DEFAULT_MAC;
|
||||
use ethcore_logger::RotatingLogger;
|
||||
use ethereum_types::{H160, H256, H512, H64, U256, U64};
|
||||
use ethkey::{crypto::ecies, Brain, Generator};
|
||||
use ethstore::random_phrase;
|
||||
use sync::{LightNetworkDispatcher, LightSyncInfo, LightSyncProvider, ManageNetwork};
|
||||
use updater::VersionInfo as UpdaterVersionInfo;
|
||||
|
||||
use jsonrpc_core::{
|
||||
futures::{future, Future},
|
||||
BoxFuture, Result,
|
||||
};
|
||||
use light::on_demand::OnDemandRequester;
|
||||
use v1::{
|
||||
helpers::{
|
||||
self,
|
||||
dispatch::LightDispatcher,
|
||||
errors,
|
||||
external_signer::{SignerService, SigningQueue},
|
||||
ipfs,
|
||||
light_fetch::{light_all_transactions, LightFetch},
|
||||
verify_signature, NetworkSettings,
|
||||
},
|
||||
metadata::Metadata,
|
||||
traits::Parity,
|
||||
types::{
|
||||
BlockNumber, Bytes, CallRequest, ChainStatus, ConsensusCapability, Filter, Header,
|
||||
Histogram, LightBlockNumber, LocalTransactionStatus, Log, OperationsInfo, Peers, Receipt,
|
||||
RecoveredAccount, RichHeader, RpcSettings, Transaction, TransactionStats, VersionInfo,
|
||||
},
|
||||
};
|
||||
use Host;
|
||||
|
||||
/// Parity implementation for light client.
|
||||
pub struct ParityClient<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
light_dispatch: Arc<LightDispatcher<S, OD>>,
|
||||
logger: Arc<RotatingLogger>,
|
||||
settings: Arc<NetworkSettings>,
|
||||
signer: Option<Arc<SignerService>>,
|
||||
ws_address: Option<Host>,
|
||||
gas_price_percentile: usize,
|
||||
}
|
||||
|
||||
impl<S, OD> ParityClient<S, OD>
|
||||
where
|
||||
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
/// Creates new `ParityClient`.
|
||||
pub fn new(
|
||||
light_dispatch: Arc<LightDispatcher<S, OD>>,
|
||||
logger: Arc<RotatingLogger>,
|
||||
settings: Arc<NetworkSettings>,
|
||||
signer: Option<Arc<SignerService>>,
|
||||
ws_address: Option<Host>,
|
||||
gas_price_percentile: usize,
|
||||
) -> Self {
|
||||
ParityClient {
|
||||
light_dispatch,
|
||||
logger,
|
||||
settings,
|
||||
signer,
|
||||
ws_address,
|
||||
gas_price_percentile,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a light blockchain data fetcher.
|
||||
fn fetcher(&self) -> LightFetch<S, OD> {
|
||||
LightFetch {
|
||||
client: self.light_dispatch.client.clone(),
|
||||
on_demand: self.light_dispatch.on_demand.clone(),
|
||||
sync: self.light_dispatch.sync.clone(),
|
||||
cache: self.light_dispatch.cache.clone(),
|
||||
gas_price_percentile: self.gas_price_percentile,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, OD> Parity for ParityClient<S, OD>
|
||||
where
|
||||
S: LightSyncInfo + LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
|
||||
OD: OnDemandRequester + 'static,
|
||||
{
|
||||
type Metadata = Metadata;
|
||||
|
||||
fn transactions_limit(&self) -> Result<usize> {
|
||||
Ok(usize::max_value())
|
||||
}
|
||||
|
||||
fn min_gas_price(&self) -> Result<U256> {
|
||||
Ok(U256::default())
|
||||
}
|
||||
|
||||
fn extra_data(&self) -> Result<Bytes> {
|
||||
Ok(Bytes::default())
|
||||
}
|
||||
|
||||
fn gas_floor_target(&self) -> Result<U256> {
|
||||
Ok(U256::default())
|
||||
}
|
||||
|
||||
fn gas_ceil_target(&self) -> Result<U256> {
|
||||
Ok(U256::default())
|
||||
}
|
||||
|
||||
fn dev_logs(&self) -> Result<Vec<String>> {
|
||||
let logs = self.logger.logs();
|
||||
Ok(logs.as_slice().to_owned())
|
||||
}
|
||||
|
||||
fn dev_logs_levels(&self) -> Result<String> {
|
||||
Ok(self.logger.levels().to_owned())
|
||||
}
|
||||
|
||||
fn net_chain(&self) -> Result<String> {
|
||||
Ok(self.settings.chain.clone())
|
||||
}
|
||||
|
||||
fn net_peers(&self) -> Result<Peers> {
|
||||
let peers = self
|
||||
.light_dispatch
|
||||
.sync
|
||||
.peers()
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.collect();
|
||||
let peer_numbers = self.light_dispatch.sync.peer_numbers();
|
||||
|
||||
Ok(Peers {
|
||||
active: peer_numbers.active,
|
||||
connected: peer_numbers.connected,
|
||||
max: peer_numbers.max as u32,
|
||||
peers,
|
||||
})
|
||||
}
|
||||
|
||||
fn net_port(&self) -> Result<u16> {
|
||||
Ok(self.settings.network_port)
|
||||
}
|
||||
|
||||
fn node_name(&self) -> Result<String> {
|
||||
Ok(self.settings.name.clone())
|
||||
}
|
||||
|
||||
fn registry_address(&self) -> Result<Option<H160>> {
|
||||
let reg = self.light_dispatch.client.engine().params().registrar;
|
||||
if reg == Default::default() {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(reg))
|
||||
}
|
||||
}
|
||||
|
||||
fn rpc_settings(&self) -> Result<RpcSettings> {
|
||||
Ok(RpcSettings {
|
||||
enabled: self.settings.rpc_enabled,
|
||||
interface: self.settings.rpc_interface.clone(),
|
||||
port: self.settings.rpc_port as u64,
|
||||
})
|
||||
}
|
||||
|
||||
fn default_extra_data(&self) -> Result<Bytes> {
|
||||
Ok(Bytes::new(version_data()))
|
||||
}
|
||||
|
||||
fn gas_price_histogram(&self) -> BoxFuture<Histogram> {
|
||||
Box::new(
|
||||
self.light_dispatch
|
||||
.gas_price_corpus()
|
||||
.and_then(|corpus| corpus.histogram(10).ok_or_else(errors::not_enough_data))
|
||||
.map(Into::into),
|
||||
)
|
||||
}
|
||||
|
||||
fn unsigned_transactions_count(&self) -> Result<usize> {
|
||||
match self.signer {
|
||||
None => Err(errors::signer_disabled()),
|
||||
Some(ref signer) => Ok(signer.len()),
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_secret_phrase(&self) -> Result<String> {
|
||||
Ok(random_phrase(12))
|
||||
}
|
||||
|
||||
fn phrase_to_address(&self, phrase: String) -> Result<H160> {
|
||||
Ok(Brain::new(phrase)
|
||||
.generate()
|
||||
.expect("Brain::generate always returns Ok; qed")
|
||||
.address())
|
||||
}
|
||||
|
||||
fn list_accounts(
|
||||
&self,
|
||||
_: u64,
|
||||
_: Option<H160>,
|
||||
_: Option<BlockNumber>,
|
||||
) -> Result<Option<Vec<H160>>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn list_storage_keys(
|
||||
&self,
|
||||
_: H160,
|
||||
_: u64,
|
||||
_: Option<H256>,
|
||||
_: Option<BlockNumber>,
|
||||
) -> Result<Option<Vec<H256>>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn encrypt_message(&self, key: H512, phrase: Bytes) -> Result<Bytes> {
|
||||
ecies::encrypt(&key, &DEFAULT_MAC, &phrase.0)
|
||||
.map_err(errors::encryption)
|
||||
.map(Into::into)
|
||||
}
|
||||
|
||||
fn pending_transactions(&self, limit: Option<usize>) -> Result<Vec<Transaction>> {
|
||||
let txq = self.light_dispatch.transaction_queue.read();
|
||||
let chain_info = self.light_dispatch.client.chain_info();
|
||||
Ok(txq
|
||||
.ready_transactions(
|
||||
chain_info.best_block_number,
|
||||
chain_info.best_block_timestamp,
|
||||
)
|
||||
.into_iter()
|
||||
.take(limit.unwrap_or_else(usize::max_value))
|
||||
.map(Transaction::from_pending)
|
||||
.collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
fn all_transactions(&self) -> Result<Vec<Transaction>> {
|
||||
Ok(light_all_transactions(&self.light_dispatch)
|
||||
.map(Transaction::from_pending)
|
||||
.collect())
|
||||
}
|
||||
|
||||
fn all_transaction_hashes(&self) -> Result<Vec<H256>> {
|
||||
Ok(light_all_transactions(&self.light_dispatch)
|
||||
.map(|tx| tx.transaction.hash())
|
||||
.collect())
|
||||
}
|
||||
|
||||
fn future_transactions(&self) -> Result<Vec<Transaction>> {
|
||||
let txq = self.light_dispatch.transaction_queue.read();
|
||||
let chain_info = self.light_dispatch.client.chain_info();
|
||||
Ok(txq
|
||||
.future_transactions(
|
||||
chain_info.best_block_number,
|
||||
chain_info.best_block_timestamp,
|
||||
)
|
||||
.into_iter()
|
||||
.map(Transaction::from_pending)
|
||||
.collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
fn pending_transactions_stats(&self) -> Result<BTreeMap<H256, TransactionStats>> {
|
||||
let stats = self.light_dispatch.sync.transactions_stats();
|
||||
Ok(stats
|
||||
.into_iter()
|
||||
.map(|(hash, stats)| (hash, stats.into()))
|
||||
.collect())
|
||||
}
|
||||
|
||||
fn local_transactions(&self) -> Result<BTreeMap<H256, LocalTransactionStatus>> {
|
||||
let mut map = BTreeMap::new();
|
||||
let chain_info = self.light_dispatch.client.chain_info();
|
||||
let (best_num, best_tm) = (
|
||||
chain_info.best_block_number,
|
||||
chain_info.best_block_timestamp,
|
||||
);
|
||||
let txq = self.light_dispatch.transaction_queue.read();
|
||||
|
||||
for pending in txq.ready_transactions(best_num, best_tm) {
|
||||
map.insert(pending.hash(), LocalTransactionStatus::Pending);
|
||||
}
|
||||
|
||||
for future in txq.future_transactions(best_num, best_tm) {
|
||||
map.insert(future.hash(), LocalTransactionStatus::Future);
|
||||
}
|
||||
|
||||
// TODO: other types?
|
||||
|
||||
Ok(map)
|
||||
}
|
||||
|
||||
fn ws_url(&self) -> Result<String> {
|
||||
helpers::to_url(&self.ws_address).ok_or_else(errors::ws_disabled)
|
||||
}
|
||||
|
||||
fn next_nonce(&self, address: H160) -> BoxFuture<U256> {
|
||||
Box::new(self.light_dispatch.next_nonce(address))
|
||||
}
|
||||
|
||||
fn mode(&self) -> Result<String> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn chain(&self) -> Result<String> {
|
||||
Ok(self.settings.chain.clone())
|
||||
}
|
||||
|
||||
fn enode(&self) -> Result<String> {
|
||||
self.light_dispatch
|
||||
.sync
|
||||
.enode()
|
||||
.ok_or_else(errors::network_disabled)
|
||||
}
|
||||
|
||||
fn consensus_capability(&self) -> Result<ConsensusCapability> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn version_info(&self) -> Result<VersionInfo> {
|
||||
Ok(UpdaterVersionInfo::this().into())
|
||||
}
|
||||
|
||||
fn releases_info(&self) -> Result<Option<OperationsInfo>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn chain_status(&self) -> Result<ChainStatus> {
|
||||
let chain_info = self.light_dispatch.client.chain_info();
|
||||
|
||||
let gap = chain_info
|
||||
.ancient_block_number
|
||||
.map(|x| U256::from(x + 1))
|
||||
.and_then(|first| {
|
||||
chain_info
|
||||
.first_block_number
|
||||
.map(|last| (first, U256::from(last)))
|
||||
});
|
||||
|
||||
Ok(ChainStatus { block_gap: gap })
|
||||
}
|
||||
|
||||
fn node_kind(&self) -> Result<::v1::types::NodeKind> {
|
||||
use v1::types::{Availability, Capability, NodeKind};
|
||||
|
||||
Ok(NodeKind {
|
||||
availability: Availability::Personal,
|
||||
capability: Capability::Light,
|
||||
})
|
||||
}
|
||||
|
||||
fn block_header(&self, number: Option<BlockNumber>) -> BoxFuture<RichHeader> {
|
||||
use types::encoded;
|
||||
|
||||
let engine = self.light_dispatch.client.engine().clone();
|
||||
let from_encoded = move |encoded: encoded::Header| {
|
||||
let header = encoded.decode().map_err(errors::decode)?;
|
||||
let extra_info = engine.extra_info(&header);
|
||||
Ok(RichHeader {
|
||||
inner: Header {
|
||||
hash: Some(header.hash()),
|
||||
size: Some(encoded.rlp().as_raw().len().into()),
|
||||
parent_hash: *header.parent_hash(),
|
||||
uncles_hash: *header.uncles_hash(),
|
||||
author: *header.author(),
|
||||
miner: *header.author(),
|
||||
state_root: *header.state_root(),
|
||||
transactions_root: *header.transactions_root(),
|
||||
receipts_root: *header.receipts_root(),
|
||||
number: Some(header.number().into()),
|
||||
gas_used: *header.gas_used(),
|
||||
gas_limit: *header.gas_limit(),
|
||||
logs_bloom: *header.log_bloom(),
|
||||
timestamp: header.timestamp().into(),
|
||||
difficulty: *header.difficulty(),
|
||||
seal_fields: header.seal().iter().cloned().map(Into::into).collect(),
|
||||
extra_data: Bytes::new(header.extra_data().clone()),
|
||||
},
|
||||
extra_info,
|
||||
})
|
||||
};
|
||||
let id = number.unwrap_or_default().to_block_id();
|
||||
Box::new(self.fetcher().header(id).and_then(from_encoded))
|
||||
}
|
||||
|
||||
fn block_receipts(&self, number: Option<BlockNumber>) -> BoxFuture<Vec<Receipt>> {
|
||||
let id = number.unwrap_or_default().to_block_id();
|
||||
Box::new(
|
||||
self.fetcher()
|
||||
.receipts(id)
|
||||
.and_then(|receipts| Ok(receipts.into_iter().map(Into::into).collect())),
|
||||
)
|
||||
}
|
||||
|
||||
fn ipfs_cid(&self, content: Bytes) -> Result<String> {
|
||||
ipfs::cid(content)
|
||||
}
|
||||
|
||||
/// Batch call execution — not supported on the light client
/// (no local state to execute against).
fn call(&self, _requests: Vec<CallRequest>, _block: Option<BlockNumber>) -> Result<Vec<Bytes>> {
    Err(errors::light_unimplemented(None))
}
|
||||
|
||||
/// PoW work submission — not supported on the light client
/// (light nodes do not mine).
fn submit_work_detail(&self, _nonce: H64, _pow_hash: H256, _mix_hash: H256) -> Result<H256> {
    Err(errors::light_unimplemented(None))
}
|
||||
|
||||
fn status(&self) -> Result<()> {
|
||||
let has_peers =
|
||||
self.settings.is_dev_chain || self.light_dispatch.sync.peer_numbers().connected > 0;
|
||||
let is_importing = (*self.light_dispatch.sync).is_major_importing();
|
||||
|
||||
if has_peers && !is_importing {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(errors::status_error(has_peers))
|
||||
}
|
||||
}
|
||||
|
||||
fn logs_no_tx_hash(&self, filter: Filter) -> BoxFuture<Vec<Log>> {
|
||||
let filter = match filter.try_into() {
|
||||
Ok(value) => value,
|
||||
Err(err) => return Box::new(future::err(err)),
|
||||
};
|
||||
Box::new(self.fetcher().logs_no_tx_hash(filter)) as BoxFuture<_>
|
||||
}
|
||||
|
||||
fn verify_signature(
|
||||
&self,
|
||||
is_prefixed: bool,
|
||||
message: Bytes,
|
||||
r: H256,
|
||||
s: H256,
|
||||
v: U64,
|
||||
) -> Result<RecoveredAccount> {
|
||||
verify_signature(
|
||||
is_prefixed,
|
||||
message,
|
||||
r,
|
||||
s,
|
||||
v,
|
||||
self.light_dispatch.client.signing_chain_id(),
|
||||
)
|
||||
}
|
||||
}
|
@ -1,154 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Parity-specific rpc interface for operations altering the settings.
|
||||
//! Implementation for light client.
|
||||
|
||||
use std::{io, sync::Arc};
|
||||
|
||||
use ethereum_types::{H160, H256, U256};
|
||||
use fetch::{self, Fetch};
|
||||
use hash::keccak_buffer;
|
||||
use light::client::LightChainClient;
|
||||
use sync::ManageNetwork;
|
||||
|
||||
use jsonrpc_core::{futures::Future, BoxFuture, Result};
|
||||
use v1::{
|
||||
helpers::errors,
|
||||
traits::ParitySet,
|
||||
types::{Bytes, ReleaseInfo, Transaction},
|
||||
};
|
||||
|
||||
/// Parity-specific rpc interface for operations altering the settings.
pub struct ParitySetClient<F> {
    // Light chain client; used e.g. for switching the chain spec.
    client: Arc<dyn LightChainClient>,
    // Network management handle (reserved peers, start/stop network).
    net: Arc<dyn ManageNetwork>,
    // HTTP fetch service; used by `hash_content`.
    fetch: F,
}
|
||||
|
||||
impl<F: Fetch> ParitySetClient<F> {
|
||||
/// Creates new `ParitySetClient` with given `Fetch`.
|
||||
pub fn new(client: Arc<dyn LightChainClient>, net: Arc<dyn ManageNetwork>, fetch: F) -> Self {
|
||||
ParitySetClient { client, net, fetch }
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Fetch> ParitySet for ParitySetClient<F> {
|
||||
fn set_min_gas_price(&self, _gas_price: U256) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn set_gas_floor_target(&self, _target: U256) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn set_gas_ceil_target(&self, _target: U256) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn set_extra_data(&self, _extra_data: Bytes) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn set_author(&self, _author: H160) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn set_engine_signer_secret(&self, _secret: H256) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn set_transactions_limit(&self, _limit: usize) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn set_tx_gas_limit(&self, _limit: U256) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn add_reserved_peer(&self, peer: String) -> Result<bool> {
|
||||
match self.net.add_reserved_peer(peer) {
|
||||
Ok(()) => Ok(true),
|
||||
Err(e) => Err(errors::invalid_params("Peer address", e)),
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_reserved_peer(&self, peer: String) -> Result<bool> {
|
||||
match self.net.remove_reserved_peer(peer) {
|
||||
Ok(()) => Ok(true),
|
||||
Err(e) => Err(errors::invalid_params("Peer address", e)),
|
||||
}
|
||||
}
|
||||
|
||||
fn drop_non_reserved_peers(&self) -> Result<bool> {
|
||||
self.net.deny_unreserved_peers();
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn accept_non_reserved_peers(&self) -> Result<bool> {
|
||||
self.net.accept_unreserved_peers();
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn start_network(&self) -> Result<bool> {
|
||||
self.net.start_network();
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn stop_network(&self) -> Result<bool> {
|
||||
self.net.stop_network();
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn set_mode(&self, _mode: String) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn set_spec_name(&self, spec_name: String) -> Result<bool> {
|
||||
self.client
|
||||
.set_spec_name(spec_name)
|
||||
.map(|_| true)
|
||||
.map_err(|()| errors::cannot_restart())
|
||||
}
|
||||
|
||||
fn hash_content(&self, url: String) -> BoxFuture<H256> {
|
||||
let future = self
|
||||
.fetch
|
||||
.get(&url, Default::default())
|
||||
.then(move |result| {
|
||||
result
|
||||
.map_err(errors::fetch)
|
||||
.and_then(move |response| {
|
||||
let mut reader = io::BufReader::new(fetch::BodyReader::new(response));
|
||||
keccak_buffer(&mut reader).map_err(errors::fetch)
|
||||
})
|
||||
.map(Into::into)
|
||||
});
|
||||
Box::new(future)
|
||||
}
|
||||
|
||||
fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn execute_upgrade(&self) -> Result<bool> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn remove_transaction(&self, _hash: H256) -> Result<Option<Transaction>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
}
|
@ -1,99 +0,0 @@
|
||||
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity Ethereum.
|
||||
|
||||
// Parity Ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity Ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Traces api implementation.
|
||||
|
||||
use ethereum_types::H256;
|
||||
use jsonrpc_core::Result;
|
||||
use v1::{
|
||||
helpers::errors,
|
||||
traits::Traces,
|
||||
types::{
|
||||
BlockNumber, Bytes, CallRequest, Index, LocalizedTrace, TraceFilter, TraceOptions,
|
||||
TraceResults, TraceResultsWithTransactionHash,
|
||||
},
|
||||
Metadata,
|
||||
};
|
||||
|
||||
/// Traces api implementation.
///
/// Light-client variant: every method in the `Traces` impl below answers
/// with a `light_unimplemented` error.
// TODO: all calling APIs should be possible w. proved remote TX execution.
pub struct TracesClient;
|
||||
|
||||
impl Traces for TracesClient {
|
||||
type Metadata = Metadata;
|
||||
|
||||
fn filter(&self, _filter: TraceFilter) -> Result<Option<Vec<LocalizedTrace>>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn block_traces(&self, _block_number: BlockNumber) -> Result<Option<Vec<LocalizedTrace>>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn transaction_traces(&self, _transaction_hash: H256) -> Result<Option<Vec<LocalizedTrace>>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn trace(
|
||||
&self,
|
||||
_transaction_hash: H256,
|
||||
_address: Vec<Index>,
|
||||
) -> Result<Option<LocalizedTrace>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn call(
|
||||
&self,
|
||||
_request: CallRequest,
|
||||
_flags: TraceOptions,
|
||||
_block: Option<BlockNumber>,
|
||||
) -> Result<TraceResults> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn call_many(
|
||||
&self,
|
||||
_request: Vec<(CallRequest, TraceOptions)>,
|
||||
_block: Option<BlockNumber>,
|
||||
) -> Result<Vec<TraceResults>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn raw_transaction(
|
||||
&self,
|
||||
_raw_transaction: Bytes,
|
||||
_flags: TraceOptions,
|
||||
_block: Option<BlockNumber>,
|
||||
) -> Result<TraceResults> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn replay_transaction(
|
||||
&self,
|
||||
_transaction_hash: H256,
|
||||
_flags: TraceOptions,
|
||||
) -> Result<TraceResults> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
|
||||
fn replay_block_transactions(
|
||||
&self,
|
||||
_block_number: BlockNumber,
|
||||
_flags: TraceOptions,
|
||||
) -> Result<Vec<TraceResultsWithTransactionHash>> {
|
||||
Err(errors::light_unimplemented(None))
|
||||
}
|
||||
}
|
@ -37,8 +37,6 @@ mod signing_unsafe;
|
||||
mod traces;
|
||||
mod web3;
|
||||
|
||||
pub mod light;
|
||||
|
||||
#[cfg(any(test, feature = "accounts"))]
|
||||
pub use self::parity_accounts::ParityAccountsClient;
|
||||
#[cfg(any(test, feature = "accounts"))]
|
||||
|
@ -46,8 +46,8 @@ use v1::{
|
||||
traits::Parity,
|
||||
types::{
|
||||
block_number_to_id, BlockNumber, Bytes, CallRequest, ChainStatus, ConsensusCapability,
|
||||
Filter, Histogram, LocalTransactionStatus, Log, OperationsInfo, Peers, Receipt,
|
||||
RecoveredAccount, RichHeader, RpcSettings, Transaction, TransactionStats, VersionInfo,
|
||||
Histogram, LocalTransactionStatus, OperationsInfo, Peers, Receipt, RecoveredAccount,
|
||||
RichHeader, RpcSettings, Transaction, TransactionStats, VersionInfo,
|
||||
},
|
||||
};
|
||||
use Host;
|
||||
@ -493,12 +493,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieves logs matching `filter` without filling in transaction hashes.
fn logs_no_tx_hash(&self, filter: Filter) -> BoxFuture<Vec<Log>> {
    use v1::impls::eth::base_logs;
    // only specific impl for lightclient — delegates to the shared
    // `base_logs` helper (presumably the tx-hash skip happens there;
    // confirm in the eth impl).
    base_logs(&*self.client, &*self.miner, filter)
}
|
||||
|
||||
fn verify_signature(
|
||||
&self,
|
||||
is_prefixed: bool,
|
||||
|
@ -85,7 +85,6 @@ impl SyncProvider for TestSyncProvider {
|
||||
difficulty: Some(40.into()),
|
||||
head: 50.into(),
|
||||
}),
|
||||
pip_info: None,
|
||||
},
|
||||
PeerInfo {
|
||||
id: None,
|
||||
@ -98,7 +97,6 @@ impl SyncProvider for TestSyncProvider {
|
||||
difficulty: None,
|
||||
head: 60.into(),
|
||||
}),
|
||||
pip_info: None,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
@ -260,7 +260,7 @@ fn rpc_parity_net_peers() {
|
||||
let io = deps.default_client();
|
||||
|
||||
let request = r#"{"jsonrpc": "2.0", "method": "parity_netPeers", "params":[], "id": 1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":{"ParityClient":{"can_handle_large_requests":true,"compiler":"rustc","identity":"1","name":"Parity-Ethereum","os":"linux","semver":"2.4.0"}},"network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"pip":null}},{"caps":["eth/63","eth/64"],"id":null,"name":{"ParityClient":{"can_handle_large_requests":true,"compiler":"rustc","identity":"2","name":"Parity-Ethereum","os":"linux","semver":"2.4.0"}},"network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"pip":null}}]},"id":1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":{"ParityClient":{"can_handle_large_requests":true,"compiler":"rustc","identity":"1","name":"Parity-Ethereum","os":"linux","semver":"2.4.0"}},"network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62}}},{"caps":["eth/63","eth/64"],"id":null,"name":{"ParityClient":{"can_handle_large_requests":true,"compiler":"rustc","identity":"2","name":"Parity-Ethereum","os":"linux","semver":"2.4.0"}},"network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64}}}]},"id":1}"#;
|
||||
|
||||
assert_eq!(io.handle_request_sync(request), Some(response.to_owned()));
|
||||
}
|
||||
|
@ -22,8 +22,8 @@ use ethereum_types::{H160, H256, H512, H64, U256, U64};
|
||||
use jsonrpc_core::{BoxFuture, Result};
|
||||
use jsonrpc_derive::rpc;
|
||||
use v1::types::{
|
||||
BlockNumber, Bytes, CallRequest, ChainStatus, ConsensusCapability, Filter, Histogram,
|
||||
LocalTransactionStatus, Log, OperationsInfo, Peers, Receipt, RecoveredAccount, RichHeader,
|
||||
BlockNumber, Bytes, CallRequest, ChainStatus, ConsensusCapability, Histogram,
|
||||
LocalTransactionStatus, OperationsInfo, Peers, Receipt, RecoveredAccount, RichHeader,
|
||||
RpcSettings, Transaction, TransactionStats, VersionInfo,
|
||||
};
|
||||
|
||||
@ -242,9 +242,4 @@ pub trait Parity {
|
||||
_: H256,
|
||||
_: U64,
|
||||
) -> Result<RecoveredAccount>;
|
||||
|
||||
/// Returns logs matching given filter object.
|
||||
/// Is allowed to skip filling transaction hash for faster query.
|
||||
#[rpc(name = "parity_getLogsNoTransactionHash")]
|
||||
fn logs_no_tx_hash(&self, _: Filter) -> BoxFuture<Vec<Log>>;
|
||||
}
|
||||
|
@ -59,31 +59,6 @@ impl BlockNumber {
|
||||
}
|
||||
}
|
||||
|
||||
/// BlockNumber to BlockId conversion
///
/// NOTE use only for light clients.
pub trait LightBlockNumber {
    /// Convert block number to block id.
    ///
    /// `Pending` is mapped to `Latest` (see the impl for `BlockNumber`
    /// below): light clients have no state and produce no pending blocks.
    fn to_block_id(self) -> BlockId;
}
|
||||
|
||||
impl LightBlockNumber for BlockNumber {
|
||||
fn to_block_id(self) -> BlockId {
|
||||
// NOTE Here we treat `Pending` as `Latest`.
|
||||
// Since light clients don't produce pending blocks
|
||||
// (they don't have state) we can safely fallback to `Latest`.
|
||||
match self {
|
||||
BlockNumber::Num(n) => BlockId::Number(n),
|
||||
BlockNumber::Earliest => BlockId::Earliest,
|
||||
BlockNumber::Latest => BlockId::Latest,
|
||||
BlockNumber::Pending => {
|
||||
warn!("`Pending` is deprecated and may be removed in future versions. Falling back to `Latest`");
|
||||
BlockId::Latest
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for BlockNumber {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
|
@ -51,7 +51,7 @@ pub mod pubsub;
|
||||
pub use self::{
|
||||
account_info::{AccountInfo, EthAccount, ExtAccountInfo, RecoveredAccount, StorageProof},
|
||||
block::{Block, BlockTransactions, Header, Rich, RichBlock, RichHeader},
|
||||
block_number::{block_number_to_id, BlockNumber, LightBlockNumber},
|
||||
block_number::{block_number_to_id, BlockNumber},
|
||||
bytes::Bytes,
|
||||
call_request::CallRequest,
|
||||
confirmations::{
|
||||
@ -74,7 +74,7 @@ pub use self::{
|
||||
secretstore::EncryptedDocumentKey,
|
||||
sync::{
|
||||
ChainStatus, EthProtocolInfo, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo, Peers,
|
||||
PipProtocolInfo, SyncInfo, SyncStatus, TransactionStats,
|
||||
SyncInfo, SyncStatus, TransactionStats,
|
||||
},
|
||||
trace::{LocalizedTrace, TraceResults, TraceResultsWithTransactionHash},
|
||||
trace_filter::TraceFilter,
|
||||
|
@ -42,9 +42,6 @@ pub enum Availability {
|
||||
pub enum Capability {
|
||||
/// A full node stores the full state and fully enacts incoming blocks.
|
||||
Full,
|
||||
/// A light node does a minimal header sync and fetches data as needed
|
||||
/// from the network.
|
||||
Light,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -78,16 +75,10 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn capability() {
|
||||
let light = r#""light""#;
|
||||
let full = r#""full""#;
|
||||
|
||||
assert_eq!(serde_json::to_string(&Capability::Light).unwrap(), light);
|
||||
assert_eq!(serde_json::to_string(&Capability::Full).unwrap(), full);
|
||||
|
||||
assert_eq!(
|
||||
serde_json::from_str::<Capability>(light).unwrap(),
|
||||
Capability::Light
|
||||
);
|
||||
assert_eq!(
|
||||
serde_json::from_str::<Capability>(full).unwrap(),
|
||||
Capability::Full
|
||||
|
@ -80,8 +80,6 @@ pub struct PeerNetworkInfo {
|
||||
pub struct PeerProtocolsInfo {
|
||||
/// Ethereum protocol information
|
||||
pub eth: Option<EthProtocolInfo>,
|
||||
/// PIP protocol information.
|
||||
pub pip: Option<PipProtocolInfo>,
|
||||
}
|
||||
|
||||
/// Peer Ethereum protocol information
|
||||
@ -105,27 +103,6 @@ impl From<sync::EthProtocolInfo> for EthProtocolInfo {
|
||||
}
|
||||
}
|
||||
|
||||
/// Peer PIP protocol information
#[derive(Default, Debug, Serialize)]
pub struct PipProtocolInfo {
    /// Negotiated PIP protocol version
    pub version: u32,
    /// Peer total difficulty
    pub difficulty: U256,
    /// SHA3 of peer best block hash (lowercase hex, no `0x` prefix —
    /// produced via `format!("{:x}", …)` in the `From` impl below)
    pub head: String,
}
|
||||
|
||||
impl From<sync::PipProtocolInfo> for PipProtocolInfo {
|
||||
fn from(info: sync::PipProtocolInfo) -> Self {
|
||||
PipProtocolInfo {
|
||||
version: info.version,
|
||||
difficulty: info.difficulty,
|
||||
head: format!("{:x}", info.head),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Sync status
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum SyncStatus {
|
||||
@ -169,7 +146,6 @@ impl From<SyncPeerInfo> for PeerInfo {
|
||||
},
|
||||
protocols: PeerProtocolsInfo {
|
||||
eth: p.eth_info.map(Into::into),
|
||||
pip: p.pip_info.map(Into::into),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -39,15 +39,9 @@ pub use home::home_dir;
|
||||
// `$LOCAL` / `$BASE` look like placeholders expanded elsewhere in the
// path-handling code — TODO(review): confirm the expansion site.

/// Platform-specific chains path for standard client - Windows only
#[cfg(target_os = "windows")]
pub const CHAINS_PATH: &str = "$LOCAL/chains";
/// Platform-specific chains path for light client - Windows only
#[cfg(target_os = "windows")]
pub const CHAINS_PATH_LIGHT: &str = "$LOCAL/chains_light";
/// Platform-specific chains path for standard client
#[cfg(not(target_os = "windows"))]
pub const CHAINS_PATH: &str = "$BASE/chains";
/// Platform-specific chains path for light client
#[cfg(not(target_os = "windows"))]
pub const CHAINS_PATH_LIGHT: &str = "$BASE/chains_light";
|
||||
|
||||
/// Platform-specific cache path - Windows only
|
||||
#[cfg(target_os = "windows")]
|
||||
|
@ -732,26 +732,6 @@ mod test {
|
||||
runtime.block_on(future).unwrap();
|
||||
}
|
||||
|
||||
#[test]
fn it_should_fetch_in_light_mode() {
    // Fetch a URL from the test server and verify status and body.
    let server = TestServer::run();
    let client = Client::new(1).unwrap();
    let mut runtime = Runtime::new().unwrap();

    let url = format!("http://{}?123", server.addr());
    let future = client
        .get(&url, Abort::default())
        .map(|resp| {
            assert!(resp.is_success());
            resp
        })
        .map(|resp| resp.concat2())
        .flatten()
        .map(|body| assert_eq!(&body[..], b"123"))
        .map_err(|err| panic!(err));

    runtime.block_on(future).unwrap();
}
|
||||
|
||||
#[test]
|
||||
fn it_should_timeout() {
|
||||
let server = TestServer::run();
|
||||
|
Loading…
Reference in New Issue
Block a user