Merge pull request #5403 from adrianbrink/adrian-lightclientcache
Add caching to HeaderChain struct
commit 95808f51f1
@@ -61,6 +61,7 @@ impl Default for CacheSizes {
 ///
 /// Note that almost all getter methods take `&mut self` due to the necessity to update
 /// the underlying LRU-caches on read.
+/// [LRU-cache](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_Recently_Used_.28LRU.29)
 pub struct Cache {
 	headers: MemoryLruCache<H256, encoded::Header>,
 	canon_hashes: MemoryLruCache<BlockNumber, H256>,
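The doc comment above is the crux of the new type: with an LRU cache, even a plain lookup mutates state, because the entry has to be bumped to the front of the recency order. A minimal, self-contained sketch of that idea (a toy stand-in, not the `MemoryLruCache` type the struct actually uses):

use std::collections::HashMap;
use std::hash::Hash;

// Toy LRU map: the most recently used key sits at the end of `order`.
struct TinyLru<K: Hash + Eq + Clone, V> {
    map: HashMap<K, V>,
    order: Vec<K>,
    capacity: usize,
}

impl<K: Hash + Eq + Clone, V> TinyLru<K, V> {
    fn new(capacity: usize) -> Self {
        TinyLru { map: HashMap::new(), order: Vec::new(), capacity }
    }

    // Reading must refresh the recency order, hence `&mut self`, which is the
    // same reason the `Cache` getters in this PR take `&mut self`.
    fn get(&mut self, key: &K) -> Option<&V> {
        if self.map.contains_key(key) {
            self.order.retain(|k| k != key);
            self.order.push(key.clone());
        }
        self.map.get(key)
    }

    fn insert(&mut self, key: K, value: V) {
        if !self.map.contains_key(&key) && self.map.len() == self.capacity {
            // Evict the least recently used entry (front of `order`).
            let evicted = self.order.remove(0);
            self.map.remove(&evicted);
        }
        self.order.retain(|k| k != &key);
        self.order.push(key.clone());
        self.map.insert(key, value);
    }
}

fn main() {
    let mut cache = TinyLru::new(2);
    cache.insert("a", 1);
    cache.insert("b", 2);
    let _ = cache.get(&"a");  // refreshes "a", so "b" is now least recently used
    cache.insert("c", 3);     // evicts "b"
    assert!(cache.get(&"b").is_none());
    assert_eq!(cache.get(&"a").copied(), Some(1));
}

A shared handle such as `Arc<Mutex<TinyLru<K, V>>>` then mirrors how this PR threads `Arc<Mutex<Cache>>` through the `HeaderChain` and `Client` constructors below.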
@@ -15,7 +15,7 @@
 //!
 //! Each CHT is a trie mapping block numbers to canonical hashes and total difficulty.
 //! One is generated for every `SIZE` blocks, allowing us to discard those blocks in
-//! favor the the trie root. When the "ancient" blocks need to be accessed, we simply
+//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply
 //! request an inclusion proof of a specific block number against the trie with the
 //! root has. A correct proof implies that the claimed block is identical to the one
 //! we discarded.
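The arithmetic implied by that comment is simple: block numbers map onto fixed-size CHT ranges, and only the range index is needed to know which trie root to request a proof against. A sketch of that mapping, with an assumed example value for `SIZE` (the real constant lives in the crate and may differ):

/// Blocks per CHT; illustrative value only.
const SIZE: u64 = 2048;

/// CHT index covering `block_number`; in this sketch the genesis block belongs to no CHT.
fn block_to_cht_number(block_number: u64) -> Option<u64> {
    match block_number {
        0 => None,
        n => Some((n - 1) / SIZE),
    }
}

/// First block number covered by CHT `cht_number`.
fn start_number(cht_number: u64) -> u64 {
    cht_number * SIZE + 1
}

fn main() {
    assert_eq!(block_to_cht_number(1), Some(0));
    assert_eq!(block_to_cht_number(SIZE), Some(0));
    assert_eq!(block_to_cht_number(SIZE + 1), Some(1));
    assert_eq!(start_number(1), SIZE + 1);
}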
@@ -39,6 +39,9 @@ use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp};
 use util::{H256, U256, HeapSizeOf, RwLock};
 use util::kvdb::{DBTransaction, KeyValueDB};
 
+use cache::Cache;
+use util::Mutex;
+
 use smallvec::SmallVec;
 
 /// Store at least this many candidate headers at all times.
@@ -138,11 +141,12 @@ pub struct HeaderChain {
 	best_block: RwLock<BlockDescriptor>,
 	db: Arc<KeyValueDB>,
 	col: Option<u32>,
+	cache: Arc<Mutex<Cache>>,
 }
 
 impl HeaderChain {
 	/// Create a new header chain given this genesis block and database to read from.
-	pub fn new(db: Arc<KeyValueDB>, col: Option<u32>, genesis: &[u8]) -> Result<Self, String> {
+	pub fn new(db: Arc<KeyValueDB>, col: Option<u32>, genesis: &[u8], cache: Arc<Mutex<Cache>>) -> Result<Self, String> {
 		use ethcore::views::HeaderView;
 
 		let chain = if let Some(current) = db.get(col, CURRENT_KEY)? {
@@ -186,6 +190,7 @@ impl HeaderChain {
 				candidates: RwLock::new(candidates),
 				db: db,
 				col: col,
+				cache: cache,
 			}
 		} else {
 			let g_view = HeaderView::new(genesis);
@@ -199,6 +204,7 @@ impl HeaderChain {
 				candidates: RwLock::new(BTreeMap::new()),
 				db: db,
 				col: col,
+				cache: cache,
 			}
 		};
 
@@ -359,11 +365,24 @@ impl HeaderChain {
 	/// will be returned.
 	pub fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
 		let load_from_db = |hash: H256| {
-			match self.db.get(self.col, &hash) {
-				Ok(val) => val.map(|x| x.to_vec()).map(encoded::Header::new),
-				Err(e) => {
-					warn!(target: "chain", "Failed to read from database: {}", e);
-					None
+			let mut cache = self.cache.lock();
+
+			match cache.block_header(&hash) {
+				Some(header) => Some(header),
+				None => {
+					match self.db.get(self.col, &hash) {
+						Ok(db_value) => {
+							db_value.map(|x| x.to_vec()).map(encoded::Header::new)
+								.and_then(|header| {
+									cache.insert_block_header(hash.clone(), header.clone());
+									Some(header)
+								})
+						},
+						Err(e) => {
+							warn!(target: "chain", "Failed to read from database: {}", e);
+							None
+						}
+					}
 				}
 			}
 		};
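The new closure is a read-through cache: take the shared lock, return a hit straight away, and on a miss fall back to the database, populating the cache before returning. The same shape in a self-contained sketch, with std types standing in for the parity ones (a `HashMap` instead of the LRU cache, a closure instead of the key-value DB):

use std::collections::HashMap;
use std::hash::Hash;
use std::sync::{Arc, Mutex};

// Consult the shared cache first, fall back to the backing store on a miss,
// and remember the result so the next read skips the store entirely.
fn read_through<K, V, Store>(cache: &Arc<Mutex<HashMap<K, V>>>, store: &Store, key: K) -> Option<V>
where
    K: Hash + Eq + Clone,
    V: Clone,
    Store: Fn(&K) -> Option<V>,
{
    let mut cache = cache.lock().expect("lock poisoned");
    if let Some(hit) = cache.get(&key) {
        return Some(hit.clone());
    }
    store(&key).map(|value| {
        cache.insert(key, value.clone());
        value
    })
}

fn main() {
    let cache = Arc::new(Mutex::new(HashMap::new()));
    let slow_store = |key: &u64| Some(format!("header for block {}", key));

    // The first call goes to the store; the second is served from the cache.
    assert_eq!(read_through(&cache, &slow_store, 7), Some("header for block 7".to_string()));
    assert_eq!(read_through(&cache, &slow_store, 7), Some("header for block 7".to_string()));
}

As in the diff, the lock is held across the fallback read, which keeps the logic simple at the cost of serializing concurrent misses on the same cache.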
@@ -493,6 +512,10 @@ mod tests {
 	use ethcore::ids::BlockId;
 	use ethcore::header::Header;
 	use ethcore::spec::Spec;
+	use cache::Cache;
+
+	use time::Duration;
+	use util::Mutex;
 
 	fn make_db() -> Arc<::util::KeyValueDB> {
 		Arc::new(::util::kvdb::in_memory(0))
@@ -504,7 +527,9 @@ mod tests {
 		let genesis_header = spec.genesis_header();
 		let db = make_db();
 
-		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
+		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap();
 
 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -534,9 +559,10 @@ mod tests {
 	fn reorganize() {
 		let spec = Spec::new_test();
 		let genesis_header = spec.genesis_header();
 
 		let db = make_db();
-		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
-
+		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap();
 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -617,8 +643,10 @@ mod tests {
 		let spec = Spec::new_test();
 		let genesis_header = spec.genesis_header();
 		let db = make_db();
+		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap();
 
-		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
 
 		assert!(chain.block_header(BlockId::Earliest).is_some());
 		assert!(chain.block_header(BlockId::Latest).is_some());
@@ -630,9 +658,10 @@ mod tests {
 		let spec = Spec::new_test();
 		let genesis_header = spec.genesis_header();
 		let db = make_db();
+		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
 
 		{
-			let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
+			let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
 			let mut parent_hash = genesis_header.hash();
 			let mut rolling_timestamp = genesis_header.timestamp();
 			for i in 1..10000 {
@@ -652,7 +681,7 @@ mod tests {
 			}
 		}
 
-		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
 		assert!(chain.block_header(BlockId::Number(10)).is_none());
 		assert!(chain.block_header(BlockId::Number(9000)).is_some());
 		assert!(chain.cht_root(2).is_some());
@@ -665,9 +694,10 @@ mod tests {
 		let spec = Spec::new_test();
 		let genesis_header = spec.genesis_header();
 		let db = make_db();
+		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
 
 		{
-			let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
+			let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
 			let mut parent_hash = genesis_header.hash();
 			let mut rolling_timestamp = genesis_header.timestamp();
 
@@ -709,7 +739,7 @@ mod tests {
 		}
 
 		// after restoration, non-canonical eras should still be loaded.
-		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap();
+		let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
 		assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10);
 		assert!(chain.candidates.read().get(&100).is_some())
 	}
@@ -36,6 +36,8 @@ use util::kvdb::{KeyValueDB, CompactionProfile};
 
 use self::header_chain::{AncestryIter, HeaderChain};
 
+use cache::Cache;
+
 pub use self::service::Service;
 
 mod header_chain;
@@ -120,13 +122,13 @@ pub struct Client {
 
 impl Client {
 	/// Create a new `Client`.
-	pub fn new(config: Config, db: Arc<KeyValueDB>, chain_col: Option<u32>, spec: &Spec, io_channel: IoChannel<ClientIoMessage>) -> Result<Self, String> {
+	pub fn new(config: Config, db: Arc<KeyValueDB>, chain_col: Option<u32>, spec: &Spec, io_channel: IoChannel<ClientIoMessage>, cache: Arc<Mutex<Cache>>) -> Result<Self, String> {
 		let gh = ::rlp::encode(&spec.genesis_header());
 
 		Ok(Client {
 			queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true),
 			engine: spec.engine.clone(),
-			chain: HeaderChain::new(db.clone(), chain_col, &gh)?,
+			chain: HeaderChain::new(db.clone(), chain_col, &gh, cache)?,
 			report: RwLock::new(ClientReport::default()),
 			import_lock: Mutex::new(()),
 			db: db,
@@ -135,10 +137,10 @@ impl Client {
 
 	/// Create a new `Client` backed purely in-memory.
 	/// This will ignore all database options in the configuration.
-	pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel<ClientIoMessage>) -> Self {
+	pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel<ClientIoMessage>, cache: Arc<Mutex<Cache>>) -> Self {
 		let db = ::util::kvdb::in_memory(0);
 
-		Client::new(config, Arc::new(db), None, spec, io_channel).expect("New DB creation infallible; qed")
+		Client::new(config, Arc::new(db), None, spec, io_channel, cache).expect("New DB creation infallible; qed")
 	}
 
 	/// Import a header to the queue for additional verification.
@@ -27,6 +27,9 @@ use ethcore::spec::Spec;
 use io::{IoContext, IoError, IoHandler, IoService};
 use util::kvdb::{Database, DatabaseConfig};
 
+use cache::Cache;
+use util::Mutex;
+
 use super::{Client, Config as ClientConfig};
 
 /// Errors on service initialization.
@@ -55,7 +58,8 @@ pub struct Service {
 
 impl Service {
 	/// Start the service: initialize I/O workers and client itself.
-	pub fn start(config: ClientConfig, spec: &Spec, path: &Path) -> Result<Self, Error> {
+	pub fn start(config: ClientConfig, spec: &Spec, path: &Path, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {
+
 		// initialize database.
 		let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS);
 
@@ -78,6 +82,7 @@ impl Service {
 			db::COL_LIGHT_CHAIN,
 			spec,
 			io_service.channel(),
+			cache,
 		).map_err(Error::Database)?);
 		io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?;
 		Ok(Service {
@@ -112,11 +117,18 @@ mod tests {
 	use super::Service;
 	use devtools::RandomTempPath;
 	use ethcore::spec::Spec;
 
+	use std::sync::Arc;
+	use cache::Cache;
+	use time::Duration;
+	use util::Mutex;
+
 	#[test]
 	fn it_works() {
 		let spec = Spec::new_test();
 		let temp_path = RandomTempPath::new();
-		Service::start(Default::default(), &spec, temp_path.as_path()).unwrap();
+		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+
+		Service::start(Default::default(), &spec, temp_path.as_path(), cache).unwrap();
 	}
 }
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-//! PIP Protocol Version 1 implementation.
+//! PLP Protocol Version 1 implementation.
 //!
 //! This uses a "Provider" to answer requests.
 
@@ -192,6 +192,10 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
 	info!("Starting {}", Colour::White.bold().paint(version()));
 	info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client"));
 
+	// TODO: configurable cache size.
+	let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES));
+	let cache = Arc::new(::util::Mutex::new(cache));
+
 	// start client and create transaction queue.
 	let mut config = light_client::Config {
 		queue: Default::default(),
@@ -204,7 +208,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
 	config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
 	config.queue.verifier_settings = cmd.verifier_settings;
 
-	let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm))
+	let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm), cache.clone())
 		.map_err(|e| format!("Error starting light client: {}", e))?;
 	let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default()));
 	let provider = ::light::provider::LightProvider::new(service.client().clone(), txq.clone());
@@ -216,10 +220,6 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
 		net_conf.boot_nodes = spec.nodes.clone();
 	}
 
-	// TODO: configurable cache size.
-	let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES));
-	let cache = Arc::new(::util::Mutex::new(cache));
-
 	// start on_demand service.
 	let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone()));
 
@@ -32,6 +32,9 @@ use light::provider::LightProvider;
 use network::{NodeId, PeerId};
 use util::RwLock;
 
+use time::Duration;
+use light::cache::Cache;
+
 const NETWORK_ID: u64 = 0xcafebabe;
 
 struct TestIoContext<'a> {
@@ -207,7 +210,8 @@ impl TestNet<Peer> {
 	pub fn light(n_light: usize, n_full: usize) -> Self {
 		let mut peers = Vec::with_capacity(n_light + n_full);
 		for _ in 0..n_light {
-			let client = LightClient::in_memory(Default::default(), &Spec::new_test(), IoChannel::disconnected());
+			let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
+			let client = LightClient::in_memory(Default::default(), &Spec::new_test(), IoChannel::disconnected(), cache);
 			peers.push(Arc::new(Peer::new_light(Arc::new(client))))
 		}
 