Cache manager.

Closes #135
Gav Wood 2016-01-18 19:23:28 +01:00
parent bd21c6c327
commit 76cded453b
4 changed files with 82 additions and 15 deletions


@@ -53,7 +53,9 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {
fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, timer: TimerToken) {
if self.timer == timer {
println!("Chain info: {:?}", self.client.read().unwrap().deref().chain_info());
self.client.tick();
println!("Chain info: {}", self.client.read().unwrap().deref().chain_info());
println!("Cache info: {:?}", self.client.read().unwrap().deref().cache_info());
}
}
}
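
For reference, the same periodic flow can be driven without the IoHandler/TimerToken machinery, using only the Client methods added in this commit. A minimal sketch, assuming Client is Send + Sync and with a purely illustrative interval:

use std::sync::Arc;
use std::thread;
use std::time::Duration;

// Hypothetical driver standing in for the sync timer above.
fn spawn_cache_ticker(client: Arc<Client>) {
    thread::spawn(move || loop {
        thread::sleep(Duration::from_secs(5));              // illustrative interval
        println!("Chain info: {}", client.chain_info());    // uses the new Display impl on BlockChainInfo
        println!("Cache info: {:?}", client.cache_info());  // CacheSize snapshot
        client.tick();                                      // -> BlockChain::collect_garbage(false)
    });
}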


@@ -30,6 +30,11 @@ pub struct CacheSize {
pub blocks_blooms: usize
}
impl CacheSize {
/// Total amount used by the cache.
fn total(&self) -> usize { self.blocks + self.block_details + self.transaction_addresses + self.block_logs + self.blocks_blooms }
}
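
total() is what the garbage collector further down compares against the cache size thresholds. Inverting the early-return guard at the top of collect_garbage (below) gives an equivalent predicate; a sketch with the MIN_CACHE_SIZE / MAX_CACHE_SIZE constants passed in explicitly:

// Equivalent "should we collect?" form of the guard in collect_garbage below.
// `timeout` is currently hard-coded to true there, so in practice collection
// runs whenever the total exceeds the minimum.
fn should_collect(total: usize, min: usize, max: usize, timeout: bool, force: bool) -> bool {
    total >= min && (timeout || (force && total >= max))
}
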
/// Information about best block gathered together
struct BestBlock {
pub hash: H256,
@@ -97,9 +102,9 @@ pub trait BlockProvider {
}
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
struct CacheID {
id: H256,
extra: usize
enum CacheID {
Block(H256),
Extras(ExtrasIndex, H256),
}
struct CacheManager {
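
The diff cuts the struct body off here; inferred from how it is constructed and used later in this commit (not a verbatim copy of the source), its shape is roughly:

struct CacheManager {
    cache_usage: VecDeque<HashSet<CacheID>>, // one recency bucket per collection round, [0] = newest
    in_use: HashSet<CacheID>,                // every id currently held in any bucket
}
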
@@ -149,6 +154,8 @@ impl BlockProvider for BlockChain {
let opt = self.blocks_db.get(hash)
.expect("Low level database error. Some issue with disk?");
self.note_used(CacheID::Block(hash.clone()));
match opt {
Some(b) => {
let bytes: Bytes = b.to_vec();
@@ -214,6 +221,9 @@ impl BlockChain {
blocks_path.push("blocks");
let blocks_db = DB::open_default(blocks_path.to_str().unwrap()).unwrap();
let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()};
(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
let bc = BlockChain {
best_block: RwLock::new(BestBlock::new()),
blocks: RwLock::new(HashMap::new()),
@@ -224,7 +234,7 @@
blocks_blooms: RwLock::new(HashMap::new()),
extras_db: extras_db,
blocks_db: blocks_db,
cache_man: RwLock::new(CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()}),
cache_man: RwLock::new(cache_man),
};
// load best block
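
As an aside, the (0..COLLECTION_QUEUE_SIZE).foreach(..) pre-fill above relies on the itertools adaptor; a plain-std sketch of the same initialization (same constant assumed) would be:

let cache_man = CacheManager {
    cache_usage: (0..COLLECTION_QUEUE_SIZE).map(|_| HashSet::new()).collect(),
    in_use: HashSet::new(),
};
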
@@ -515,6 +525,10 @@
}
}
if let Some(h) = hash.as_h256() {
self.note_used(CacheID::Extras(T::extras_index(), h.clone()));
}
self.extras_db.get_extras(hash).map(| t: T | {
let mut write = cache.write().unwrap();
write.insert(hash.clone(), t.clone());
@@ -556,21 +570,54 @@
self.blocks_blooms.write().unwrap().squeeze(size.blocks_blooms);
}
/// Let the cache system know that a cacheable item has been used.
fn note_used(&self, id: CacheID) {
let mut cache_man = self.cache_man.write().unwrap();
if !cache_man.cache_usage[0].contains(&id) {
cache_man.cache_usage[0].insert(id.clone());
// TODO: check more than just the first?
if cache_man.cache_usage[1].contains(&id) {
cache_man.cache_usage[1].remove(&id);
if cache_man.in_use.contains(&id) {
if let Some(c) = cache_man.cache_usage.iter_mut().skip(1).find(|e|e.contains(&id)) {
c.remove(&id);
}
else {
} else {
cache_man.in_use.insert(id);
}
}
}
/// Ticks our cache system and throws out any old data.
pub fn tick(&self) {
pub fn collect_garbage(&self, force: bool) {
// TODO: check time.
let timeout = true;
let t = self.cache_size().total();
if t < MIN_CACHE_SIZE || (!timeout && (!force || t < MAX_CACHE_SIZE)) { return; }
let mut cache_man = self.cache_man.write().unwrap();
let mut blocks = self.blocks.write().unwrap();
let mut block_details = self.block_details.write().unwrap();
let mut block_hashes = self.block_hashes.write().unwrap();
let mut transaction_addresses = self.transaction_addresses.write().unwrap();
let mut block_logs = self.block_logs.write().unwrap();
let mut blocks_blooms = self.blocks_blooms.write().unwrap();
for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
cache_man.in_use.remove(&id);
match id {
CacheID::Block(h) => { blocks.remove(&h); },
CacheID::Extras(ExtrasIndex::BlockDetails, h) => { block_details.remove(&h); },
CacheID::Extras(ExtrasIndex::TransactionAddress, h) => { transaction_addresses.remove(&h); },
CacheID::Extras(ExtrasIndex::BlockLogBlooms, h) => { block_logs.remove(&h); },
CacheID::Extras(ExtrasIndex::BlocksBlooms, h) => { blocks_blooms.remove(&h); },
_ => panic!(),
}
}
cache_man.cache_usage.push_front(HashSet::new());
// TODO: handle block_hashes properly.
block_hashes.clear();
// TODO: m_lastCollection = chrono::system_clock::now();
}
}
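
Taken together, note_used and collect_garbage implement a small generational (pseudo-LRU) scheme: every touch promotes an id into the newest bucket, each collection evicts whatever is left in the oldest one, and a fresh bucket is pushed at the front, so an entry is only dropped after COLLECTION_QUEUE_SIZE collection rounds pass without it being used. A self-contained sketch of the same mechanism with the RwLocks and the concrete HashMap caches stripped out (names are illustrative, not the crate's API):

use std::collections::{HashSet, VecDeque};
use std::hash::Hash;

struct Generations<T: Eq + Hash + Clone> {
    buckets: VecDeque<HashSet<T>>, // buckets[0] = most recent generation
    in_use: HashSet<T>,            // every id currently tracked in any bucket
}

impl<T: Eq + Hash + Clone> Generations<T> {
    fn new(queue_size: usize) -> Self {
        Generations {
            buckets: (0..queue_size).map(|_| HashSet::new()).collect(),
            in_use: HashSet::new(),
        }
    }

    // Mirror of BlockChain::note_used, minus the RwLock.
    fn note_used(&mut self, id: T) {
        if !self.buckets[0].contains(&id) {
            self.buckets[0].insert(id.clone());
            if self.in_use.contains(&id) {
                // Already tracked: drop it from whichever older bucket holds it.
                if let Some(b) = self.buckets.iter_mut().skip(1).find(|b| b.contains(&id)) {
                    b.remove(&id);
                }
            } else {
                self.in_use.insert(id);
            }
        }
    }

    // Mirror of the eviction loop in collect_garbage: drain the oldest bucket,
    // then push a fresh one at the front.
    fn collect<F: FnMut(&T)>(&mut self, mut evict: F) {
        for id in self.buckets.pop_back().unwrap() {
            self.in_use.remove(&id);
            evict(&id); // the caller removes the entry from its own HashMap cache
        }
        self.buckets.push_front(HashSet::new());
    }
}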


@@ -1,6 +1,6 @@
use util::*;
use rocksdb::{Options, DB};
use blockchain::{BlockChain, BlockProvider};
use blockchain::{BlockChain, BlockProvider, CacheSize};
use views::BlockView;
use error::*;
use header::BlockNumber;
@@ -40,6 +40,12 @@ pub struct BlockChainInfo {
pub best_block_number: BlockNumber
}
impl fmt::Display for BlockChainInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#{}.{}", self.best_block_number, self.best_block_hash)
}
}
/// Block queue status
#[derive(Debug)]
pub struct BlockQueueStatus {
@@ -208,6 +214,16 @@ impl Client {
}
debug!(target: "client", "Imported #{} ({})", header.number(), header.hash());
}
/// Get info on the cache.
pub fn cache_info(&self) -> CacheSize {
self.chain.read().unwrap().cache_size()
}
/// Tick the client.
pub fn tick(&self) {
self.chain.read().unwrap().collect_garbage(false);
}
}
impl BlockChainClient for Client {


@@ -3,7 +3,7 @@ use header::BlockNumber;
use rocksdb::{DB, Writable};
/// Represents index of extra data in database
#[derive(Copy, Clone)]
#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)]
pub enum ExtrasIndex {
BlockDetails = 0,
BlockHash = 1,
@@ -59,6 +59,7 @@ impl ExtrasReadable for DB {
/// Implementations should convert arbitrary type to database key slice
pub trait ExtrasSliceConvertable {
fn to_extras_slice(&self, i: ExtrasIndex) -> H264;
fn as_h256(&self) -> Option<&H256> { None }
}
impl ExtrasSliceConvertable for H256 {
@@ -67,6 +68,7 @@ impl ExtrasSliceConvertable for H256 {
slice[32] = i as u8;
slice
}
fn as_h256(&self) -> Option<&H256> { Some(self) }
}
impl ExtrasSliceConvertable for U256 {
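
The provided as_h256 method means only hash-keyed extras are reported to the cache manager: H256 overrides it above, while key types that are not hash-shaped (e.g. block numbers) can simply keep the default None and are then never tracked. A hypothetical key type makes the opt-out path explicit (NumberKey is illustrative, not part of the crate, and assumes it lives in extras.rs where these types are in scope):

// Hypothetical: a wrapper key that keeps the default as_h256() -> None, so
// query_extras in blockchain.rs never emits a CacheID::Extras entry for it.
struct NumberKey(U256);

impl ExtrasSliceConvertable for NumberKey {
    fn to_extras_slice(&self, i: ExtrasIndex) -> H264 {
        self.0.to_extras_slice(i) // delegate to the U256 encoding above
    }
    // as_h256() is inherited from the trait default and still returns None.
}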