* Consolidation migration

* Started db amalgamation

* Using client constants for columns

* Adding with_columns constructor

* Migrating to single db

* Fixing tests.

* test.sh without verbose

* Fixing warnings

* add migration tests that catch the bug

* make multiple migrations more robust

* add moved v9

* Merge branch 'noop-migrations' into single-db

* spurious line

* clean up migrations ordering

* update comment [ci skip]

* Bumping default number of max_open_files & re-ordering columns.

* fix merge

* fix ignored analysis tests

* Caching best block content

* Faster best_block_header

* Adding progress to v8 migration

* clean up warnings

* Separate hashes and bodies in the DB

* Fixed tests
Tomasz Drwięga 2016-07-28 23:46:24 +02:00 committed by Gav Wood
parent 0934a283b2
commit e4f0c0b215
42 changed files with 1578 additions and 1058 deletions
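
In short: the separate state, blocks, and extras databases become one RocksDB instance with five column families, and each block is now stored as two records, its header RLP and its body RLP, keyed by the block hash in the headers and bodies columns. A minimal sketch of the resulting write path, pieced together from the column constants and DBTransaction calls in this diff (variable wiring simplified):

// Columns after consolidation: DB_COL_STATE = 0, DB_COL_HEADERS = 1,
// DB_COL_BODIES = 2, DB_COL_EXTRA = 3, DB_COL_TRACE = 4.
let batch = db.transaction();
// Header and body go to separate columns under the same hash key.
batch.put(DB_COL_HEADERS, &hash, &compressed_header).unwrap();
batch.put(DB_COL_BODIES, &hash, &compressed_body).unwrap();
// One atomic write covers state, chain, and extras updates alike.
db.write(batch).expect("Low level database error. Some issue with disk?");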


@@ -64,7 +64,7 @@ install:
)
script:
- if [ "$RUN_TESTS" = "true" ]; then ./test.sh; fi
- if [ "$RUN_TESTS" = "true" ]; then ./test.sh --verbose; fi
- if [ "$RUN_COVERAGE" = "true" ]; then ./scripts/cov.sh "$KCOV_CMD"; fi
after_success: |

Cargo.lock

@@ -1093,7 +1093,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rocksdb"
version = "0.4.5"
source = "git+https://github.com/ethcore/rust-rocksdb#dd597245bfcb621c6ffc45478e1fda0b05d2f409"
source = "git+https://github.com/ethcore/rust-rocksdb#eadce7f74cfe92b99ce63a77af425b47857239b8"
dependencies = [
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)",
@@ -1102,7 +1102,7 @@ dependencies = [
[[package]]
name = "rocksdb-sys"
version = "0.3.0"
source = "git+https://github.com/ethcore/rust-rocksdb#dd597245bfcb621c6ffc45478e1fda0b05d2f409"
source = "git+https://github.com/ethcore/rust-rocksdb#eadce7f74cfe92b99ce63a77af425b47857239b8"
dependencies = [
"gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",


@@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::bytes::Bytes;
use util::numbers::{U256,H256};
use header::BlockNumber;
@@ -25,5 +26,7 @@ pub struct BestBlock {
/// Best block number.
pub number: BlockNumber,
/// Best block total difficulty.
pub total_difficulty: U256
pub total_difficulty: U256,
/// Best block uncompressed bytes
pub block: Bytes,
}


@@ -31,6 +31,7 @@ use types::tree_route::TreeRoute;
use blockchain::update::ExtrasUpdate;
use blockchain::{CacheSize, ImportRoute, Config};
use db::{Writable, Readable, CacheUpdatePolicy};
use client::{DB_COL_EXTRA, DB_COL_HEADERS, DB_COL_BODIES};
const LOG_BLOOMS_LEVELS: usize = 3;
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
@@ -58,29 +59,37 @@ pub trait BlockProvider {
/// Get the partial-header of a block.
fn block_header(&self, hash: &H256) -> Option<Header> {
self.block(hash).map(|bytes| BlockView::new(&bytes).header())
self.block_header_data(hash).map(|header| decode(&header))
}
/// Get the header RLP of a block.
fn block_header_data(&self, hash: &H256) -> Option<Bytes>;
/// Get the block body (uncles and transactions).
fn block_body(&self, hash: &H256) -> Option<Bytes>;
/// Get a list of uncles for a given block.
/// Returns None if block does not exist.
fn uncles(&self, hash: &H256) -> Option<Vec<Header>> {
self.block(hash).map(|bytes| BlockView::new(&bytes).uncles())
self.block_body(hash).map(|bytes| BodyView::new(&bytes).uncles())
}
/// Get a list of uncle hashes for a given block.
/// Returns None if block does not exist.
fn uncle_hashes(&self, hash: &H256) -> Option<Vec<H256>> {
self.block(hash).map(|bytes| BlockView::new(&bytes).uncle_hashes())
self.block_body(hash).map(|bytes| BodyView::new(&bytes).uncle_hashes())
}
/// Get the number of given block's hash.
fn block_number(&self, hash: &H256) -> Option<BlockNumber> {
self.block(hash).map(|bytes| BlockView::new(&bytes).header_view().number())
self.block_details(hash).map(|details| details.number)
}
/// Get transaction with given transaction hash.
fn transaction(&self, address: &TransactionAddress) -> Option<LocalizedTransaction> {
self.block(&address.block_hash).and_then(|bytes| BlockView::new(&bytes).localized_transaction_at(address.index))
self.block_body(&address.block_hash)
.and_then(|bytes| self.block_number(&address.block_hash)
.and_then(|n| BodyView::new(&bytes).localized_transaction_at(&address.block_hash, n, address.index)))
}
/// Get transaction receipt.
@@ -91,7 +100,9 @@ pub trait BlockProvider {
/// Get a list of transactions for a given block.
/// Returns None if block does not exist.
fn transactions(&self, hash: &H256) -> Option<Vec<LocalizedTransaction>> {
self.block(hash).map(|bytes| BlockView::new(&bytes).localized_transactions())
self.block_body(hash)
.and_then(|bytes| self.block_number(hash)
.map(|n| BodyView::new(&bytes).localized_transactions(hash, n)))
}
/// Returns reference to genesis hash.
@@ -110,7 +121,8 @@ pub trait BlockProvider {
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
enum CacheID {
Block(H256),
BlockHeader(H256),
BlockBody(H256),
BlockDetails(H256),
BlockHashes(BlockNumber),
TransactionAddresses(H256),
@@ -127,7 +139,7 @@ impl bc::group::BloomGroupDatabase for BlockChain {
fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option<bc::group::BloomGroup> {
let position = LogGroupPosition::from(position.clone());
self.note_used(CacheID::BlocksBlooms(position.clone()));
self.extras_db.read_with_cache(&self.blocks_blooms, &position).map(Into::into)
self.db.read_with_cache(DB_COL_EXTRA, &self.blocks_blooms, &position).map(Into::into)
}
}
@@ -143,7 +155,8 @@ pub struct BlockChain {
best_block: RwLock<BestBlock>,
// block cache
blocks: RwLock<HashMap<H256, Bytes>>,
block_headers: RwLock<HashMap<H256, Bytes>>,
block_bodies: RwLock<HashMap<H256, Bytes>>,
// extra caches
block_details: RwLock<HashMap<H256, BlockDetails>>,
@@ -152,39 +165,96 @@ pub struct BlockChain {
blocks_blooms: RwLock<HashMap<LogGroupPosition, BloomGroup>>,
block_receipts: RwLock<HashMap<H256, BlockReceipts>>,
extras_db: Database,
blocks_db: Database,
db: Arc<Database>,
cache_man: RwLock<CacheManager>,
insert_lock: Mutex<()>
}
impl BlockProvider for BlockChain {
/// Returns true if the given block is known
/// (though not necessarily a part of the canon chain).
fn is_known(&self, hash: &H256) -> bool {
self.extras_db.exists_with_cache(&self.block_details, hash)
self.db.exists_with_cache(DB_COL_EXTRA, &self.block_details, hash)
}
/// Get raw block data
fn block(&self, hash: &H256) -> Option<Bytes> {
match (self.block_header_data(hash), self.block_body(hash)) {
(Some(header), Some(body)) => {
let mut block = RlpStream::new_list(3);
let body_rlp = Rlp::new(&body);
block.append_raw(&header, 1);
block.append_raw(body_rlp.at(0).as_raw(), 1);
block.append_raw(body_rlp.at(1).as_raw(), 1);
Some(block.out())
},
_ => None,
}
}
/// Get block header data
fn block_header_data(&self, hash: &H256) -> Option<Bytes> {
// Check cache first
{
let read = self.blocks.read();
let read = self.block_headers.read();
if let Some(v) = read.get(hash) {
return Some(v.clone());
}
}
let opt = self.blocks_db.get(hash)
// Check if it's the best block
{
let best_block = self.best_block.read();
if &best_block.hash == hash {
return Some(Rlp::new(&best_block.block).at(0).as_raw().to_vec());
}
}
// Read from DB and populate cache
let opt = self.db.get(DB_COL_HEADERS, hash)
.expect("Low level database error. Some issue with disk?");
self.note_used(CacheID::Block(hash.clone()));
self.note_used(CacheID::BlockHeader(hash.clone()));
match opt {
Some(b) => {
let bytes: Bytes = UntrustedRlp::new(&b).decompress(RlpType::Blocks).to_vec();
let mut write = self.blocks.write();
let mut write = self.block_headers.write();
write.insert(hash.clone(), bytes.clone());
Some(bytes)
},
None => None
}
}
/// Get block body data
fn block_body(&self, hash: &H256) -> Option<Bytes> {
// Check cache first
{
let read = self.block_bodies.read();
if let Some(v) = read.get(hash) {
return Some(v.clone());
}
}
// Check if it's the best block
{
let best_block = self.best_block.read();
if &best_block.hash == hash {
return Some(Self::block_to_body(&best_block.block));
}
}
// Read from DB and populate cache
let opt = self.db.get(DB_COL_BODIES, hash)
.expect("Low level database error. Some issue with disk?");
self.note_used(CacheID::BlockBody(hash.clone()));
match opt {
Some(b) => {
let bytes: Bytes = UntrustedRlp::new(&b).decompress(RlpType::Blocks).to_vec();
let mut write = self.block_bodies.write();
write.insert(hash.clone(), bytes.clone());
Some(bytes)
},
@@ -195,25 +265,25 @@ impl BlockProvider for BlockChain {
/// Get the familial details concerning a block.
fn block_details(&self, hash: &H256) -> Option<BlockDetails> {
self.note_used(CacheID::BlockDetails(hash.clone()));
self.extras_db.read_with_cache(&self.block_details, hash)
self.db.read_with_cache(DB_COL_EXTRA, &self.block_details, hash)
}
/// Get the hash of given block's number.
fn block_hash(&self, index: BlockNumber) -> Option<H256> {
self.note_used(CacheID::BlockHashes(index));
self.extras_db.read_with_cache(&self.block_hashes, &index)
self.db.read_with_cache(DB_COL_EXTRA, &self.block_hashes, &index)
}
/// Get the address of transaction with given hash.
fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress> {
self.note_used(CacheID::TransactionAddresses(hash.clone()));
self.extras_db.read_with_cache(&self.transaction_addresses, hash)
self.db.read_with_cache(DB_COL_EXTRA, &self.transaction_addresses, hash)
}
/// Get receipts of block with given hash.
fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
self.note_used(CacheID::BlockReceipts(hash.clone()));
self.extras_db.read_with_cache(&self.block_receipts, hash)
self.db.read_with_cache(DB_COL_EXTRA, &self.block_receipts, hash)
}
/// Returns numbers of blocks containing given bloom.
@@ -249,27 +319,7 @@ impl<'a> Iterator for AncestryIter<'a> {
impl BlockChain {
/// Create new instance of blockchain from given Genesis
pub fn new(config: Config, genesis: &[u8], path: &Path) -> BlockChain {
// open extras db
let mut extras_path = path.to_path_buf();
extras_path.push("extras");
let extras_db = match config.db_cache_size {
None => Database::open_default(extras_path.to_str().unwrap()).unwrap(),
Some(cache_size) => Database::open(
&DatabaseConfig::with_cache(cache_size/2),
extras_path.to_str().unwrap()).unwrap(),
};
// open blocks db
let mut blocks_path = path.to_path_buf();
blocks_path.push("blocks");
let blocks_db = match config.db_cache_size {
None => Database::open_default(blocks_path.to_str().unwrap()).unwrap(),
Some(cache_size) => Database::open(
&DatabaseConfig::with_cache(cache_size/2),
blocks_path.to_str().unwrap()).unwrap(),
};
pub fn new(config: Config, genesis: &[u8], db: Arc<Database>) -> BlockChain {
let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()};
(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
@@ -281,39 +331,21 @@ impl BlockChain {
elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX,
},
best_block: RwLock::new(BestBlock::default()),
blocks: RwLock::new(HashMap::new()),
block_headers: RwLock::new(HashMap::new()),
block_bodies: RwLock::new(HashMap::new()),
block_details: RwLock::new(HashMap::new()),
block_hashes: RwLock::new(HashMap::new()),
transaction_addresses: RwLock::new(HashMap::new()),
blocks_blooms: RwLock::new(HashMap::new()),
block_receipts: RwLock::new(HashMap::new()),
extras_db: extras_db,
blocks_db: blocks_db,
db: db.clone(),
cache_man: RwLock::new(cache_man),
insert_lock: Mutex::new(()),
};
// load best block
let best_block_hash = match bc.extras_db.get(b"best").unwrap() {
let best_block_hash = match bc.db.get(DB_COL_EXTRA, b"best").unwrap() {
Some(best) => {
let new_best = H256::from_slice(&best);
if !bc.blocks_db.get(&new_best).unwrap().is_some() {
warn!("Best block {} not found", new_best.hex());
}
/* TODO: enable this once the best block issue is resolved
while !bc.blocks_db.get(&new_best).unwrap().is_some() {
match bc.rewind() {
Some(h) => {
new_best = h;
}
None => {
warn!("Can't rewind blockchain");
break;
}
}
info!("Restored mismatched best block. Was: {}, new: {}", H256::from_slice(&best).hex(), new_best.hex());
}*/
new_best
H256::from_slice(&best)
}
None => {
// best block does not exist
@@ -329,25 +361,32 @@ impl BlockChain {
children: vec![]
};
let block_batch = DBTransaction::new();
block_batch.put(&hash, genesis).unwrap();
bc.blocks_db.write(block_batch).expect("Low level database error. Some issue with disk?");
let batch = DBTransaction::new();
batch.write(&hash, &details);
batch.write(&header.number(), &hash);
batch.put(b"best", &hash).unwrap();
bc.extras_db.write(batch).unwrap();
let batch = DBTransaction::new(&db);
batch.put(DB_COL_HEADERS, &hash, block.header_rlp().as_raw()).unwrap();
batch.put(DB_COL_BODIES, &hash, &Self::block_to_body(&genesis)).unwrap();
batch.write(DB_COL_EXTRA, &hash, &details);
batch.write(DB_COL_EXTRA, &header.number(), &hash);
batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
bc.db.write(batch).expect("Low level database error. Some issue with disk?");
hash
}
};
{
// Fetch best block details
let best_block_number = bc.block_number(&best_block_hash).unwrap();
let best_block_total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty;
let best_block_rlp = bc.block(&best_block_hash).unwrap();
// and write them
let mut best_block = bc.best_block.write();
best_block.number = bc.block_number(&best_block_hash).unwrap();
best_block.total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty;
best_block.hash = best_block_hash;
*best_block = BestBlock {
number: best_block_number,
total_difficulty: best_block_total_difficulty,
hash: best_block_hash,
block: best_block_rlp,
};
}
bc
@@ -356,44 +395,52 @@ impl BlockChain {
/// Returns true if the given parent block has given child
/// (though not necessarily a part of the canon chain).
fn is_known_child(&self, parent: &H256, hash: &H256) -> bool {
self.extras_db.read_with_cache(&self.block_details, parent).map_or(false, |d| d.children.contains(hash))
self.db.read_with_cache(DB_COL_EXTRA, &self.block_details, parent).map_or(false, |d| d.children.contains(hash))
}
/// Rewind to a previous block
#[cfg(test)]
fn rewind(&self) -> Option<H256> {
use db::Key;
let batch = DBTransaction::new();
let batch = self.db.transaction();
// track back to the best block we have in the blocks database
if let Some(best_block_hash) = self.extras_db.get(b"best").unwrap() {
if let Some(best_block_hash) = self.db.get(DB_COL_EXTRA, b"best").unwrap() {
let best_block_hash = H256::from_slice(&best_block_hash);
if best_block_hash == self.genesis_hash() {
return None;
}
if let Some(extras) = self.extras_db.read(&best_block_hash) as Option<BlockDetails> {
if let Some(extras) = self.db.read(DB_COL_EXTRA, &best_block_hash) as Option<BlockDetails> {
type DetailsKey = Key<BlockDetails, Target=H264>;
batch.delete(&(DetailsKey::key(&best_block_hash))).unwrap();
batch.delete(DB_COL_EXTRA, &(DetailsKey::key(&best_block_hash))).unwrap();
let hash = extras.parent;
let range = extras.number as bc::Number .. extras.number as bc::Number;
let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
let changes = chain.replace(&range, vec![]);
for (k, v) in changes.into_iter() {
batch.write(&LogGroupPosition::from(k), &BloomGroup::from(v));
batch.write(DB_COL_EXTRA, &LogGroupPosition::from(k), &BloomGroup::from(v));
}
batch.put(b"best", &hash).unwrap();
batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
let best_block_total_difficulty = self.block_details(&hash).unwrap().total_difficulty;
let best_block_rlp = self.block(&hash).unwrap();
let mut best_block = self.best_block.write();
best_block.number = extras.number - 1;
best_block.total_difficulty = self.block_details(&hash).unwrap().total_difficulty;
best_block.hash = hash;
*best_block = BestBlock {
number: extras.number - 1,
total_difficulty: best_block_total_difficulty,
hash: hash,
block: best_block_rlp,
};
// update parent extras
if let Some(mut details) = self.extras_db.read(&hash) as Option<BlockDetails> {
if let Some(mut details) = self.db.read(DB_COL_EXTRA, &hash) as Option<BlockDetails> {
details.children.clear();
batch.write(&hash, &details);
batch.write(DB_COL_EXTRA, &hash, &details);
}
self.extras_db.write(batch).unwrap();
self.db.write(batch).expect("Writing to db failed");
self.block_details.write().clear();
self.block_hashes.write().clear();
self.blocks.write().clear();
self.block_headers.write().clear();
self.block_bodies.write().clear();
self.block_receipts.write().clear();
return Some(hash);
}
@@ -500,7 +547,7 @@ impl BlockChain {
/// Inserts the block into backing cache database.
/// Expects the block to be valid and already verified.
/// If the block is already known, does nothing.
pub fn insert_block(&self, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
pub fn insert_block(&self, batch: &DBTransaction, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
// create views onto rlp
let block = BlockView::new(bytes);
let header = block.header_view();
@@ -510,11 +557,13 @@ impl BlockChain {
return ImportRoute::none();
}
let compressed = UntrustedRlp::new(bytes).compress(RlpType::Blocks).to_vec();
let block_rlp = UntrustedRlp::new(bytes);
let compressed_header = block_rlp.at(0).unwrap().compress(RlpType::Blocks);
let compressed_body = UntrustedRlp::new(&Self::block_to_body(bytes)).compress(RlpType::Blocks);
let _lock = self.insert_lock.lock();
// store block in db
self.blocks_db.put(&hash, &compressed).unwrap();
batch.put(DB_COL_HEADERS, &hash, &compressed_header).unwrap();
batch.put(DB_COL_BODIES, &hash, &compressed_body).unwrap();
let info = self.block_info(bytes);
@@ -527,13 +576,14 @@ impl BlockChain {
);
}
self.apply_update(ExtrasUpdate {
self.apply_update(batch, ExtrasUpdate {
block_hashes: self.prepare_block_hashes_update(bytes, &info),
block_details: self.prepare_block_details_update(bytes, &info),
block_receipts: self.prepare_block_receipts_update(receipts, &info),
transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
info: info.clone(),
block: bytes,
});
ImportRoute::from(info)
@@ -582,26 +632,24 @@ impl BlockChain {
}
/// Applies extras update.
fn apply_update(&self, update: ExtrasUpdate) {
let batch = DBTransaction::new();
fn apply_update(&self, batch: &DBTransaction, update: ExtrasUpdate) {
{
for hash in update.block_details.keys().cloned() {
self.note_used(CacheID::BlockDetails(hash));
}
let mut write_details = self.block_details.write();
batch.extend_with_cache(&mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
}
{
let mut write_receipts = self.block_receipts.write();
batch.extend_with_cache(&mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
}
{
let mut write_blocks_blooms = self.blocks_blooms.write();
batch.extend_with_cache(&mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove);
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_blocks_blooms, update.blocks_blooms, CacheUpdatePolicy::Remove);
}
// These cached values must be updated last with all three locks taken to avoid
@@ -612,11 +660,12 @@ impl BlockChain {
match update.info.location {
BlockLocation::Branch => (),
_ => {
batch.put(b"best", &update.info.hash).unwrap();
batch.put(DB_COL_EXTRA, b"best", &update.info.hash).unwrap();
*best_block = BestBlock {
hash: update.info.hash,
number: update.info.number,
total_difficulty: update.info.total_difficulty
total_difficulty: update.info.total_difficulty,
block: update.block.to_vec(),
};
}
}
@@ -624,11 +673,8 @@ impl BlockChain {
let mut write_hashes = self.block_hashes.write();
let mut write_txs = self.transaction_addresses.write();
batch.extend_with_cache(&mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Remove);
batch.extend_with_cache(&mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Remove);
// update extras database
self.extras_db.write(batch).unwrap();
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Remove);
batch.extend_with_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Remove);
}
}
@@ -681,7 +727,7 @@ impl BlockChain {
block_hashes.insert(number, info.hash.clone());
},
BlockLocation::BranchBecomingCanonChain(ref data) => {
let ancestor_number = self.block_number(&data.ancestor).unwrap();
let ancestor_number = self.block_number(&data.ancestor).expect("Block number of ancestor is always in DB");
let start_number = ancestor_number + 1;
for (index, hash) in data.enacted.iter().cloned().enumerate() {
@@ -775,8 +821,8 @@ impl BlockChain {
let range = start_number as bc::Number..self.best_block_number() as bc::Number;
let mut blooms: Vec<bc::Bloom> = data.enacted.iter()
.map(|hash| self.block(hash).unwrap())
.map(|bytes| BlockView::new(&bytes).header_view().log_bloom())
.map(|hash| self.block_header_data(hash).unwrap())
.map(|bytes| HeaderView::new(&bytes).log_bloom())
.map(Bloom::from)
.map(Into::into)
.collect();
@@ -808,10 +854,16 @@ impl BlockChain {
self.best_block.read().total_difficulty
}
/// Get best block header
pub fn best_block_header(&self) -> Bytes {
let block = self.best_block.read();
BlockView::new(&block.block).header_view().rlp().as_raw().to_vec()
}
/// Get current cache size.
pub fn cache_size(&self) -> CacheSize {
CacheSize {
blocks: self.blocks.read().heap_size_of_children(),
blocks: self.block_headers.read().heap_size_of_children() + self.block_bodies.read().heap_size_of_children(),
block_details: self.block_details.read().heap_size_of_children(),
transaction_addresses: self.transaction_addresses.read().heap_size_of_children(),
blocks_blooms: self.blocks_blooms.read().heap_size_of_children(),
@@ -851,7 +903,8 @@ impl BlockChain {
for i in 0..COLLECTION_QUEUE_SIZE {
{
trace!("Cache cleanup round started {}, cache_size = {}", i, self.cache_size().total());
let mut blocks = self.blocks.write();
let mut block_headers = self.block_headers.write();
let mut block_bodies = self.block_bodies.write();
let mut block_details = self.block_details.write();
let mut block_hashes = self.block_hashes.write();
let mut transaction_addresses = self.transaction_addresses.write();
@@ -862,7 +915,8 @@ impl BlockChain {
for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
cache_man.in_use.remove(&id);
match id {
CacheID::Block(h) => { blocks.remove(&h); },
CacheID::BlockHeader(h) => { block_headers.remove(&h); },
CacheID::BlockBody(h) => { block_bodies.remove(&h); },
CacheID::BlockDetails(h) => { block_details.remove(&h); }
CacheID::BlockHashes(h) => { block_hashes.remove(&h); }
CacheID::TransactionAddresses(h) => { transaction_addresses.remove(&h); }
@@ -875,7 +929,8 @@ impl BlockChain {
// TODO: handle block_hashes properly.
block_hashes.clear();
blocks.shrink_to_fit();
block_headers.shrink_to_fit();
block_bodies.shrink_to_fit();
block_details.shrink_to_fit();
block_hashes.shrink_to_fit();
transaction_addresses.shrink_to_fit();
@@ -888,20 +943,60 @@ impl BlockChain {
// TODO: m_lastCollection = chrono::system_clock::now();
}
/// Create a block body from a block.
pub fn block_to_body(block: &[u8]) -> Bytes {
let mut body = RlpStream::new_list(2);
let block_rlp = Rlp::new(block);
body.append_raw(block_rlp.at(1).as_raw(), 1);
body.append_raw(block_rlp.at(2).as_raw(), 1);
body.out()
}
}
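
Note that block_to_body and the reassembly in block above are inverses: a full block RLP is a three-item list [header, transactions, uncles], the body keeps the last two items, and the header is prepended again on read. A sketch of the round trip, using the same in-tree rlp helpers as the code above:

// Splitting a block and rebuilding it yields the original bytes.
fn roundtrip(block: &[u8]) -> Bytes {
    let body = BlockChain::block_to_body(block); // [transactions, uncles]
    let header = Rlp::new(block).at(0).as_raw().to_vec();
    let body_rlp = Rlp::new(&body);
    let mut full = RlpStream::new_list(3);
    full.append_raw(&header, 1);
    full.append_raw(body_rlp.at(0).as_raw(), 1);
    full.append_raw(body_rlp.at(1).as_raw(), 1);
    full.out() // equals the input block RLP
}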
#[cfg(test)]
mod tests {
#![cfg_attr(feature="dev", allow(similar_names))]
use std::str::FromStr;
use std::sync::Arc;
use rustc_serialize::hex::FromHex;
use util::{Database, DatabaseConfig};
use util::hash::*;
use util::sha3::Hashable;
use receipt::Receipt;
use blockchain::{BlockProvider, BlockChain, Config, ImportRoute};
use tests::helpers::*;
use devtools::*;
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
use views::BlockView;
use client;
fn new_db(path: &str) -> Arc<Database> {
Arc::new(Database::open(&DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS), path).unwrap())
}
#[test]
fn should_cache_best_block() {
// given
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let first = canon_chain.generate(&mut finalizer).unwrap();
let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.best_block_number(), 0);
// when
let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
// NOTE no db.write here (we want to check if best block is cached)
// then
assert_eq!(bc.best_block_number(), 1);
assert!(bc.block(&bc.best_block_hash()).is_some(), "Best block should be queryable even without DB write.");
}
#[test]
fn basic_blockchain_insert() {
@@ -913,16 +1008,18 @@ mod tests {
let first_hash = BlockView::new(&first).header_view().sha3();
let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.genesis_hash(), genesis_hash.clone());
assert_eq!(bc.best_block_number(), 0);
assert_eq!(bc.best_block_hash(), genesis_hash.clone());
assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
assert_eq!(bc.block_hash(1), None);
assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]);
bc.insert_block(&first, vec![]);
let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
db.write(batch).unwrap();
assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
assert_eq!(bc.best_block_number(), 1);
@@ -941,14 +1038,17 @@ mod tests {
let genesis_hash = BlockView::new(&genesis).header_view().sha3();
let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let mut block_hashes = vec![genesis_hash.clone()];
let batch = db.transaction();
for _ in 0..10 {
let block = canon_chain.generate(&mut finalizer).unwrap();
block_hashes.push(BlockView::new(&block).header_view().sha3());
bc.insert_block(&block, vec![]);
bc.insert_block(&batch, &block, vec![]);
}
db.write(batch).unwrap();
block_hashes.reverse();
@@ -973,17 +1073,21 @@ mod tests {
let b5a = canon_chain.generate(&mut finalizer).unwrap();
let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
bc.insert_block(&b1a, vec![]);
bc.insert_block(&b1b, vec![]);
bc.insert_block(&b2a, vec![]);
bc.insert_block(&b2b, vec![]);
bc.insert_block(&b3a, vec![]);
bc.insert_block(&b3b, vec![]);
bc.insert_block(&b4a, vec![]);
bc.insert_block(&b4b, vec![]);
bc.insert_block(&b5a, vec![]);
bc.insert_block(&b5b, vec![]);
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let batch = db.transaction();
bc.insert_block(&batch, &b1a, vec![]);
bc.insert_block(&batch, &b1b, vec![]);
bc.insert_block(&batch, &b2a, vec![]);
bc.insert_block(&batch, &b2b, vec![]);
bc.insert_block(&batch, &b3a, vec![]);
bc.insert_block(&batch, &b3b, vec![]);
bc.insert_block(&batch, &b4a, vec![]);
bc.insert_block(&batch, &b4b, vec![]);
bc.insert_block(&batch, &b5a, vec![]);
bc.insert_block(&batch, &b5b, vec![]);
db.write(batch).unwrap();
assert_eq!(
[&b4b, &b3b, &b2b].iter().map(|b| BlockView::new(b).header()).collect::<Vec<_>>(),
@@ -1014,11 +1118,17 @@ mod tests {
let best_block_hash = b3a_hash.clone();
let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
let ir1 = bc.insert_block(&b1, vec![]);
let ir2 = bc.insert_block(&b2, vec![]);
let ir3b = bc.insert_block(&b3b, vec![]);
let ir3a = bc.insert_block(&b3a, vec![]);
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let batch = db.transaction();
let ir1 = bc.insert_block(&batch, &b1, vec![]);
let ir2 = bc.insert_block(&batch, &b2, vec![]);
let ir3b = bc.insert_block(&batch, &b3b, vec![]);
db.write(batch).unwrap();
let batch = db.transaction();
let ir3a = bc.insert_block(&batch, &b3a, vec![]);
db.write(batch).unwrap();
assert_eq!(ir1, ImportRoute {
enacted: vec![b1_hash],
@@ -1119,14 +1229,19 @@ mod tests {
let temp = RandomTempPath::new();
{
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.best_block_hash(), genesis_hash);
bc.insert_block(&first, vec![]);
let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
db.write(batch).unwrap();
assert_eq!(bc.best_block_hash(), first_hash);
}
{
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.best_block_hash(), first_hash);
}
}
@@ -1179,8 +1294,11 @@ mod tests {
let b1_hash = H256::from_str("f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3").unwrap();
let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
bc.insert_block(&b1, vec![]);
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let batch = db.transaction();
bc.insert_block(&batch, &b1, vec![]);
db.write(batch).unwrap();
let transactions = bc.transactions(&b1_hash).unwrap();
assert_eq!(transactions.len(), 7);
@@ -1189,6 +1307,13 @@
}
}
fn insert_block(db: &Arc<Database>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
let batch = db.transaction();
let res = bc.insert_block(&batch, bytes, receipts);
db.write(batch).unwrap();
res
}
#[test]
fn test_bloom_filter_simple() {
// TODO: From here
@@ -1210,27 +1335,28 @@ mod tests {
let b2a = canon_chain.with_bloom(bloom_ba.clone()).generate(&mut finalizer).unwrap();
let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
assert_eq!(blocks_b1, vec![]);
assert_eq!(blocks_b2, vec![]);
bc.insert_block(&b1, vec![]);
insert_block(&db, &bc, &b1, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
assert_eq!(blocks_b1, vec![1]);
assert_eq!(blocks_b2, vec![]);
bc.insert_block(&b2, vec![]);
insert_block(&db, &bc, &b2, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
assert_eq!(blocks_b1, vec![1]);
assert_eq!(blocks_b2, vec![2]);
// hasn't been forked yet
bc.insert_block(&b1a, vec![]);
insert_block(&db, &bc, &b1a, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
@@ -1239,7 +1365,7 @@ mod tests {
assert_eq!(blocks_ba, vec![]);
// fork has happened
bc.insert_block(&b2a, vec![]);
insert_block(&db, &bc, &b2a, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
@@ -1248,7 +1374,7 @@ mod tests {
assert_eq!(blocks_ba, vec![1, 2]);
// fork back
bc.insert_block(&b3, vec![]);
insert_block(&db, &bc, &b3, vec![]);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
@@ -1266,21 +1392,25 @@ mod tests {
let temp = RandomTempPath::new();
{
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
let batch = db.transaction();
// create a longer fork
for _ in 0..5 {
let canon_block = canon_chain.generate(&mut finalizer).unwrap();
bc.insert_block(&canon_block, vec![]);
bc.insert_block(&batch, &canon_block, vec![]);
}
assert_eq!(bc.best_block_number(), 5);
bc.insert_block(&uncle, vec![]);
bc.insert_block(&batch, &uncle, vec![]);
db.write(batch).unwrap();
}
// re-loading the blockchain should load the correct best block.
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
assert_eq!(bc.best_block_number(), 5);
}
@@ -1296,10 +1426,13 @@ mod tests {
let second_hash = BlockView::new(&second).header_view().sha3();
let temp = RandomTempPath::new();
let bc = BlockChain::new(Config::default(), &genesis, temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
bc.insert_block(&first, vec![]);
bc.insert_block(&second, vec![]);
let batch = db.transaction();
bc.insert_block(&batch, &first, vec![]);
bc.insert_block(&batch, &second, vec![]);
db.write(batch).unwrap();
assert_eq!(bc.rewind(), Some(first_hash.clone()));
assert!(!bc.is_known(&second_hash));


@@ -6,9 +6,11 @@ use blooms::BloomGroup;
use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition};
/// Block extras update info.
pub struct ExtrasUpdate {
pub struct ExtrasUpdate<'a> {
/// Block info.
pub info: BlockInfo,
/// Current block uncompressed rlp bytes
pub block: &'a [u8],
/// Modified block hashes.
pub block_hashes: HashMap<BlockNumber, H256>,
/// Modified block details.


@@ -24,9 +24,9 @@ use std::time::{Instant};
use time::precise_time_ns;
// util
use util::{journaldb, rlp, Bytes, Stream, View, PerfTimer, Itertools, Mutex, RwLock};
use util::{journaldb, rlp, Bytes, View, PerfTimer, Itertools, Mutex, RwLock};
use util::journaldb::JournalDB;
use util::rlp::{RlpStream, Rlp, UntrustedRlp};
use util::rlp::{UntrustedRlp};
use util::numbers::*;
use util::panics::*;
use util::io::*;
@@ -34,14 +34,13 @@ use util::sha3::*;
use util::kvdb::*;
// other
use views::BlockView;
use views::{BlockView, HeaderView, BodyView};
use error::{ImportError, ExecutionError, ReplayError, BlockError, ImportResult};
use header::BlockNumber;
use state::State;
use spec::Spec;
use basic_types::Seal;
use engines::Engine;
use views::HeaderView;
use service::ClientIoMessage;
use env_info::LastHashes;
use verification;
@@ -123,6 +122,7 @@ pub struct Client {
chain: Arc<BlockChain>,
tracedb: Arc<TraceDB<BlockChain>>,
engine: Arc<Box<Engine>>,
db: Arc<Database>,
state_db: Mutex<Box<JournalDB>>,
block_queue: BlockQueue,
report: RwLock<ClientReport>,
@@ -141,6 +141,19 @@ pub struct Client {
}
const HISTORY: u64 = 1200;
// database columns
/// Column for State
pub const DB_COL_STATE: Option<u32> = Some(0);
/// Column for Block headers
pub const DB_COL_HEADERS: Option<u32> = Some(1);
/// Column for Block bodies
pub const DB_COL_BODIES: Option<u32> = Some(2);
/// Column for Extras
pub const DB_COL_EXTRA: Option<u32> = Some(3);
/// Column for Traces
pub const DB_COL_TRACE: Option<u32> = Some(4);
/// Number of columns in DB
pub const DB_NO_OF_COLUMNS: Option<u32> = Some(5);
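
With these constants, every component shares a single Database handle opened with the full column set. A hedged sketch of the open-and-read path, assembled from calls that appear elsewhere in this diff (path is a &str; error handling as used in the diff):

// Open the consolidated DB and fetch a header RLP by block hash.
let mut db_config = DatabaseConfig::with_columns(DB_NO_OF_COLUMNS);
db_config.cache_size = config.db_cache_size;
let db = Arc::new(Database::open(&db_config, path).expect("Error opening database"));
let header_rlp = db.get(DB_COL_HEADERS, &hash)
    .expect("Low level database error. Some issue with disk?");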
/// Append a path element to the given path and return the string.
pub fn append_path<P>(path: P, item: &str) -> String where P: AsRef<Path> {
@@ -160,36 +173,25 @@ impl Client {
) -> Result<Arc<Client>, ClientError> {
let path = path.to_path_buf();
let gb = spec.genesis_block();
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path));
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone())));
let mut db_config = DatabaseConfig::with_columns(DB_NO_OF_COLUMNS);
db_config.cache_size = config.db_cache_size;
db_config.compaction = config.db_compaction.compaction_profile();
let mut state_db_config = match config.db_cache_size {
None => DatabaseConfig::default(),
Some(cache_size) => DatabaseConfig::with_cache(cache_size),
};
state_db_config = state_db_config.compaction(config.db_compaction.compaction_profile());
let mut state_db = journaldb::new(
&append_path(&path, "state"),
config.pruning,
state_db_config
);
let db = Arc::new(Database::open(&db_config, &path.to_str().unwrap()).expect("Error opening database"));
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, db.clone()));
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, db.clone(), chain.clone())));
let mut state_db = journaldb::new(db.clone(), config.pruning, DB_COL_STATE);
if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) {
state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error committing genesis state to state DB");
let batch = DBTransaction::new(&db);
state_db.commit(&batch, 0, &spec.genesis_header().hash(), None).expect("Error committing genesis state to state DB");
db.write(batch).expect("Error writing genesis state to state DB");
}
if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) {
warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex());
}
/* TODO: enable this once the best block issue is resolved
while !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) {
warn!("State root not found for block #{} ({}), recovering...", chain.best_block_number(), chain.best_block_hash().hex());
chain.rewind();
}*/
let engine = Arc::new(spec.engine);
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone());
@@ -204,6 +206,7 @@ impl Client {
chain: chain,
tracedb: tracedb,
engine: engine,
db: db,
state_db: Mutex::new(state_db),
block_queue: block_queue,
report: RwLock::new(Default::default()),
@@ -432,21 +435,23 @@ impl Client {
//let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new));
let batch = DBTransaction::new(&self.db);
// CHECK! I *think* this is fine, even if the state_root is equal to another
// already-imported block of the same number.
// TODO: Prove it with a test.
block.drain().commit(number, hash, ancient).expect("State DB commit failed.");
block.drain().commit(&batch, number, hash, ancient).expect("State DB commit failed.");
// And update the chain after commit to prevent race conditions
// (when something is in chain but you are not able to fetch details)
let route = self.chain.insert_block(block_data, receipts);
self.tracedb.import(TraceImportRequest {
let route = self.chain.insert_block(&batch, block_data, receipts);
self.tracedb.import(&batch, TraceImportRequest {
traces: traces.into(),
block_hash: hash.clone(),
block_number: number,
enacted: route.enacted.clone(),
retracted: route.retracted.len()
});
// Final commit to the DB
self.db.write(batch).expect("State DB write failed.");
self.update_last_hashes(&parent, hash);
route
}
@@ -674,17 +679,17 @@ impl BlockChainClient for Client {
fn replay(&self, id: TransactionID, analytics: CallAnalytics) -> Result<Executed, ReplayError> {
let address = try!(self.transaction_address(id).ok_or(ReplayError::TransactionNotFound));
let block_data = try!(self.block(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
let header_data = try!(self.block_header(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
let body_data = try!(self.block_body(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
let mut state = try!(self.state_at_beginning(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
let block = BlockView::new(&block_data);
let txs = block.transactions();
let txs = BodyView::new(&body_data).transactions();
if address.index >= txs.len() {
return Err(ReplayError::TransactionNotFound);
}
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
let view = block.header_view();
let view = HeaderView::new(&header_data);
let last_hashes = self.build_last_hashes(view.hash());
let mut env_info = EnvInfo {
number: view.number(),
@@ -719,20 +724,16 @@ impl BlockChainClient for Client {
}
}
fn best_block_header(&self) -> Bytes {
self.chain.best_block_header()
}
fn block_header(&self, id: BlockID) -> Option<Bytes> {
Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_header_data(&hash))
}
fn block_body(&self, id: BlockID) -> Option<Bytes> {
Self::block_hash(&self.chain, id).and_then(|hash| {
self.chain.block(&hash).map(|bytes| {
let rlp = Rlp::new(&bytes);
let mut body = RlpStream::new_list(2);
body.append_raw(rlp.at(1).as_raw(), 1);
body.append_raw(rlp.at(2).as_raw(), 1);
body.out()
})
})
Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_body(&hash))
}
fn block(&self, id: BlockID) -> Option<Bytes> {
@@ -789,13 +790,13 @@ impl BlockChainClient for Client {
fn uncle(&self, id: UncleID) -> Option<Bytes> {
let index = id.position;
self.block(id.block).and_then(|block| BlockView::new(&block).uncle_rlp_at(index))
self.block_body(id.block).and_then(|body| BodyView::new(&body).uncle_rlp_at(index))
}
fn transaction_receipt(&self, id: TransactionID) -> Option<LocalizedReceipt> {
self.transaction_address(id).and_then(|address| {
let t = self.chain.block(&address.block_hash)
.and_then(|block| BlockView::new(&block).localized_transaction_at(address.index));
self.transaction_address(id).and_then(|address| self.chain.block_number(&address.block_hash).and_then(|block_number| {
let t = self.chain.block_body(&address.block_hash)
.and_then(|block| BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index));
match (t, self.chain.transaction_receipt(&address)) {
(Some(tx), Some(receipt)) => {
@@ -834,7 +835,7 @@ impl BlockChainClient for Client {
},
_ => None
}
})
}))
}
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
@@ -910,7 +911,7 @@ impl BlockChainClient for Client {
blocks.into_iter()
.filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash)))
.filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
.filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
.filter_map(|(number, hash, receipts)| self.chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes())))
.flat_map(|(number, hash, receipts, hashes)| {
let mut log_index = 0;
receipts.into_iter()


@@ -248,7 +248,8 @@ impl TestBlockChainClient {
pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
let temp = RandomTempPath::new();
let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default());
let db = Database::open_default(temp.as_str()).unwrap();
let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, None);
GuardedTempResult {
_temp: temp,
result: Some(journal_db)
@@ -363,6 +364,10 @@ impl BlockChainClient for TestBlockChainClient {
unimplemented!();
}
fn best_block_header(&self) -> Bytes {
self.block_header(BlockID::Hash(self.chain_info().best_block_hash)).expect("Best block always has a header.")
}
fn block_header(&self, id: BlockID) -> Option<Bytes> {
self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec()))
}


@@ -145,10 +145,7 @@ pub trait BlockChainClient : Sync + Send {
fn chain_info(&self) -> BlockChainInfo;
/// Get the best block header.
fn best_block_header(&self) -> Bytes {
// TODO: lock blockchain only once
self.block_header(BlockID::Hash(self.chain_info().best_block_hash)).unwrap()
}
fn best_block_header(&self) -> Bytes;
/// Returns numbers of blocks containing given bloom.
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option<Vec<BlockNumber>>;


@@ -62,14 +62,14 @@ pub trait Key<T> {
/// Should be used to write value into database.
pub trait Writable {
/// Writes the value into the database.
fn write<T, R>(&self, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]>;
fn write<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]>;
/// Writes the value into the database and updates the cache.
fn write_with_cache<K, T, R>(&self, cache: &mut Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
fn write_with_cache<K, T, R>(&self, col: Option<u32>, cache: &mut Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
K: Key<T, Target = R> + Hash + Eq,
T: Encodable,
R: Deref<Target = [u8]> {
self.write(&key, &value);
self.write(col, &key, &value);
match policy {
CacheUpdatePolicy::Overwrite => {
cache.insert(key, value);
@@ -81,20 +81,20 @@ }
}
/// Writes the values into the database and updates the cache.
fn extend_with_cache<K, T, R>(&self, cache: &mut Cache<K, T>, values: HashMap<K, T>, policy: CacheUpdatePolicy) where
fn extend_with_cache<K, T, R>(&self, col: Option<u32>, cache: &mut Cache<K, T>, values: HashMap<K, T>, policy: CacheUpdatePolicy) where
K: Key<T, Target = R> + Hash + Eq,
T: Encodable,
R: Deref<Target = [u8]> {
match policy {
CacheUpdatePolicy::Overwrite => {
for (key, value) in values.into_iter() {
self.write(&key, &value);
self.write(col, &key, &value);
cache.insert(key, value);
}
},
CacheUpdatePolicy::Remove => {
for (key, value) in &values {
self.write(key, value);
self.write(col, key, value);
cache.remove(key);
}
},
@@ -105,12 +105,12 @@
/// Should be used to read values from database.
pub trait Readable {
/// Returns value for given key.
fn read<T, R>(&self, key: &Key<T, Target = R>) -> Option<T> where
fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T> where
T: Decodable,
R: Deref<Target = [u8]>;
/// Returns value for given key either in cache or in database.
fn read_with_cache<K, T, C>(&self, cache: &RwLock<C>, key: &K) -> Option<T> where
fn read_with_cache<K, T, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> Option<T> where
K: Key<T> + Eq + Hash + Clone,
T: Clone + Decodable,
C: Cache<K, T> {
@@ -121,7 +121,7 @@ pub trait Readable {
}
}
self.read(key).map(|value: T|{
self.read(col, key).map(|value: T|{
let mut write = cache.write();
write.insert(key.clone(), value.clone());
value
@@ -129,10 +129,10 @@
}
/// Returns true if given value exists.
fn exists<T, R>(&self, key: &Key<T, Target = R>) -> bool where R: Deref<Target= [u8]>;
fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool where R: Deref<Target= [u8]>;
/// Returns true if given value exists either in cache or in database.
fn exists_with_cache<K, T, R, C>(&self, cache: &RwLock<C>, key: &K) -> bool where
fn exists_with_cache<K, T, R, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> bool where
K: Eq + Hash + Key<T, Target = R>,
R: Deref<Target = [u8]>,
C: Cache<K, T> {
@@ -143,13 +143,13 @@
}
}
self.exists::<T, R>(key)
self.exists::<T, R>(col, key)
}
}
impl Writable for DBTransaction {
fn write<T, R>(&self, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]> {
let result = self.put(&key.key(), &encode(value));
fn write<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]> {
let result = self.put(col, &key.key(), &encode(value));
if let Err(err) = result {
panic!("db put failed, key: {:?}, err: {:?}", &key.key() as &[u8], err);
}
@@ -157,8 +157,8 @@ impl Writable for DBTransaction {
}
impl Readable for Database {
fn read<T, R>(&self, key: &Key<T, Target = R>) -> Option<T> where T: Decodable, R: Deref<Target = [u8]> {
let result = self.get(&key.key());
fn read<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> Option<T> where T: Decodable, R: Deref<Target = [u8]> {
let result = self.get(col, &key.key());
match result {
Ok(option) => option.map(|v| decode(&v)),
@@ -168,8 +168,8 @@ impl Readable for Database {
}
}
fn exists<T, R>(&self, key: &Key<T, Target = R>) -> bool where R: Deref<Target = [u8]> {
let result = self.get(&key.key());
fn exists<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) -> bool where R: Deref<Target = [u8]> {
let result = self.get(col, &key.key());
match result {
Ok(v) => v.is_some(),
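
The net effect of the Writable/Readable changes: every call site now names its target column instead of implying a whole database. A short usage sketch under the new signatures (the cache here stands for an RwLock'd HashMap as in blockchain.rs above):

// Column-aware write-through and cached read.
let batch = db.transaction();
batch.write(DB_COL_EXTRA, &hash, &details); // encode + put under the extras column
db.write(batch).expect("Writing to db failed");
let details: Option<BlockDetails> = db.read_with_cache(DB_COL_EXTRA, &cache, &hash);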


@@ -38,7 +38,7 @@ struct CallCreate {
impl From<ethjson::vm::Call> for CallCreate {
fn from(c: ethjson::vm::Call) -> Self {
let dst: Option<_> = c.destination.into();
let dst: Option<ethjson::hash::Address> = c.destination.into();
CallCreate {
data: c.data.into(),
destination: dst.map(Into::into),


@@ -49,7 +49,7 @@ fn do_json_test(json_data: &[u8]) -> Vec<String> {
fail_unless(t.gas_price == tx.gas_price.into());
fail_unless(t.nonce == tx.nonce.into());
fail_unless(t.value == tx.value.into());
let to: Option<_> = tx.to.into();
let to: Option<ethjson::hash::Address> = tx.to.into();
let to: Option<Address> = to.map(Into::into);
match t.action {
Action::Call(dest) => fail_unless(Some(dest) == to),


@@ -16,19 +16,22 @@
//! This migration compresses the state db.
use util::migration::SimpleMigration;
use util::migration::{SimpleMigration, Progress};
use util::rlp::{Compressible, UntrustedRlp, View, RlpType};
/// Compressing migration.
#[derive(Default)]
pub struct V8;
pub struct V8(Progress);
impl SimpleMigration for V8 {
fn version(&self) -> u32 {
8
}
fn columns(&self) -> Option<u32> { None }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
self.0.tick();
Some((key,UntrustedRlp::new(&value).compress(RlpType::Blocks).to_vec()))
}
}
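
Progress replaces the hand-rolled counter that ArchiveV7 used (see the v7 diff below): count keys and flush a dot every 100k. A sketch of the behaviour assumed here — the actual util::migration::Progress may differ in detail:

// Assumed shape of Progress, matching the counter it replaces.
#[derive(Default)]
pub struct Progress(usize);

impl Progress {
    pub fn tick(&mut self) {
        self.0 += 1;
        if self.0 == 100_000 {
            self.0 = 0;
            flush!("."); // in-tree macro: print without a trailing newline
        }
    }
}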


@@ -34,9 +34,10 @@ impl ToV6 {
}
impl SimpleMigration for ToV6 {
fn version(&self) -> u32 {
6
}
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 6 }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {


@@ -19,3 +19,7 @@
pub mod state;
pub mod blocks;
pub mod extras;
mod v9;
pub use self::v9::ToV9;
pub use self::v9::Extract;


@@ -22,7 +22,7 @@ use std::collections::HashMap;
use util::Bytes;
use util::hash::{Address, FixedHash, H256};
use util::kvdb::Database;
use util::migration::{Batch, Config, Error, Migration, SimpleMigration};
use util::migration::{Batch, Config, Error, Migration, SimpleMigration, Progress};
use util::rlp::{decode, Rlp, RlpStream, Stream, View};
use util::sha3::Hashable;
@@ -63,19 +63,16 @@ fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option<H256> {
/// Version for `ArchiveDB`.
#[derive(Default)]
pub struct ArchiveV7(usize);
pub struct ArchiveV7(Progress);
impl SimpleMigration for ArchiveV7 {
fn version(&self) -> u32 {
7
}
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 7 }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
self.0 += 1;
if self.0 == 100_000 {
self.0 = 0;
flush!(".");
}
self.0.tick();
if key.len() != 32 {
// metadata key, ignore.
@@ -109,7 +106,7 @@ impl OverlayRecentV7 {
// walk all journal entries in the database backwards.
// find migrations for any possible inserted keys.
fn walk_journal(&mut self, source: &Database) -> Result<(), Error> {
if let Some(val) = try!(source.get(V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
let mut era = decode::<u64>(&val);
loop {
let mut index: usize = 0;
@@ -120,7 +117,7 @@ impl OverlayRecentV7 {
r.out()
};
if let Some(journal_raw) = try!(source.get(&entry_key).map_err(Error::Custom)) {
if let Some(journal_raw) = try!(source.get(None, &entry_key).map_err(Error::Custom)) {
let rlp = Rlp::new(&journal_raw);
// migrate all inserted keys.
@@ -153,7 +150,7 @@ impl OverlayRecentV7 {
// replace all possible inserted/deleted keys with their migrated counterparts
// and commit the altered entries.
fn migrate_journal(&self, source: &Database, mut batch: Batch, dest: &mut Database) -> Result<(), Error> {
if let Some(val) = try!(source.get(V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
try!(batch.insert(V7_LATEST_ERA_KEY.into(), val.to_owned(), dest));
let mut era = decode::<u64>(&val);
@@ -166,7 +163,7 @@ impl OverlayRecentV7 {
r.out()
};
if let Some(journal_raw) = try!(source.get(&entry_key).map_err(Error::Custom)) {
if let Some(journal_raw) = try!(source.get(None, &entry_key).map_err(Error::Custom)) {
let rlp = Rlp::new(&journal_raw);
let id: H256 = rlp.val_at(0);
let mut inserted_keys: Vec<(H256, Bytes)> = Vec::new();
@@ -221,22 +218,25 @@ impl OverlayRecentV7 {
}
impl Migration for OverlayRecentV7 {
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 7 }
// walk all records in the database, attempting to migrate any possible and
// keeping records of those that we do. then migrate the journal using
// this information.
fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database) -> Result<(), Error> {
let mut batch = Batch::new(config);
fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config, col);
// check version metadata.
match try!(source.get(V7_VERSION_KEY).map_err(Error::Custom)) {
match try!(source.get(None, V7_VERSION_KEY).map_err(Error::Custom)) {
Some(ref version) if decode::<u32>(&*version) == DB_VERSION => {}
_ => return Err(Error::MigrationImpossible), // missing or wrong version
}
let mut count = 0;
for (key, value) in source.iter() {
for (key, value) in source.iter(None) {
count += 1;
if count == 100_000 {
count = 0;


@@ -0,0 +1,82 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! This migration consolidates all databases into a single one using Column Families.
use util::{Rlp, RlpStream, View, Stream};
use util::kvdb::Database;
use util::migration::{Batch, Config, Error, Migration, Progress};
/// Which part of block to preserve
pub enum Extract {
/// Extract block header RLP.
Header,
/// Extract block body RLP.
Body,
/// Don't change the value.
All,
}
/// Consolidation of extras/block/state databases into single one.
pub struct ToV9 {
progress: Progress,
column: Option<u32>,
extract: Extract,
}
impl ToV9 {
/// Creates a new V9 migration and assigns all `(key,value)` pairs from the `source` DB to the given Column Family
pub fn new(column: Option<u32>, extract: Extract) -> Self {
ToV9 {
progress: Progress::default(),
column: column,
extract: extract,
}
}
}
impl Migration for ToV9 {
fn columns(&self) -> Option<u32> { Some(5) }
fn version(&self) -> u32 { 9 }
fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config, self.column);
for (key, value) in source.iter(col) {
self.progress.tick();
match self.extract {
Extract::Header => {
try!(batch.insert(key.to_vec(), Rlp::new(&value).at(0).as_raw().to_vec(), dest))
},
Extract::Body => {
let mut body = RlpStream::new_list(2);
let block_rlp = Rlp::new(&value);
body.append_raw(block_rlp.at(1).as_raw(), 1);
body.append_raw(block_rlp.at(2).as_raw(), 1);
try!(batch.insert(key.to_vec(), body.out(), dest))
},
Extract::All => {
try!(batch.insert(key.to_vec(), value.to_vec(), dest))
}
}
}
batch.commit(dest)
}
}
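
How ToV9 is meant to be driven is implied by Extract: one pass per legacy database, each mapped into its target column of the new consolidated DB. A hypothetical wiring (the registration with the migration manager is outside this diff):

// One ToV9 instance per legacy DB, using the client column constants.
let headers = ToV9::new(client::DB_COL_HEADERS, Extract::Header); // old blocks DB
let bodies = ToV9::new(client::DB_COL_BODIES, Extract::Body);     // old blocks DB
let extras = ToV9::new(client::DB_COL_EXTRA, Extract::All);       // old extras DB
let state = ToV9::new(client::DB_COL_STATE, Extract::All);        // old state DB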


@@ -27,7 +27,8 @@ use error::Error;
use ids::BlockID;
use views::{BlockView, HeaderView};
use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut};
use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut, DBTransaction};
use util::error::UtilError;
use util::hash::{FixedHash, H256};
use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType};
@ -359,7 +360,9 @@ impl StateRebuilder {
try!(rebuild_account_trie(db.as_hashdb_mut(), account_chunk, out_pairs_chunk));
// commit the db changes we made in this thread.
try!(db.commit(0, &H256::zero(), None));
let batch = DBTransaction::new(&db.backing());
try!(db.commit(&batch, 0, &H256::zero(), None));
try!(db.backing().write(batch).map_err(UtilError::SimpleString));
Ok(())
});
@ -388,7 +391,9 @@ impl StateRebuilder {
}
}
try!(self.db.commit(0, &H256::zero(), None));
let batch = DBTransaction::new(&self.db.backing());
try!(self.db.commit(&batch, 0, &H256::zero(), None));
try!(self.db.backing().write(batch).map_err(|e| Error::Util(e.into())));
Ok(())
}

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use client::{BlockChainClient, Client, ClientConfig};
use client::{self, BlockChainClient, Client, ClientConfig};
use common::*;
use spec::*;
use block::{OpenBlock, Drain};
@ -246,12 +246,23 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<
}
}
fn new_db(path: &str) -> Arc<Database> {
Arc::new(
Database::open(&DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS), path)
.expect("Opening database for tests should always work.")
)
}
pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockChain> {
let temp = RandomTempPath::new();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
bc.insert_block(&batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
}
db.write(batch).unwrap();
GuardedTempResult::<BlockChain> {
_temp: temp,
@ -261,10 +272,15 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockCh
pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempResult<BlockChain> {
let temp = RandomTempPath::new();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
bc.insert_block(&batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
}
db.write(batch).unwrap();
GuardedTempResult::<BlockChain> {
_temp: temp,
@ -274,7 +290,8 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempRes
pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
let temp = RandomTempPath::new();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path());
let db = new_db(temp.as_str());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
GuardedTempResult::<BlockChain> {
_temp: temp,
@ -284,7 +301,8 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
let temp = RandomTempPath::new();
let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default());
let journal_db = get_temp_journal_db_in(temp.as_path());
GuardedTempResult {
_temp: temp,
result: Some(journal_db)
@ -294,6 +312,7 @@ pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
pub fn get_temp_state() -> GuardedTempResult<State> {
let temp = RandomTempPath::new();
let journal_db = get_temp_journal_db_in(temp.as_path());
GuardedTempResult {
_temp: temp,
result: Some(State::new(journal_db, U256::from(0), Default::default())),
@ -301,7 +320,8 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
}
pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> {
journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default())
let db = new_db(path.to_str().expect("Only valid utf8 paths for tests."));
journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, None)
}
pub fn get_temp_state_in(path: &Path) -> State {

View File

@ -18,15 +18,15 @@
use std::ops::{Deref, DerefMut};
use std::collections::HashMap;
use std::sync::Arc;
use std::path::Path;
use bloomchain::{Number, Config as BloomConfig};
use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
use util::{H256, H264, Database, DatabaseConfig, DBTransaction, RwLock};
use util::{H256, H264, Database, DBTransaction, RwLock};
use header::BlockNumber;
use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error};
use db::{Key, Writable, Readable, CacheUpdatePolicy};
use blooms;
use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
use client::DB_COL_TRACE;
const TRACE_DB_VER: &'static [u8] = b"1.0";
@ -94,7 +94,7 @@ pub struct TraceDB<T> where T: DatabaseExtras {
traces: RwLock<HashMap<H256, FlatBlockTraces>>,
blooms: RwLock<HashMap<TraceGroupPosition, blooms::BloomGroup>>,
// db
tracesdb: Database,
tracesdb: Arc<Database>,
// config,
bloom_config: BloomConfig,
// tracing enabled
@ -106,24 +106,15 @@ pub struct TraceDB<T> where T: DatabaseExtras {
impl<T> BloomGroupDatabase for TraceDB<T> where T: DatabaseExtras {
fn blooms_at(&self, position: &GroupPosition) -> Option<BloomGroup> {
let position = TraceGroupPosition::from(position.clone());
self.tracesdb.read_with_cache(&self.blooms, &position).map(Into::into)
self.tracesdb.read_with_cache(DB_COL_TRACE, &self.blooms, &position).map(Into::into)
}
}
impl<T> TraceDB<T> where T: DatabaseExtras {
/// Creates new instance of `TraceDB`.
pub fn new(config: Config, path: &Path, extras: Arc<T>) -> Result<Self, Error> {
let mut tracedb_path = path.to_path_buf();
tracedb_path.push("tracedb");
let tracesdb = match config.db_cache_size {
None => Database::open_default(tracedb_path.to_str().unwrap()).unwrap(),
Some(db_cache) => Database::open(
&DatabaseConfig::with_cache(db_cache),
tracedb_path.to_str().unwrap()).unwrap(),
};
pub fn new(config: Config, tracesdb: Arc<Database>, extras: Arc<T>) -> Result<Self, Error> {
// check if tracing was previously enabled
let old_tracing = match tracesdb.get(b"enabled").unwrap() {
let old_tracing = match tracesdb.get(DB_COL_TRACE, b"enabled").unwrap() {
Some(ref value) if value as &[u8] == &[0x1] => Switch::On,
Some(ref value) if value as &[u8] == &[0x0] => Switch::Off,
Some(_) => { panic!("tracesdb is corrupted") },
@ -137,8 +128,10 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
false => [0x0]
};
tracesdb.put(b"enabled", &encoded_tracing).unwrap();
tracesdb.put(b"version", TRACE_DB_VER).unwrap();
let batch = DBTransaction::new(&tracesdb);
batch.put(DB_COL_TRACE, b"enabled", &encoded_tracing).unwrap();
batch.put(DB_COL_TRACE, b"version", TRACE_DB_VER).unwrap();
tracesdb.write(batch).unwrap();
let db = TraceDB {
traces: RwLock::new(HashMap::new()),
@ -154,7 +147,7 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
/// Returns traces for block with hash.
fn traces(&self, block_hash: &H256) -> Option<FlatBlockTraces> {
self.tracesdb.read_with_cache(&self.traces, block_hash)
self.tracesdb.read_with_cache(DB_COL_TRACE, &self.traces, block_hash)
}
/// Returns vector of transaction traces for given block.
@ -217,20 +210,18 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
/// Traces of the import request's enacted blocks are expected to be already in the database
/// or to be the currently inserted trace.
fn import(&self, request: ImportRequest) {
fn import(&self, batch: &DBTransaction, request: ImportRequest) {
// fast return if tracing is disabled
if !self.tracing_enabled() {
return;
}
let batch = DBTransaction::new();
// at first, let's insert new block traces
{
let mut traces = self.traces.write();
// it's important to use overwrite here,
// because this value might be queried by hash later
batch.write_with_cache(traces.deref_mut(), request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
batch.write_with_cache(DB_COL_TRACE, traces.deref_mut(), request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
}
// now let's rebuild the blooms
@ -256,10 +247,8 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
.collect::<HashMap<TraceGroupPosition, blooms::BloomGroup>>();
let mut blooms = self.blooms.write();
batch.extend_with_cache(blooms.deref_mut(), blooms_to_insert, CacheUpdatePolicy::Remove);
batch.extend_with_cache(DB_COL_TRACE, blooms.deref_mut(), blooms_to_insert, CacheUpdatePolicy::Remove);
}
self.tracesdb.write(batch).unwrap();
}
fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec<usize>) -> Option<LocalizedTrace> {
@ -362,13 +351,14 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
mod tests {
use std::collections::HashMap;
use std::sync::Arc;
use util::{Address, U256, H256};
use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction};
use devtools::RandomTempPath;
use header::BlockNumber;
use trace::{Config, Switch, TraceDB, Database, DatabaseExtras, ImportRequest};
use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
use trace::{Filter, LocalizedTrace, AddressesFilter};
use trace::trace::{Call, Action, Res};
use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
use client::DB_NO_OF_COLUMNS;
use types::executed::CallType;
struct NoopExtras;
@ -408,28 +398,33 @@ mod tests {
}
}
fn new_db(path: &str) -> Arc<Database> {
Arc::new(Database::open(&DatabaseConfig::with_columns(DB_NO_OF_COLUMNS), path).unwrap())
}
#[test]
fn test_reopening_db_with_tracing_off() {
let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let mut config = Config::default();
// set autotracing
config.enabled = Switch::Auto;
{
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
{
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
config.enabled = Switch::Off;
{
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
}
@ -437,32 +432,33 @@ mod tests {
#[test]
fn test_reopening_db_with_tracing_on() {
let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let mut config = Config::default();
// set tracing on
config.enabled = Switch::On;
{
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
{
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
config.enabled = Switch::Auto;
{
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
config.enabled = Switch::Off;
{
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
}
@ -471,18 +467,19 @@ mod tests {
#[should_panic]
fn test_invalid_reopening_db() {
let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let mut config = Config::default();
// set tracing on
config.enabled = Switch::Off;
{
let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
config.enabled = Switch::On;
TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); // should panic!
TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic!
}
fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest {
@ -531,6 +528,7 @@ mod tests {
#[test]
fn test_import() {
let temp = RandomTempPath::new();
let db = Arc::new(Database::open(&DatabaseConfig::with_columns(DB_NO_OF_COLUMNS), temp.as_str()).unwrap());
let mut config = Config::default();
config.enabled = Switch::On;
let block_0 = H256::from(0xa1);
@ -544,11 +542,13 @@ mod tests {
extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
extras.transaction_hashes.insert(1, vec![tx_1.clone()]);
let tracedb = TraceDB::new(config, temp.as_path(), Arc::new(extras)).unwrap();
let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap();
// import block 0
let request = create_simple_import_request(0, block_0.clone());
tracedb.import(request);
let batch = DBTransaction::new(&db);
tracedb.import(&batch, request);
db.write(batch).unwrap();
let filter = Filter {
range: (0..0),
@ -562,7 +562,9 @@ mod tests {
// import block 1
let request = create_simple_import_request(1, block_1.clone());
tracedb.import(request);
let batch = DBTransaction::new(&db);
tracedb.import(&batch, request);
db.write(batch).unwrap();
let filter = Filter {
range: (0..1),

View File

@ -35,7 +35,7 @@ pub use self::executive_tracer::{ExecutiveTracer, ExecutiveVMTracer};
pub use types::trace_types::filter::{Filter, AddressesFilter};
pub use self::import::ImportRequest;
pub use self::localized::LocalizedTrace;
use util::{Bytes, Address, U256, H256};
use util::{Bytes, Address, U256, H256, DBTransaction};
use self::trace::{Call, Create};
use action_params::ActionParams;
use header::BlockNumber;
@ -121,7 +121,7 @@ pub trait Database {
fn tracing_enabled(&self) -> bool;
/// Imports new block traces.
fn import(&self, request: ImportRequest);
fn import(&self, batch: &DBTransaction, request: ImportRequest);
/// Returns localized trace at given position.
fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec<usize>) -> Option<LocalizedTrace>;

View File

@ -287,6 +287,14 @@ mod tests {
self.blocks.get(hash).cloned()
}
fn block_header_data(&self, hash: &H256) -> Option<Bytes> {
self.block(hash).map(|b| BlockView::new(&b).header_rlp().as_raw().to_vec())
}
fn block_body(&self, hash: &H256) -> Option<Bytes> {
self.block(hash).map(|b| BlockChain::block_to_body(&b))
}
/// Get the familial details concerning a block.
fn block_details(&self, hash: &H256) -> Option<BlockDetails> {
self.blocks.get(hash).map(|bytes| {

View File

@ -56,6 +56,11 @@ impl<'a> BlockView<'a> {
self.rlp.val_at(0)
}
/// Return header rlp.
pub fn header_rlp(&self) -> Rlp {
self.rlp.at(0)
}
/// Create a new header view onto the block header rlp.
pub fn header_view(&self) -> HeaderView<'a> {
HeaderView::new_from_rlp(self.rlp.at(0))

144
ethcore/src/views/body.rs Normal file
View File

@ -0,0 +1,144 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! View onto block body rlp.
use util::*;
use header::*;
use transaction::*;
use super::{TransactionView, HeaderView};
/// View onto block body rlp.
pub struct BodyView<'a> {
rlp: Rlp<'a>
}
impl<'a> BodyView<'a> {
/// Creates new view onto block from raw bytes.
pub fn new(bytes: &'a [u8]) -> BodyView<'a> {
BodyView {
rlp: Rlp::new(bytes)
}
}
/// Creates new view onto block from rlp.
pub fn new_from_rlp(rlp: Rlp<'a>) -> BodyView<'a> {
BodyView {
rlp: rlp
}
}
/// Return reference to the underlying rlp.
pub fn rlp(&self) -> &Rlp<'a> {
&self.rlp
}
/// Return list of transactions in given block.
pub fn transactions(&self) -> Vec<SignedTransaction> {
self.rlp.val_at(0)
}
/// Return list of transactions with additional localization info.
pub fn localized_transactions(&self, block_hash: &H256, block_number: BlockNumber) -> Vec<LocalizedTransaction> {
self.transactions()
.into_iter()
.enumerate()
.map(|(i, t)| LocalizedTransaction {
signed: t,
block_hash: block_hash.clone(),
block_number: block_number,
transaction_index: i
}).collect()
}
/// Return number of transactions in given block, without deserializing them.
pub fn transactions_count(&self) -> usize {
self.rlp.at(0).item_count()
}
/// Return list of transaction views for given block.
pub fn transaction_views(&self) -> Vec<TransactionView> {
self.rlp.at(0).iter().map(TransactionView::new_from_rlp).collect()
}
/// Return transaction hashes.
pub fn transaction_hashes(&self) -> Vec<H256> {
self.rlp.at(0).iter().map(|rlp| rlp.as_raw().sha3()).collect()
}
/// Returns transaction at given index without deserializing unnecessary data.
pub fn transaction_at(&self, index: usize) -> Option<SignedTransaction> {
self.rlp.at(0).iter().nth(index).map(|rlp| rlp.as_val())
}
/// Returns localized transaction at given index.
pub fn localized_transaction_at(&self, block_hash: &H256, block_number: BlockNumber, index: usize) -> Option<LocalizedTransaction> {
self.transaction_at(index).map(|t| LocalizedTransaction {
signed: t,
block_hash: block_hash.clone(),
block_number: block_number,
transaction_index: index
})
}
/// Return list of uncles of given block.
pub fn uncles(&self) -> Vec<Header> {
self.rlp.val_at(1)
}
/// Return number of uncles in given block, without deserializing them.
pub fn uncles_count(&self) -> usize {
self.rlp.at(1).item_count()
}
/// Return list of uncle views of given block.
pub fn uncle_views(&self) -> Vec<HeaderView> {
self.rlp.at(1).iter().map(HeaderView::new_from_rlp).collect()
}
/// Return list of uncle hashes of given block.
pub fn uncle_hashes(&self) -> Vec<H256> {
self.rlp.at(1).iter().map(|rlp| rlp.as_raw().sha3()).collect()
}
/// Return nth uncle.
pub fn uncle_at(&self, index: usize) -> Option<Header> {
self.rlp.at(1).iter().nth(index).map(|rlp| rlp.as_val())
}
/// Return nth uncle rlp.
pub fn uncle_rlp_at(&self, index: usize) -> Option<Bytes> {
self.rlp.at(1).iter().nth(index).map(|rlp| rlp.as_raw().to_vec())
}
}
#[cfg(test)]
mod tests {
use util::*;
use super::BodyView;
use blockchain::BlockChain;
#[test]
fn test_body_view() {
// that's rlp of block created with ethash engine.
let rlp = "f90261f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba03a347e72953c860f32b1eb2c78a680d8734b2ea08085d949d729479796f218d5a047ea6239d9e31ccac8af3366f5ca37184d26e7646e3191a3aeb81c4cf74de500c0".from_hex().unwrap();
let body = BlockChain::block_to_body(&rlp);
let view = BodyView::new(&body);
assert_eq!(view.transactions_count(), 1);
assert_eq!(view.uncles_count(), 0);
}
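	#[test]
	fn test_empty_body_view() {
		// A companion sketch (not in the original tests): build an empty body by hand,
		// i.e. the two-item list [transactions, uncles] with both lists empty, and
		// check that the positional accessors degrade gracefully.
		let mut stream = RlpStream::new_list(2);
		stream.begin_list(0); // no transactions
		stream.begin_list(0); // no uncles
		let body = stream.out();
		let view = BodyView::new(&body);
		assert_eq!(view.transactions_count(), 0);
		assert_eq!(view.uncles_count(), 0);
		assert!(view.transaction_at(0).is_none());
		assert!(view.uncle_at(0).is_none());
	}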
}

View File

@ -19,7 +19,9 @@
mod block;
mod header;
mod transaction;
mod body;
pub use self::block::BlockView;
pub use self::header::HeaderView;
pub use self::body::BodyView;
pub use self::transaction::TransactionView;

25
evmbin/Cargo.lock generated
View File

@ -159,6 +159,7 @@ name = "ethash"
version = "1.3.0"
dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"sha3 0.1.0",
]
@ -250,8 +251,9 @@ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)",
"nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)",
"rocksdb 0.4.5",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -301,6 +303,7 @@ dependencies = [
"serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -622,6 +625,17 @@ name = "odds"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "parking_lot"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "primal"
version = "0.2.3"
@ -724,16 +738,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rocksdb"
version = "0.4.5"
source = "git+https://github.com/ethcore/rust-rocksdb#9be41e05923616dfa28741c58b22776d479751e6"
dependencies = [
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)",
"rocksdb-sys 0.3.0",
]
[[package]]
name = "rocksdb-sys"
version = "0.3.0"
source = "git+https://github.com/ethcore/rust-rocksdb#9be41e05923616dfa28741c58b22776d479751e6"
dependencies = [
"gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
@ -831,6 +843,11 @@ name = "slab"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "smallvec"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "solicit"
version = "0.4.4"

View File

@ -52,13 +52,20 @@ impl Directories {
Ok(())
}
/// Get the path for the databases given the root path and information on the databases.
pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
/// Get the root path for the database.
pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
let mut dir = Path::new(&self.db).to_path_buf();
dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
dir
}
/// Get the path for the databases given the genesis_hash and information on the databases.
pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
let mut dir = self.db_version_path(genesis_hash, fork_name, pruning);
dir.push("db");
dir
}
}
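// A self-contained illustration (hypothetical values) of the layout the two helpers
// above produce: `client_path` is always `db_version_path` plus a trailing "db"
// component, which is what lets migrations operate on the version directory as a whole.
#[cfg(test)]
mod layout_sketch {
	#[test]
	fn client_path_nests_under_version_path() {
		use std::path::PathBuf;
		let mut version_dir = PathBuf::from("/data/parity/906df4ff"); // hypothetical db root + genesis dir
		version_dir.push("v5.3-sec-archive");                         // hypothetical version/pruning segment
		let mut client_dir = version_dir.clone();
		client_dir.push("db");
		assert!(client_dir.starts_with(&version_dir));
		assert!(client_dir.ends_with("db"));
	}
}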
#[cfg(test)]

View File

@ -238,7 +238,7 @@ pub fn execute_upgrades(
_ => {},
}
let client_path = dirs.client_path(genesis_hash, fork_name, pruning);
let client_path = dirs.db_version_path(genesis_hash, fork_name, pruning);
migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
}

View File

@ -20,14 +20,18 @@ use std::io::{Read, Write, Error as IoError, ErrorKind};
use std::path::{Path, PathBuf};
use std::fmt::{Display, Formatter, Error as FmtError};
use util::journaldb::Algorithm;
use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError};
use util::kvdb::CompactionProfile;
use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration};
use util::kvdb::{CompactionProfile, Database, DatabaseConfig};
use ethcore::migrations;
use ethcore::client;
use ethcore::migrations::Extract;
/// The database is assumed to be at the default version when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
const CURRENT_VERSION: u32 = 8;
const CURRENT_VERSION: u32 = 9;
/// First version of the consolidated database.
const CONSOLIDATION_VERSION: u32 = 9;
/// Defines how many items are migrated to the new version of the database at once.
const BATCH_SIZE: usize = 1024;
/// Version file name.
@ -111,27 +115,13 @@ fn update_version(path: &Path) -> Result<(), Error> {
Ok(())
}
/// State database path.
fn state_database_path(path: &Path) -> PathBuf {
/// Consolidated database path.
fn consolidated_database_path(path: &Path) -> PathBuf {
let mut state_path = path.to_owned();
state_path.push("state");
state_path.push("db");
state_path
}
/// Blocks database path.
fn blocks_database_path(path: &Path) -> PathBuf {
let mut blocks_path = path.to_owned();
blocks_path.push("blocks");
blocks_path
}
/// Extras database path.
fn extras_database_path(path: &Path) -> PathBuf {
let mut extras_path = path.to_owned();
extras_path.push("extras");
extras_path
}
/// Database backup
fn backup_database_path(path: &Path) -> PathBuf {
let mut backup_path = path.to_owned();
@ -141,40 +131,55 @@ fn backup_database_path(path: &Path) -> PathBuf {
}
/// Default migration settings.
fn default_migration_settings(compaction_profile: CompactionProfile) -> MigrationConfig {
pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> MigrationConfig {
MigrationConfig {
batch_size: BATCH_SIZE,
compaction_profile: compaction_profile,
compaction_profile: *compaction_profile,
}
}
/// Migrations on the blocks database.
fn blocks_database_migrations(compaction_profile: CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
try!(manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible));
/// Migrations on the consolidated database.
fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let manager = MigrationManager::new(default_migration_settings(compaction_profile));
Ok(manager)
}
/// Migrations on the extras database.
fn extras_database_migrations(compaction_profile: CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
try!(manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible));
Ok(manager)
}
/// Consolidates legacy databases into single one.
fn consolidate_database(
old_db_path: PathBuf,
new_db_path: PathBuf,
column: Option<u32>,
extract: Extract,
compaction_profile: &CompactionProfile) -> Result<(), Error> {
fn db_error(e: String) -> Error {
warn!("Cannot open Database for consolidation: {:?}", e);
Error::MigrationFailed
}
/// Migrations on the state database.
fn state_database_migrations(pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
let res = match pruning {
Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
_ => return Err(Error::UnsuportedPruningMethod),
let mut migration = migrations::ToV9::new(column, extract);
let config = default_migration_settings(compaction_profile);
let mut db_config = DatabaseConfig {
max_open_files: 64,
cache_size: None,
compaction: config.compaction_profile.clone(),
columns: None,
};
try!(res.map_err(|_| Error::MigrationImpossible));
Ok(manager)
let old_path_str = try!(old_db_path.to_str().ok_or(Error::MigrationImpossible));
let new_path_str = try!(new_db_path.to_str().ok_or(Error::MigrationImpossible));
let cur_db = try!(Database::open(&db_config, old_path_str).map_err(db_error));
// open new DB with proper number of columns
db_config.columns = migration.columns();
let mut new_db = try!(Database::open(&db_config, new_path_str).map_err(db_error));
// Migrate to new database (default column only)
try!(migration.migrate(&cur_db, &config, &mut new_db, None));
Ok(())
}
/// Migrates database at given position with given migration rules.
fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> {
// check if migration is needed
@ -216,17 +221,108 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionPr
// migrate the databases.
// main db directory may already exist, so let's check if we have blocks dir
if version < CURRENT_VERSION && exists(&blocks_database_path(path)) {
println!("Migrating database from version {} to {}", version, CURRENT_VERSION);
try!(migrate_database(version, blocks_database_path(path), try!(blocks_database_migrations(compaction_profile.clone()))));
try!(migrate_database(version, extras_database_path(path), try!(extras_database_migrations(compaction_profile.clone()))));
try!(migrate_database(version, state_database_path(path), try!(state_database_migrations(pruning, compaction_profile))));
println!("Migration finished");
} else if version > CURRENT_VERSION {
if version > CURRENT_VERSION {
return Err(Error::FutureDBVersion);
}
// We are in the latest version, yay!
if version == CURRENT_VERSION {
return Ok(())
}
// Perform pre-consolidation migrations
if version < CONSOLIDATION_VERSION && exists(&legacy::blocks_database_path(path)) {
println!("Migrating database from version {} to {}", version, CONSOLIDATION_VERSION);
try!(migrate_database(version, legacy::blocks_database_path(path), try!(legacy::blocks_database_migrations(&compaction_profile))));
try!(migrate_database(version, legacy::extras_database_path(path), try!(legacy::extras_database_migrations(&compaction_profile))));
try!(migrate_database(version, legacy::state_database_path(path), try!(legacy::state_database_migrations(pruning, &compaction_profile))));
let db_path = consolidated_database_path(path);
// Remove the database dir (it shouldn't exist anyway, but it might if a previous migration was interrupted)
let _ = fs::remove_dir_all(db_path.clone());
try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_HEADERS, Extract::Header, &compaction_profile));
try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_BODIES, Extract::Body, &compaction_profile));
try!(consolidate_database(legacy::extras_database_path(path), db_path.clone(), client::DB_COL_EXTRA, Extract::All, &compaction_profile));
try!(consolidate_database(legacy::state_database_path(path), db_path.clone(), client::DB_COL_STATE, Extract::All, &compaction_profile));
try!(consolidate_database(legacy::trace_database_path(path), db_path.clone(), client::DB_COL_TRACE, Extract::All, &compaction_profile));
let _ = fs::remove_dir_all(legacy::blocks_database_path(path));
let _ = fs::remove_dir_all(legacy::extras_database_path(path));
let _ = fs::remove_dir_all(legacy::state_database_path(path));
let _ = fs::remove_dir_all(legacy::trace_database_path(path));
println!("Migration finished");
}
// Further migrations
if version >= CONSOLIDATION_VERSION && version < CURRENT_VERSION && exists(&consolidated_database_path(path)) {
println!("Migrating database from version {} to {}", ::std::cmp::max(CONSOLIDATION_VERSION, version), CURRENT_VERSION);
try!(migrate_database(version, consolidated_database_path(path), try!(consolidated_database_migrations(&compaction_profile))));
println!("Migration finished");
}
// update version file.
update_version(path)
}
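// A self-contained model (simplified: it ignores the on-disk existence checks) of the
// version gating above. Note that with CURRENT_VERSION == CONSOLIDATION_VERSION == 9 the
// "further migrations" branch is currently unreachable; it activates once
// CURRENT_VERSION moves past 9.
#[cfg(test)]
mod gating_model {
	const CURRENT_VERSION: u32 = 9;
	const CONSOLIDATION_VERSION: u32 = 9;

	fn plan(version: u32) -> &'static str {
		if version > CURRENT_VERSION {
			"future DB version: abort"
		} else if version == CURRENT_VERSION {
			"nothing to do"
		} else if version < CONSOLIDATION_VERSION {
			"run legacy migrations, then consolidate into a single column-family DB"
		} else {
			"migrate the consolidated DB in place"
		}
	}

	#[test]
	fn version_gating() {
		assert_eq!(plan(5), "run legacy migrations, then consolidate into a single column-family DB");
		assert_eq!(plan(8), "run legacy migrations, then consolidate into a single column-family DB");
		assert_eq!(plan(9), "nothing to do");
		assert_eq!(plan(10), "future DB version: abort");
	}
}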
/// Old migrations utilities
mod legacy {
use super::*;
use std::path::{Path, PathBuf};
use util::journaldb::Algorithm;
use util::migration::{Manager as MigrationManager};
use util::kvdb::CompactionProfile;
use ethcore::migrations;
/// Blocks database path.
pub fn blocks_database_path(path: &Path) -> PathBuf {
let mut blocks_path = path.to_owned();
blocks_path.push("blocks");
blocks_path
}
/// Extras database path.
pub fn extras_database_path(path: &Path) -> PathBuf {
let mut extras_path = path.to_owned();
extras_path.push("extras");
extras_path
}
/// State database path.
pub fn state_database_path(path: &Path) -> PathBuf {
let mut state_path = path.to_owned();
state_path.push("state");
state_path
}
/// Trace database path.
pub fn trace_database_path(path: &Path) -> PathBuf {
let mut trace_path = path.to_owned();
trace_path.push("tracedb");
trace_path
}
/// Migrations on the blocks database.
pub fn blocks_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
try!(manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible));
Ok(manager)
}
/// Migrations on the extras database.
pub fn extras_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
try!(manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible));
Ok(manager)
}
/// Migrations on the state database.
pub fn state_database_migrations(pruning: Algorithm, compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
let res = match pruning {
Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
_ => return Err(Error::UnsuportedPruningMethod),
};
try!(res.map_err(|_| Error::MigrationImpossible));
Ok(manager)
}
}

View File

@ -15,9 +15,11 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use util::{contents, DatabaseConfig, journaldb, H256, Address, U256, version_data};
use util::{contents, Database, DatabaseConfig, journaldb, H256, Address, U256, version_data};
use util::journaldb::Algorithm;
use ethcore::client;
use ethcore::spec::Spec;
use ethcore::ethereum;
use ethcore::miner::{GasPricer, GasPriceCalibratorOptions};
@ -103,11 +105,15 @@ impl Pruning {
algo_types.push(Algorithm::default());
algo_types.into_iter().max_by_key(|i| {
let mut client_path = dirs.client_path(genesis_hash, fork_name, *i);
client_path.push("state");
let db = journaldb::new(client_path.to_str().unwrap(), *i, DatabaseConfig::default());
let client_path = dirs.client_path(genesis_hash, fork_name, *i);
let config = DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS);
let db = match Database::open(&config, client_path.to_str().unwrap()) {
Ok(db) => db,
Err(_) => return 0,
};
let db = journaldb::new(Arc::new(db), *i, client::DB_COL_STATE);
trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era());
db.latest_era()
db.latest_era().unwrap_or(0)
}).unwrap()
}
}

View File

@ -14,5 +14,5 @@ case $1 in
esac
. ./scripts/targets.sh
cargo test --release --verbose $FEATURES $TARGETS $1 \
cargo test --release $FEATURES $TARGETS $1 \

View File

@ -20,9 +20,9 @@ use common::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY};
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
use super::traits::JournalDB;
use kvdb::{Database, DBTransaction, DatabaseConfig};
use kvdb::{Database, DBTransaction};
#[cfg(test)]
use std::env;
@ -30,9 +30,6 @@ use std::env;
/// Would be nice to use rocksdb columns for this eventually.
const AUX_FLAG: u8 = 255;
/// Database version.
const DB_VERSION : u32 = 0x103;
/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
@ -44,28 +41,18 @@ pub struct ArchiveDB {
overlay: MemoryDB,
backing: Arc<Database>,
latest_era: Option<u64>,
column: Option<u32>,
}
impl ArchiveDB {
/// Create a new instance from file
pub fn new(path: &str, config: DatabaseConfig) -> ArchiveDB {
let backing = Database::open(&config, path).unwrap_or_else(|e| {
panic!("Error opening state db: {}", e);
});
if !backing.is_empty() {
match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
Ok(Some(DB_VERSION)) => {},
v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path)
}
} else {
backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
}
let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::<u64>(&val));
pub fn new(backing: Arc<Database>, col: Option<u32>) -> ArchiveDB {
let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::<u64>(&val));
ArchiveDB {
overlay: MemoryDB::new(),
backing: Arc::new(backing),
backing: backing,
latest_era: latest_era,
column: col,
}
}
@ -74,18 +61,19 @@ impl ArchiveDB {
fn new_temp() -> ArchiveDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
Self::new(dir.to_str().unwrap(), DatabaseConfig::default())
let backing = Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap());
Self::new(backing, None)
}
fn payload(&self, key: &H256) -> Option<Bytes> {
self.backing.get(key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
}
}
impl HashDB for ArchiveDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter() {
for (key, _) in self.backing.iter(self.column) {
let h = H256::from_slice(key.deref());
ret.insert(h, 1);
}
@ -140,7 +128,7 @@ impl HashDB for ArchiveDB {
let mut db_hash = hash.to_vec();
db_hash.push(AUX_FLAG);
self.backing.get(&db_hash)
self.backing.get(self.column, &db_hash)
.expect("Low-level database error. Some issue with your hard disk?")
.map(|v| v.to_vec())
}
@ -156,6 +144,7 @@ impl JournalDB for ArchiveDB {
overlay: self.overlay.clone(),
backing: self.backing.clone(),
latest_era: self.latest_era,
column: self.column.clone(),
})
}
@ -167,8 +156,7 @@ impl JournalDB for ArchiveDB {
self.latest_era.is_none()
}
fn commit(&mut self, now: u64, _: &H256, _: Option<(u64, H256)>) -> Result<u32, UtilError> {
let batch = DBTransaction::new();
fn commit(&mut self, batch: &DBTransaction, now: u64, _id: &H256, _end: Option<(u64, H256)>) -> Result<u32, UtilError> {
let mut inserts = 0usize;
let mut deletes = 0usize;
@ -176,7 +164,7 @@ impl JournalDB for ArchiveDB {
let (key, (value, rc)) = i;
if rc > 0 {
assert!(rc == 1);
batch.put(&key, &value).expect("Low-level database error. Some issue with your hard disk?");
batch.put(self.column, &key, &value).expect("Low-level database error. Some issue with your hard disk?");
inserts += 1;
}
if rc < 0 {
@ -187,24 +175,27 @@ impl JournalDB for ArchiveDB {
for (mut key, value) in self.overlay.drain_aux().into_iter() {
key.push(AUX_FLAG);
batch.put(&key, &value).expect("Low-level database error. Some issue with your hard disk?");
batch.put(self.column, &key, &value).expect("Low-level database error. Some issue with your hard disk?");
}
if self.latest_era.map_or(true, |e| now > e) {
try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)));
self.latest_era = Some(now);
}
try!(self.backing.write(batch));
Ok((inserts + deletes) as u32)
}
fn latest_era(&self) -> Option<u64> { self.latest_era }
fn state(&self, id: &H256) -> Option<Bytes> {
self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
}
fn is_pruned(&self) -> bool { false }
fn backing(&self) -> &Arc<Database> {
&self.backing
}
}
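// Usage sketch (not part of the original file) of the new two-step commit: stage
// changes into an explicit `DBTransaction`, then persist it through the shared backing
// database. Assumes a scratch path; `None` selects the default column, as in the tests
// below.
#[cfg(test)]
#[allow(dead_code)]
fn commit_pattern_sketch() {
	let dir = ::devtools::RandomTempPath::new();
	let backing = Arc::new(Database::open_default(dir.as_str()).unwrap());
	let mut jdb = ArchiveDB::new(backing.clone(), None);

	let key = jdb.insert(b"example value");
	let batch = DBTransaction::new(&backing);              // batch tied to the backing DB
	jdb.commit(&batch, 0, &b"era0".sha3(), None).unwrap(); // stage the overlay into the batch
	backing.write(batch).unwrap();                         // persist atomically
	assert!(jdb.contains(&key));
}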
#[cfg(test)]
@ -216,7 +207,7 @@ mod tests {
use super::*;
use hashdb::*;
use journaldb::traits::JournalDB;
use kvdb::DatabaseConfig;
use kvdb::Database;
#[test]
fn insert_same_in_fork() {
@ -224,18 +215,18 @@ mod tests {
let mut jdb = ArchiveDB::new_temp();
let x = jdb.insert(b"X");
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.remove(&x);
jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
let x = jdb.insert(b"X");
jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();
jdb.commit_batch(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
jdb.commit_batch(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();
assert!(jdb.contains(&x));
}
@ -245,16 +236,16 @@ mod tests {
// history is 3
let mut jdb = ArchiveDB::new_temp();
let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.contains(&h));
jdb.remove(&h);
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.contains(&h));
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.contains(&h));
jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.contains(&h));
jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
}
#[test]
@ -264,29 +255,29 @@ mod tests {
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
jdb.remove(&foo);
jdb.remove(&bar);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
assert!(jdb.contains(&baz));
let foo = jdb.insert(b"foo");
jdb.remove(&baz);
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&baz));
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.contains(&foo));
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
}
#[test]
@ -296,22 +287,22 @@ mod tests {
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
assert!(jdb.contains(&baz));
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.contains(&foo));
}
@ -321,16 +312,16 @@ mod tests {
let mut jdb = ArchiveDB::new_temp();
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.contains(&foo));
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.insert(b"foo");
assert!(jdb.contains(&foo));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.contains(&foo));
jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
jdb.commit_batch(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
assert!(jdb.contains(&foo));
}
@ -338,19 +329,24 @@ mod tests {
fn fork_same_key() {
// history is 1
let mut jdb = ArchiveDB::new_temp();
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.insert(b"foo");
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.contains(&foo));
jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
jdb.commit_batch(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
assert!(jdb.contains(&foo));
}
fn new_db(dir: &Path) -> ArchiveDB {
let db = Database::open_default(dir.to_str().unwrap()).unwrap();
ArchiveDB::new(Arc::new(db), None)
}
#[test]
fn reopen() {
let mut dir = ::std::env::temp_dir();
@ -358,25 +354,25 @@ mod tests {
let bar = H256::random();
let foo = {
let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
// history is 1
let foo = jdb.insert(b"foo");
jdb.emplace(bar.clone(), b"bar".to_vec());
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
foo
};
{
let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
}
{
let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
}
}
@ -386,27 +382,27 @@ mod tests {
dir.push(H32::random().hex());
let foo = {
let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
// history is 1
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
// foo is ancient history.
jdb.insert(b"foo");
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
foo
};
{
let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.contains(&foo));
jdb.remove(&foo);
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
}
}
@ -415,23 +411,23 @@ mod tests {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let (foo, _, _) = {
let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
// history is 1
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
(foo, bar, baz)
};
{
let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
let mut jdb = new_db(&dir);
jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.contains(&foo));
}
}
@ -441,14 +437,14 @@ mod tests {
let temp = ::devtools::RandomTempPath::new();
let key = {
let mut jdb = ArchiveDB::new(temp.as_str(), DatabaseConfig::default());
let mut jdb = new_db(temp.as_path().as_path());
let key = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
key
};
{
let jdb = ArchiveDB::new(temp.as_str(), DatabaseConfig::default());
let jdb = new_db(temp.as_path().as_path());
let state = jdb.state(&key);
assert!(state.is_some());
}

View File

@ -20,9 +20,9 @@ use common::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY};
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
use super::traits::JournalDB;
use kvdb::{Database, DBTransaction, DatabaseConfig};
use kvdb::{Database, DBTransaction};
#[cfg(test)]
use std::env;
@ -66,33 +66,22 @@ pub struct EarlyMergeDB {
backing: Arc<Database>,
refs: Option<Arc<RwLock<HashMap<H256, RefInfo>>>>,
latest_era: Option<u64>,
column: Option<u32>,
}
const DB_VERSION : u32 = 0x003;
const PADDING : [u8; 10] = [ 0u8; 10 ];
impl EarlyMergeDB {
/// Create a new instance from file
pub fn new(path: &str, config: DatabaseConfig) -> EarlyMergeDB {
let backing = Database::open(&config, path).unwrap_or_else(|e| {
panic!("Error opening state db: {}", e);
});
if !backing.is_empty() {
match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
Ok(Some(DB_VERSION)) => {},
v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path)
}
} else {
backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
}
let (latest_era, refs) = EarlyMergeDB::read_refs(&backing);
pub fn new(backing: Arc<Database>, col: Option<u32>) -> EarlyMergeDB {
let (latest_era, refs) = EarlyMergeDB::read_refs(&backing, col);
let refs = Some(Arc::new(RwLock::new(refs)));
EarlyMergeDB {
overlay: MemoryDB::new(),
backing: Arc::new(backing),
backing: backing,
refs: refs,
latest_era: latest_era,
column: col,
}
}
@ -101,7 +90,8 @@ impl EarlyMergeDB {
fn new_temp() -> EarlyMergeDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
Self::new(dir.to_str().unwrap(), DatabaseConfig::default())
let backing = Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap());
Self::new(backing, None)
}
fn morph_key(key: &H256, index: u8) -> Bytes {
@ -111,13 +101,13 @@ impl EarlyMergeDB {
}
// The next three are valid only as long as there is an insert operation of `key` in the journal.
fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); }
fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); }
fn is_already_in(backing: &Database, key: &H256) -> bool {
backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
fn set_already_in(batch: &DBTransaction, col: Option<u32>, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); }
fn reset_already_in(batch: &DBTransaction, col: Option<u32>, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); }
fn is_already_in(backing: &Database, col: Option<u32>, key: &H256) -> bool {
backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
}
fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, trace: bool) {
fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, trace: bool) {
for &(ref h, ref d) in inserts {
if let Some(c) = refs.get_mut(h) {
// already counting. increment.
@ -129,9 +119,9 @@ impl EarlyMergeDB {
}
// this is the first entry for this node in the journal.
if backing.get(h).expect("Low-level database error. Some issue with your hard disk?").is_some() {
if backing.get(col, h).expect("Low-level database error. Some issue with your hard disk?").is_some() {
// already in the backing DB. start counting, and remember it was already in.
Self::set_already_in(batch, h);
Self::set_already_in(batch, col, h);
refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: true});
if trace {
trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h);
@ -141,8 +131,8 @@ impl EarlyMergeDB {
// Gets removed when a key leaves the journal, so should never be set when we're placing a new key.
//Self::reset_already_in(&h);
assert!(!Self::is_already_in(backing, &h));
batch.put(h, d).expect("Low-level database error. Some issue with your hard disk?");
assert!(!Self::is_already_in(backing, col, &h));
batch.put(col, h, d).expect("Low-level database error. Some issue with your hard disk?");
refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: false});
if trace {
trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h);
@ -150,7 +140,7 @@ impl EarlyMergeDB {
}
}
fn replay_keys(inserts: &[H256], backing: &Database, refs: &mut HashMap<H256, RefInfo>) {
fn replay_keys(inserts: &[H256], backing: &Database, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>) {
trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs);
for h in inserts {
if let Some(c) = refs.get_mut(h) {
@ -161,12 +151,12 @@ impl EarlyMergeDB {
// this is the first entry for this node in the journal.
// it is initialised to 1 if it was already in.
refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: Self::is_already_in(backing, h)});
refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: Self::is_already_in(backing, col, h)});
}
trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs);
}
fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, from: RemoveFrom, trace: bool) {
fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, col: Option<u32>, from: RemoveFrom, trace: bool) {
// with a remove on {queue_refs: 1, in_archive: true}, we have two options:
// - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive)
// - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue)
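// Editor's note: a minimal sketch (not part of this commit) of the two
// conversions listed above, using the RefInfo shape from this file;
// remove_keys below picks between them via its RemoveFrom argument.
#[derive(Debug, PartialEq)]
struct RefInfo { queue_refs: usize, in_archive: bool }
#[derive(PartialEq)]
enum RemoveFrom { Queue, Archive }
fn remove_once(from: RemoveFrom) -> RefInfo {
    match from {
        // forget the archived copy; the queue entry keeps counting
        RemoveFrom::Archive => RefInfo { queue_refs: 1, in_archive: false },
        // drop the queue entry; the archived copy survives
        RemoveFrom::Queue => RefInfo { queue_refs: 0, in_archive: true },
    }
}
fn main() {
    assert_eq!(remove_once(RemoveFrom::Archive),
               RefInfo { queue_refs: 1, in_archive: false });
    assert_eq!(remove_once(RemoveFrom::Queue),
               RefInfo { queue_refs: 0, in_archive: true });
}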
@ -178,7 +168,7 @@ impl EarlyMergeDB {
if let Some(c) = refs.get_mut(h) {
if c.in_archive && from == RemoveFrom::Archive {
c.in_archive = false;
Self::reset_already_in(batch, h);
Self::reset_already_in(batch, col, h);
if trace {
trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Reducing to queue only and recording", h);
}
@ -196,14 +186,14 @@ impl EarlyMergeDB {
match n {
Some(RefInfo{queue_refs: 1, in_archive: true}) => {
refs.remove(h);
Self::reset_already_in(batch, h);
Self::reset_already_in(batch, col, h);
if trace {
trace!(target: "jdb.fine", " remove({}): In archive, 1 in queue: Removing from queue and leaving in archive", h);
}
}
Some(RefInfo{queue_refs: 1, in_archive: false}) => {
refs.remove(h);
batch.delete(h).expect("Low-level database error. Some issue with your hard disk?");
batch.delete(col, h).expect("Low-level database error. Some issue with your hard disk?");
if trace {
trace!(target: "jdb.fine", " remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h);
}
@ -211,7 +201,7 @@ impl EarlyMergeDB {
None => {
// Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs.
//assert!(!Self::is_already_in(db, &h));
batch.delete(h).expect("Low-level database error. Some issue with your hard disk?");
batch.delete(col, h).expect("Low-level database error. Some issue with your hard disk?");
if trace {
trace!(target: "jdb.fine", " remove({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h);
}
@ -223,7 +213,7 @@ impl EarlyMergeDB {
#[cfg(test)]
fn can_reconstruct_refs(&self) -> bool {
let (latest_era, reconstructed) = Self::read_refs(&self.backing);
let (latest_era, reconstructed) = Self::read_refs(&self.backing, self.column);
let refs = self.refs.as_ref().unwrap().write();
if *refs != reconstructed || latest_era != self.latest_era {
let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) {None} else {Some((k.clone(), v.clone()))}).collect::<HashMap<_, _>>();
@ -236,18 +226,18 @@ impl EarlyMergeDB {
}
fn payload(&self, key: &H256) -> Option<Bytes> {
self.backing.get(key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
}
fn read_refs(db: &Database) -> (Option<u64>, HashMap<H256, RefInfo>) {
fn read_refs(db: &Database, col: Option<u32>) -> (Option<u64>, HashMap<H256, RefInfo>) {
let mut refs = HashMap::new();
let mut latest_era = None;
if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") {
let mut era = decode::<u64>(&val);
latest_era = Some(era);
loop {
let mut index = 0usize;
while let Some(rlp_data) = db.get({
while let Some(rlp_data) = db.get(col, {
let mut r = RlpStream::new_list(3);
r.append(&era);
r.append(&index);
@ -256,7 +246,7 @@ impl EarlyMergeDB {
}).expect("Low-level database error.") {
let rlp = Rlp::new(&rlp_data);
let inserts: Vec<H256> = rlp.val_at(1);
Self::replay_keys(&inserts, db, &mut refs);
Self::replay_keys(&inserts, db, col, &mut refs);
index += 1;
};
if index == 0 || era == 0 {
@ -267,12 +257,12 @@ impl EarlyMergeDB {
}
(latest_era, refs)
}
}
}
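// Editor's note: a self-contained sketch (not part of this commit) of the
// era/index walk read_refs performs, emulated over an in-memory journal:
// scan indices 0.. within an era until a miss, then step the era down,
// stopping once an era yields nothing at index 0 or era 0 is reached.
use std::collections::HashMap;
fn scan(journal: &HashMap<(u64, usize), Vec<String>>, latest_era: u64) -> Vec<String> {
    let mut seen = Vec::new();
    let mut era = latest_era;
    loop {
        let mut index = 0usize;
        while let Some(inserts) = journal.get(&(era, index)) {
            seen.extend(inserts.iter().cloned()); // replay_keys over the record
            index += 1;
        }
        if index == 0 || era == 0 { break; }
        era -= 1;
    }
    seen
}
fn main() {
    let mut j = HashMap::new();
    j.insert((2u64, 0usize), vec!["a".to_string()]);
    j.insert((1, 0), vec!["b".to_string()]);
    j.insert((1, 1), vec!["c".to_string()]);
    assert_eq!(scan(&j, 2),
               vec!["a".to_string(), "b".to_string(), "c".to_string()]);
}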
impl HashDB for EarlyMergeDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter() {
for (key, _) in self.backing.iter(self.column) {
let h = H256::from_slice(key.deref());
ret.insert(h, 1);
}
@ -321,11 +311,16 @@ impl JournalDB for EarlyMergeDB {
backing: self.backing.clone(),
refs: self.refs.clone(),
latest_era: self.latest_era.clone(),
column: self.column.clone(),
})
}
fn is_empty(&self) -> bool {
self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none()
self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none()
}
fn backing(&self) -> &Arc<Database> {
&self.backing
}
fn latest_era(&self) -> Option<u64> { self.latest_era }
@ -338,11 +333,11 @@ impl JournalDB for EarlyMergeDB {
}
fn state(&self, id: &H256) -> Option<Bytes> {
self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
}
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
// journal format:
// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
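// Editor's note: a sketch (not part of this commit) of the journal record
// key built below — rlp([era, index, padding]) — assuming the util rlp
// API this file already uses; the padding keeps every journal key past
// the 12-byte DB_PREFIX_LEN minimum that prefix lookups rely on.
fn journal_key(era: u64, index: usize) -> Vec<u8> {
    const PADDING: [u8; 10] = [0u8; 10];
    let mut r = RlpStream::new_list(3);
    r.append(&era);
    r.append(&index);
    r.append(&&PADDING[..]);
    r.out()
}
// e.g. the record for the current commit lands under journal_key(now, index)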
@ -389,13 +384,12 @@ impl JournalDB for EarlyMergeDB {
// record new commit's details.
let mut refs = self.refs.as_ref().unwrap().write();
let batch = DBTransaction::new();
let trace = false;
{
let mut index = 0usize;
let mut last;
while try!(self.backing.get({
while try!(self.backing.get(self.column, {
let mut r = RlpStream::new_list(3);
r.append(&now);
r.append(&index);
@ -436,15 +430,15 @@ impl JournalDB for EarlyMergeDB {
r.begin_list(inserts.len());
inserts.iter().foreach(|&(k, _)| {r.append(&k);});
r.append(&removes);
Self::insert_keys(&inserts, &self.backing, &mut refs, &batch, trace);
Self::insert_keys(&inserts, &self.backing, self.column, &mut refs, &batch, trace);
if trace {
let ins = inserts.iter().map(|&(k, _)| k).collect::<Vec<_>>();
trace!(target: "jdb.ops", " Inserts: {:?}", ins);
trace!(target: "jdb.ops", " Deletes: {:?}", removes);
}
try!(batch.put(&last, r.as_raw()));
try!(batch.put(self.column, &last, r.as_raw()));
if self.latest_era.map_or(true, |e| now > e) {
try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)));
self.latest_era = Some(now);
}
}
@ -453,7 +447,7 @@ impl JournalDB for EarlyMergeDB {
if let Some((end_era, canon_id)) = end {
let mut index = 0usize;
let mut last;
while let Some(rlp_data) = try!(self.backing.get({
while let Some(rlp_data) = try!(self.backing.get(self.column, {
let mut r = RlpStream::new_list(3);
r.append(&end_era);
r.append(&index);
@ -470,7 +464,7 @@ impl JournalDB for EarlyMergeDB {
if trace {
trace!(target: "jdb.ops", " Expunging: {:?}", deletes);
}
Self::remove_keys(&deletes, &mut refs, &batch, RemoveFrom::Archive, trace);
Self::remove_keys(&deletes, &mut refs, &batch, self.column, RemoveFrom::Archive, trace);
if trace {
trace!(target: "jdb.ops", " Finalising: {:?}", inserts);
@ -488,7 +482,7 @@ impl JournalDB for EarlyMergeDB {
}
Some( RefInfo{queue_refs: x, in_archive: false} ) => {
// must set already in.
Self::set_already_in(&batch, k);
Self::set_already_in(&batch, self.column, k);
refs.insert(k.clone(), RefInfo{ queue_refs: x - 1, in_archive: true });
}
Some( RefInfo{in_archive: true, ..} ) => {
@ -502,10 +496,10 @@ impl JournalDB for EarlyMergeDB {
if trace {
trace!(target: "jdb.ops", " Reverting: {:?}", inserts);
}
Self::remove_keys(&inserts, &mut refs, &batch, RemoveFrom::Queue, trace);
Self::remove_keys(&inserts, &mut refs, &batch, self.column, RemoveFrom::Queue, trace);
}
try!(batch.delete(&last));
try!(batch.delete(self.column, &last));
index += 1;
}
if trace {
@ -513,10 +507,6 @@ impl JournalDB for EarlyMergeDB {
}
}
try!(self.backing.write(batch));
// Comment out for now. TODO: automatically enable in tests.
if trace {
trace!(target: "jdb", "OK: {:?}", refs.clone());
}
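// Editor's note: commit() now takes the caller's batch and no longer
// flushes the backing database itself; the commit_batch helper that the
// updated tests below call presumably wraps it along these lines (a
// sketch, not the actual trait method):
fn commit_batch(jdb: &mut EarlyMergeDB, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
    let batch = DBTransaction::new();
    let commits = try!(jdb.commit(&batch, now, id, end));
    try!(jdb.backing().write(batch));
    Ok(commits)
}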
@ -535,7 +525,7 @@ mod tests {
use super::super::traits::JournalDB;
use hashdb::*;
use log::init_log;
use kvdb::DatabaseConfig;
use kvdb::{Database, DatabaseConfig};
#[test]
fn insert_same_in_fork() {
@ -543,25 +533,25 @@ mod tests {
let mut jdb = EarlyMergeDB::new_temp();
let x = jdb.insert(b"X");
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&x);
jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
let x = jdb.insert(b"X");
jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
jdb.commit_batch(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();
jdb.commit_batch(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&x));
@ -571,17 +561,17 @@ mod tests {
fn insert_older_era() {
let mut jdb = EarlyMergeDB::new_temp();
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0a".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0a".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
let bar = jdb.insert(b"bar");
jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&bar);
jdb.commit(0, &b"0b".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0b".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
@ -592,20 +582,20 @@ mod tests {
// history is 3
let mut jdb = EarlyMergeDB::new_temp();
let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&h));
jdb.remove(&h);
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&h));
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&h));
jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&h));
jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&h));
}
@ -617,7 +607,7 @@ mod tests {
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
@ -625,7 +615,7 @@ mod tests {
jdb.remove(&foo);
jdb.remove(&bar);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
@ -633,20 +623,20 @@ mod tests {
let foo = jdb.insert(b"foo");
jdb.remove(&baz);
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&bar));
assert!(jdb.contains(&baz));
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&bar));
assert!(!jdb.contains(&baz));
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&foo));
assert!(!jdb.contains(&bar));
@ -660,25 +650,25 @@ mod tests {
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
assert!(jdb.contains(&baz));
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&baz));
@ -691,115 +681,113 @@ mod tests {
let mut jdb = EarlyMergeDB::new_temp();
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
assert!(jdb.contains(&foo));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
jdb.commit_batch(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
}
#[test]
fn fork_same_key_one() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
jdb.commit(0, &b"0".sha3(), None).unwrap();
let mut jdb = EarlyMergeDB::new_temp();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
jdb.commit_batch(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
}
#[test]
fn fork_same_key_other() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
jdb.commit(0, &b"0".sha3(), None).unwrap();
let mut jdb = EarlyMergeDB::new_temp();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
}
#[test]
fn fork_ins_del_ins() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
jdb.commit(0, &b"0".sha3(), None).unwrap();
let mut jdb = EarlyMergeDB::new_temp();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap();
jdb.commit_batch(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap();
jdb.commit_batch(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
fn new_db(path: &Path) -> EarlyMergeDB {
let config = DatabaseConfig::with_columns(Some(1));
let backing = Arc::new(Database::open(&config, path.to_str().unwrap()).unwrap());
EarlyMergeDB::new(backing, Some(0))
}
#[test]
fn reopen() {
let mut dir = ::std::env::temp_dir();
@ -807,27 +795,27 @@ mod tests {
let bar = H256::random();
let foo = {
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
// history is 1
let foo = jdb.insert(b"foo");
jdb.emplace(bar.clone(), b"bar".to_vec());
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
foo
};
{
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
{
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&foo));
}
@ -836,145 +824,136 @@ mod tests {
#[test]
fn insert_delete_insert_delete_insert_expunge() {
init_log();
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = EarlyMergeDB::new_temp();
// history is 4
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), None).unwrap();
jdb.commit_batch(3, &b"3".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
// expunge foo
jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
#[test]
fn forked_insert_delete_insert_delete_insert_expunge() {
init_log();
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = EarlyMergeDB::new_temp();
// history is 4
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(1, &b"1a".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(1, &b"1b".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(2, &b"2a".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2a".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(2, &b"2b".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(3, &b"3a".sha3(), None).unwrap();
jdb.commit_batch(3, &b"3a".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(3, &b"3b".sha3(), None).unwrap();
jdb.commit_batch(3, &b"3b".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
// expunge foo
jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
#[test]
fn broken_assert() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = EarlyMergeDB::new_temp();
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
// history is 1
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
// foo is ancient history.
jdb.remove(&foo);
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN
jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.remove(&foo);
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&foo));
}
#[test]
fn reopen_test() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = EarlyMergeDB::new_temp();
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
// history is 4
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(3, &b"3".sha3(), None).unwrap();
jdb.commit_batch(3, &b"3".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
// foo is ancient history.
jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.remove(&bar);
jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.insert(b"bar");
jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
@ -988,45 +967,48 @@ mod tests {
let foo = b"foo".sha3();
{
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
// history is 1
jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
// foo is ancient history.
jdb.remove(&foo);
jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.insert(b"foo");
jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
// incantation to reopen the db
}; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
}; {
let mut jdb = new_db(&dir);
jdb.remove(&foo);
jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
// incantation to reopen the db
}; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
}; {
let mut jdb = new_db(&dir);
jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
// incantation to reopen the db
}; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
}; {
let mut jdb = new_db(&dir);
jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap();
jdb.commit_batch(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&foo));
}
@ -1037,26 +1019,26 @@ mod tests {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let (foo, bar, baz) = {
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
// history is 1
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
(foo, bar, baz)
};
{
let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
let mut jdb = new_db(&dir);
jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&baz));

View File

@ -17,7 +17,7 @@
//! `JournalDB` interface and implementation.
use common::*;
use kvdb::DatabaseConfig;
use kvdb::Database;
/// Export the journaldb module.
pub mod traits;
@ -116,19 +116,18 @@ impl fmt::Display for Algorithm {
}
/// Create a new `JournalDB` trait object.
pub fn new(path: &str, algorithm: Algorithm, config: DatabaseConfig) -> Box<JournalDB> {
pub fn new(backing: Arc<Database>, algorithm: Algorithm, col: Option<u32>) -> Box<JournalDB> {
match algorithm {
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path, config)),
Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path, config)),
Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path, config)),
Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(path, config)),
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)),
Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)),
Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(backing, col)),
Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(backing, col)),
}
}
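// Editor's note: a usage sketch (not part of this commit) mirroring the
// new_db helpers in the tests — callers open one shared Database with
// column families, then hand each JournalDB an Arc plus a column index.
// DatabaseConfig is imported locally here; this module no longer needs it.
fn open_journal_db(path: &str) -> Box<JournalDB> {
    use kvdb::DatabaseConfig;
    let config = DatabaseConfig::with_columns(Some(1));
    let backing = Arc::new(Database::open(&config, path).expect("Error opening database"));
    new(backing, Algorithm::EarlyMerge, Some(0))
}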
// all keys must be at least 12 bytes
const DB_PREFIX_LEN : usize = 12;
const LATEST_ERA_KEY : [u8; DB_PREFIX_LEN] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
const VERSION_KEY : [u8; DB_PREFIX_LEN] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
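// Editor's note: a tiny check (not part of this commit) showing the fixed
// key layout — a 4-byte ASCII tag zero-padded to the 12-byte minimum
// stated above, so it remains a valid key for prefix-based lookups.
fn main() {
    const DB_PREFIX_LEN: usize = 12;
    const LATEST_ERA_KEY: [u8; DB_PREFIX_LEN] =
        [b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0];
    assert_eq!(&LATEST_ERA_KEY[..4], &b"last"[..]);
}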
#[cfg(test)]
mod tests {

View File

@ -20,8 +20,8 @@ use common::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY};
use kvdb::{Database, DBTransaction, DatabaseConfig};
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
use kvdb::{Database, DBTransaction};
#[cfg(test)]
use std::env;
use super::JournalDB;
@ -61,6 +61,7 @@ pub struct OverlayRecentDB {
transaction_overlay: MemoryDB,
backing: Arc<Database>,
journal_overlay: Arc<RwLock<JournalOverlay>>,
column: Option<u32>,
}
#[derive(PartialEq)]
@ -89,38 +90,22 @@ impl Clone for OverlayRecentDB {
transaction_overlay: self.transaction_overlay.clone(),
backing: self.backing.clone(),
journal_overlay: self.journal_overlay.clone(),
column: self.column.clone(),
}
}
}
const DB_VERSION : u32 = 0x203;
const PADDING : [u8; 10] = [ 0u8; 10 ];
impl OverlayRecentDB {
/// Create a new instance from file
pub fn new(path: &str, config: DatabaseConfig) -> OverlayRecentDB {
Self::from_prefs(path, config)
}
/// Create a new instance from file
pub fn from_prefs(path: &str, config: DatabaseConfig) -> OverlayRecentDB {
let backing = Database::open(&config, path).unwrap_or_else(|e| {
panic!("Error opening state db: {}", e);
});
if !backing.is_empty() {
match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
Ok(Some(DB_VERSION)) => {}
v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path)
}
} else {
backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
}
let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&backing)));
/// Create a new instance.
pub fn new(backing: Arc<Database>, col: Option<u32>) -> OverlayRecentDB {
let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&backing, col)));
OverlayRecentDB {
transaction_overlay: MemoryDB::new(),
backing: Arc::new(backing),
backing: backing,
journal_overlay: journal_overlay,
column: col,
}
}
@ -129,31 +114,32 @@ impl OverlayRecentDB {
pub fn new_temp() -> OverlayRecentDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
Self::new(dir.to_str().unwrap(), DatabaseConfig::default())
let backing = Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap());
Self::new(backing, None)
}
#[cfg(test)]
fn can_reconstruct_refs(&self) -> bool {
let reconstructed = Self::read_overlay(&self.backing);
let reconstructed = Self::read_overlay(&self.backing, self.column);
let journal_overlay = self.journal_overlay.read();
*journal_overlay == reconstructed
}
fn payload(&self, key: &H256) -> Option<Bytes> {
self.backing.get(key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
self.backing.get(self.column, key).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
}
fn read_overlay(db: &Database) -> JournalOverlay {
fn read_overlay(db: &Database, col: Option<u32>) -> JournalOverlay {
let mut journal = HashMap::new();
let mut overlay = MemoryDB::new();
let mut count = 0;
let mut latest_era = None;
if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") {
let mut era = decode::<u64>(&val);
latest_era = Some(era);
loop {
let mut index = 0usize;
while let Some(rlp_data) = db.get({
while let Some(rlp_data) = db.get(col, {
let mut r = RlpStream::new_list(3);
r.append(&era);
r.append(&index);
@ -212,21 +198,24 @@ impl JournalDB for OverlayRecentDB {
}
fn is_empty(&self) -> bool {
self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none()
self.backing.get(self.column, &LATEST_ERA_KEY).expect("Low level database error").is_none()
}
fn backing(&self) -> &Arc<Database> {
&self.backing
}
fn latest_era(&self) -> Option<u64> { self.journal_overlay.read().latest_era }
fn state(&self, key: &H256) -> Option<Bytes> {
let v = self.journal_overlay.read().backing_overlay.get(&OverlayRecentDB::to_short_key(key)).map(|v| v.to_vec());
v.or_else(|| self.backing.get_by_prefix(&key[0..DB_PREFIX_LEN]).map(|b| b.to_vec()))
v.or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.to_vec()))
}
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
// record new commit's details.
trace!("commit: #{} ({}), end era: {:?}", now, id, end);
let mut journal_overlay = self.journal_overlay.write();
let batch = DBTransaction::new();
{
let mut r = RlpStream::new_list(3);
let mut tx = self.transaction_overlay.drain();
@ -249,9 +238,9 @@ impl JournalDB for OverlayRecentDB {
k.append(&now);
k.append(&index);
k.append(&&PADDING[..]);
try!(batch.put(&k.drain(), r.as_raw()));
try!(batch.put(self.column, &k.drain(), r.as_raw()));
if journal_overlay.latest_era.map_or(true, |e| now > e) {
try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)));
journal_overlay.latest_era = Some(now);
}
journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys });
@ -271,7 +260,7 @@ impl JournalDB for OverlayRecentDB {
r.append(&end_era);
r.append(&index);
r.append(&&PADDING[..]);
try!(batch.delete(&r.drain()));
try!(batch.delete(self.column, &r.drain()));
trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len());
{
if canon_id == journal.id {
@ -290,7 +279,7 @@ impl JournalDB for OverlayRecentDB {
}
// apply canon inserts first
for (k, v) in canon_insertions {
try!(batch.put(&k, &v));
try!(batch.put(self.column, &k, &v));
}
// update the overlay
for k in overlay_deletions {
@ -299,13 +288,12 @@ impl JournalDB for OverlayRecentDB {
// apply canon deletions
for k in canon_deletions {
if !journal_overlay.backing_overlay.contains(&OverlayRecentDB::to_short_key(&k)) {
try!(batch.delete(&k));
try!(batch.delete(self.column, &k));
}
}
}
journal_overlay.journal.remove(&end_era);
}
try!(self.backing.write(batch));
Ok(0)
}
@ -314,7 +302,7 @@ impl JournalDB for OverlayRecentDB {
impl HashDB for OverlayRecentDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter() {
for (key, _) in self.backing.iter(self.column) {
let h = H256::from_slice(key.deref());
ret.insert(h, 1);
}
@ -374,7 +362,12 @@ mod tests {
use hashdb::*;
use log::init_log;
use journaldb::JournalDB;
use kvdb::DatabaseConfig;
use kvdb::Database;
fn new_db(path: &Path) -> OverlayRecentDB {
let backing = Arc::new(Database::open_default(path.to_str().unwrap()).unwrap());
OverlayRecentDB::new(backing, None)
}
#[test]
fn insert_same_in_fork() {
@ -382,25 +375,25 @@ mod tests {
let mut jdb = OverlayRecentDB::new_temp();
let x = jdb.insert(b"X");
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&x);
jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
let x = jdb.insert(b"X");
jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
jdb.commit_batch(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();
jdb.commit_batch(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&x));
@ -411,20 +404,20 @@ mod tests {
// history is 3
let mut jdb = OverlayRecentDB::new_temp();
let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&h));
jdb.remove(&h);
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&h));
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&h));
jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&h));
jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&h));
}
@ -436,7 +429,7 @@ mod tests {
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
@ -444,7 +437,7 @@ mod tests {
jdb.remove(&foo);
jdb.remove(&bar);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
@ -452,20 +445,20 @@ mod tests {
let foo = jdb.insert(b"foo");
jdb.remove(&baz);
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&bar));
assert!(jdb.contains(&baz));
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&bar));
assert!(!jdb.contains(&baz));
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&foo));
assert!(!jdb.contains(&bar));
@ -479,25 +472,25 @@ mod tests {
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
assert!(jdb.contains(&baz));
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&baz));
@ -510,112 +503,105 @@ mod tests {
let mut jdb = OverlayRecentDB::new_temp();
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
assert!(jdb.contains(&foo));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
jdb.commit_batch(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
}
#[test]
fn fork_same_key_one() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
jdb.commit(0, &b"0".sha3(), None).unwrap();
let mut jdb = OverlayRecentDB::new_temp();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
jdb.commit_batch(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
}
#[test]
fn fork_same_key_other() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = OverlayRecentDB::new_temp();
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
}
#[test]
fn fork_ins_del_ins() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = OverlayRecentDB::new_temp();
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap();
jdb.commit_batch(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap();
jdb.commit_batch(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
@ -626,27 +612,27 @@ mod tests {
let bar = H256::random();
let foo = {
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
// history is 1
let foo = jdb.insert(b"foo");
jdb.emplace(bar.clone(), b"bar".to_vec());
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
foo
};
{
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
{
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&foo));
}
@ -655,145 +641,133 @@ mod tests {
#[test]
fn insert_delete_insert_delete_insert_expunge() {
init_log();
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = OverlayRecentDB::new_temp();
// history is 4
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), None).unwrap();
jdb.commit_batch(3, &b"3".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
// expunge foo
jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
#[test]
fn forked_insert_delete_insert_delete_insert_expunge() {
init_log();
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = OverlayRecentDB::new_temp();
// history is 4
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(1, &b"1a".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(1, &b"1b".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(2, &b"2a".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2a".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(2, &b"2b".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(3, &b"3a".sha3(), None).unwrap();
jdb.commit_batch(3, &b"3a".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.commit(3, &b"3b".sha3(), None).unwrap();
jdb.commit_batch(3, &b"3b".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
// expunge foo
jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
#[test]
fn broken_assert() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = OverlayRecentDB::new_temp();
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
// history is 1
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
// foo is ancient history.
jdb.remove(&foo);
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN
jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.remove(&foo);
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&foo));
}
#[test]
fn reopen_test() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = OverlayRecentDB::new_temp();
// history is 4
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(3, &b"3".sha3(), None).unwrap();
jdb.commit_batch(3, &b"3".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
// foo is ancient history.
jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
jdb.remove(&bar);
jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.insert(b"foo");
jdb.insert(b"bar");
jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
@ -807,45 +781,48 @@ mod tests {
let foo = b"foo".sha3();
{
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
// history is 1
jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
// foo is ancient history.
jdb.remove(&foo);
jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
jdb.insert(b"foo");
jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
// incantation to reopen the db
}; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
}; {
let mut jdb = new_db(&dir);
jdb.remove(&foo);
jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
// incantation to reopen the db
}; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
}; {
let mut jdb = new_db(&dir);
jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
// incantation to reopen the db
}; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
}; {
let mut jdb = new_db(&dir);
jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap();
jdb.commit_batch(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(!jdb.contains(&foo));
}
@@ -856,26 +833,26 @@ mod tests {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let (foo, bar, baz) = {
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
let mut jdb = new_db(&dir);
// history is 1
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
(foo, bar, baz)
};
{
let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default());
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
let mut jdb = new_db(&dir);
jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&baz));
@@ -887,17 +864,17 @@ mod tests {
fn insert_older_era() {
let mut jdb = OverlayRecentDB::new_temp();
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0a".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0a".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
let bar = jdb.insert(b"bar");
jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.remove(&bar);
jdb.commit(0, &b"0b".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0b".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs());
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));

View File

@@ -20,9 +20,9 @@ use common::*;
use rlp::*;
use hashdb::*;
use overlaydb::*;
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY};
use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
use super::traits::JournalDB;
use kvdb::{Database, DBTransaction, DatabaseConfig};
use kvdb::{Database, DBTransaction};
#[cfg(test)]
use std::env;
@@ -39,35 +39,23 @@ pub struct RefCountedDB {
latest_era: Option<u64>,
inserts: Vec<H256>,
removes: Vec<H256>,
column: Option<u32>,
}
const DB_VERSION : u32 = 0x200;
const PADDING : [u8; 10] = [ 0u8; 10 ];
impl RefCountedDB {
/// Create a new instance given a `backing` database.
pub fn new(path: &str, config: DatabaseConfig) -> RefCountedDB {
let backing = Database::open(&config, path).unwrap_or_else(|e| {
panic!("Error opening state db: {}", e);
});
if !backing.is_empty() {
match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
Ok(Some(DB_VERSION)) => {},
v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path)
}
} else {
backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
}
let backing = Arc::new(backing);
let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::<u64>(&val));
pub fn new(backing: Arc<Database>, col: Option<u32>) -> RefCountedDB {
let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::<u64>(&val));
RefCountedDB {
forward: OverlayDB::new_with_arc(backing.clone()),
forward: OverlayDB::new(backing.clone(), col),
backing: backing,
inserts: vec![],
removes: vec![],
latest_era: latest_era,
column: col,
}
}
@@ -76,7 +64,8 @@ impl RefCountedDB {
fn new_temp() -> RefCountedDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
Self::new(dir.to_str().unwrap(), DatabaseConfig::default())
let backing = Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap());
Self::new(backing, None)
}
}
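Construction now borrows a shared `Database` handle plus the column the journal should occupy, mirroring `new_temp`. A minimal sketch of standalone use, not taken from the diff; the path is illustrative and the commit is spelled out as the explicit two-phase batch write:

let backing = Arc::new(Database::open_default("/tmp/rcdb-example").unwrap());
let mut jdb = RefCountedDB::new(backing.clone(), None); // journal in the default column
jdb.insert(b"foo");
let batch = backing.transaction();
jdb.commit(&batch, 0, &b"0".sha3(), None).unwrap(); // journal era 0 into the batch
backing.write(batch).unwrap();                      // one atomic write to the backing db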
@@ -97,6 +86,7 @@ impl JournalDB for RefCountedDB {
latest_era: self.latest_era,
inserts: self.inserts.clone(),
removes: self.removes.clone(),
column: self.column.clone(),
})
}
@@ -108,13 +98,17 @@ impl JournalDB for RefCountedDB {
self.latest_era.is_none()
}
fn backing(&self) -> &Arc<Database> {
&self.backing
}
fn latest_era(&self) -> Option<u64> { self.latest_era }
fn state(&self, id: &H256) -> Option<Bytes> {
self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
}
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
// journal format:
// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
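// For illustration (a sketch, not part of the diff): entry `index` of era `now`
// lives under an RLP-encoded triple key, matching the lookup loop below.
//     let mut r = RlpStream::new_list(3);
//     r.append(&now);
//     r.append(&index);
//     r.append(&&PADDING[..]);
//     let key = r.as_raw(); // column-local key of the journal record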
@@ -128,12 +122,11 @@ impl JournalDB for RefCountedDB {
// of its inserts otherwise.
// record new commit's details.
let batch = DBTransaction::new();
{
let mut index = 0usize;
let mut last;
while try!(self.backing.get({
while try!(self.backing.get(self.column, {
let mut r = RlpStream::new_list(3);
r.append(&now);
r.append(&index);
@@ -148,7 +141,7 @@ impl JournalDB for RefCountedDB {
r.append(id);
r.append(&self.inserts);
r.append(&self.removes);
try!(batch.put(&last, r.as_raw()));
try!(batch.put(self.column, &last, r.as_raw()));
trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, index, id, self.inserts, self.removes);
@@ -156,7 +149,7 @@ impl JournalDB for RefCountedDB {
self.removes.clear();
if self.latest_era.map_or(true, |e| now > e) {
try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)));
self.latest_era = Some(now);
}
}
@@ -167,7 +160,7 @@ impl JournalDB for RefCountedDB {
let mut last;
while let Some(rlp_data) = {
// trace!(target: "rcdb", "checking for journal #{}.{}", end_era, index);
try!(self.backing.get({
try!(self.backing.get(self.column, {
let mut r = RlpStream::new_list(3);
r.append(&end_era);
r.append(&index);
@@ -183,13 +176,12 @@ impl JournalDB for RefCountedDB {
for i in &to_remove {
self.forward.remove(i);
}
try!(batch.delete(&last));
try!(batch.delete(self.column, &last));
index += 1;
}
}
let r = try!(self.forward.commit_to_batch(&batch));
try!(self.backing.write(batch));
Ok(r)
}
}
@@ -209,16 +201,16 @@ mod tests {
// history is 3
let mut jdb = RefCountedDB::new_temp();
let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.contains(&h));
jdb.remove(&h);
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert!(jdb.contains(&h));
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert!(jdb.contains(&h));
jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.contains(&h));
jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(!jdb.contains(&h));
}
@@ -228,16 +220,16 @@ mod tests {
let mut jdb = RefCountedDB::new_temp();
assert_eq!(jdb.latest_era(), None);
let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert_eq!(jdb.latest_era(), Some(0));
jdb.remove(&h);
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit_batch(1, &b"1".sha3(), None).unwrap();
assert_eq!(jdb.latest_era(), Some(1));
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit_batch(2, &b"2".sha3(), None).unwrap();
assert_eq!(jdb.latest_era(), Some(2));
jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
assert_eq!(jdb.latest_era(), Some(3));
jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
assert_eq!(jdb.latest_era(), Some(4));
}
@@ -248,32 +240,32 @@ mod tests {
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
jdb.remove(&foo);
jdb.remove(&bar);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
assert!(jdb.contains(&baz));
let foo = jdb.insert(b"foo");
jdb.remove(&baz);
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit_batch(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&bar));
assert!(jdb.contains(&baz));
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit_batch(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&bar));
assert!(!jdb.contains(&baz));
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit_batch(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(!jdb.contains(&foo));
assert!(!jdb.contains(&bar));
assert!(!jdb.contains(&baz));
@@ -286,22 +278,22 @@ mod tests {
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit_batch(0, &b"0".sha3(), None).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.commit_batch(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(jdb.contains(&bar));
assert!(jdb.contains(&baz));
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
jdb.commit_batch(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.contains(&foo));
assert!(!jdb.contains(&baz));
assert!(!jdb.contains(&bar));

View File

@@ -18,6 +18,7 @@
use common::*;
use hashdb::*;
use kvdb::{Database, DBTransaction};
/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually
/// exclusive actions.
@@ -36,11 +37,22 @@ pub trait JournalDB : HashDB + Send + Sync {
/// Commit all recent insert operations and canonical historical commits' removals from the
/// old era to the backing database, reverting any non-canonical historical commit's inserts.
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError>;
fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError>;
/// State data query
fn state(&self, _id: &H256) -> Option<Bytes>;
/// Whether this database is pruned.
fn is_pruned(&self) -> bool { true }
/// Get backing database.
fn backing(&self) -> &Arc<Database>;
#[cfg(test)]
/// Commit all changes in a single batch
fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
let batch = self.backing().transaction();
let res = try!(self.commit(&batch, now, id, end));
self.backing().write(batch).map(|_| res).map_err(Into::into)
}
}
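Outside of tests the caller owns the batch, so a journal commit can ride in the same `DBTransaction` as other column writes. A sketch of the calling convention; the `flush_era` helper is illustrative, not part of the diff:

fn flush_era<J: JournalDB>(jdb: &mut J, now: u64, id: &H256) -> Result<u32, UtilError> {
    let batch = jdb.backing().transaction();           // one batch for the whole era
    let ops = try!(jdb.commit(&batch, now, id, None)); // journal into the batch only
    jdb.backing().write(batch).map(|_| ops).map_err(Into::into)
}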

View File

@@ -18,7 +18,7 @@
use std::default::Default;
use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBVector, DBIterator,
Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache};
Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column};
const DB_BACKGROUND_FLUSHES: i32 = 2;
const DB_BACKGROUND_COMPACTIONS: i32 = 2;
@@ -26,33 +26,31 @@ const DB_BACKGROUND_COMPACTIONS: i32 = 2;
/// Write transaction. Batches a sequence of put/delete operations for efficiency.
pub struct DBTransaction {
batch: WriteBatch,
}
impl Default for DBTransaction {
fn default() -> Self {
DBTransaction::new()
}
cfs: Vec<Column>,
}
impl DBTransaction {
/// Create new transaction.
pub fn new() -> DBTransaction {
DBTransaction { batch: WriteBatch::new() }
pub fn new(db: &Database) -> DBTransaction {
DBTransaction {
batch: WriteBatch::new(),
cfs: db.cfs.clone(),
}
}
/// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write.
pub fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
self.batch.put(key, value)
pub fn put(&self, col: Option<u32>, key: &[u8], value: &[u8]) -> Result<(), String> {
col.map_or_else(|| self.batch.put(key, value), |c| self.batch.put_cf(self.cfs[c as usize], key, value))
}
/// Delete value by key.
pub fn delete(&self, key: &[u8]) -> Result<(), String> {
self.batch.delete(key)
pub fn delete(&self, col: Option<u32>, key: &[u8]) -> Result<(), String> {
col.map_or_else(|| self.batch.delete(key), |c| self.batch.delete_cf(self.cfs[c as usize], key))
}
}
/// Compaction profile for the database settings
#[derive(Clone)]
#[derive(Clone, Copy)]
pub struct CompactionProfile {
/// L0-L1 target file size
pub initial_file_size: u64,
@@ -85,6 +83,7 @@ impl CompactionProfile {
}
/// Database configuration
#[derive(Clone, Copy)]
pub struct DatabaseConfig {
/// Max number of open files.
pub max_open_files: i32,
@@ -92,22 +91,16 @@ pub struct DatabaseConfig {
pub cache_size: Option<usize>,
/// Compaction profile
pub compaction: CompactionProfile,
/// Set number of columns
pub columns: Option<u32>,
}
impl DatabaseConfig {
/// Database with default settings and specified cache size
pub fn with_cache(cache_size: usize) -> DatabaseConfig {
DatabaseConfig {
cache_size: Some(cache_size),
max_open_files: 256,
compaction: CompactionProfile::default(),
}
}
/// Modify the compaction profile
pub fn compaction(mut self, profile: CompactionProfile) -> Self {
self.compaction = profile;
self
/// Create new `DatabaseConfig` with default parameters and specified set of columns.
pub fn with_columns(columns: Option<u32>) -> Self {
let mut config = Self::default();
config.columns = columns;
config
}
}
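A sketch of the resulting API surface, assuming an illustrative path, two columns, and keys (not taken from the diff):

let config = DatabaseConfig::with_columns(Some(2));
let db = Database::open(&config, "/tmp/multi-col-example").unwrap();
let tx = db.transaction();
tx.put(None, b"meta", b"default column").unwrap(); // unnamed default column
tx.put(Some(0), b"key", b"col0 value").unwrap();   // column family "col0"
tx.delete(Some(1), b"stale").unwrap();             // column family "col1"
db.write(tx).unwrap();
assert_eq!(db.get(Some(0), b"key").unwrap().unwrap().deref(), b"col0 value");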
@@ -115,8 +108,9 @@ impl Default for DatabaseConfig {
fn default() -> DatabaseConfig {
DatabaseConfig {
cache_size: None,
max_open_files: 256,
max_open_files: 1024,
compaction: CompactionProfile::default(),
columns: None,
}
}
}
@@ -138,6 +132,7 @@ impl<'a> Iterator for DatabaseIterator {
pub struct Database {
db: DB,
write_opts: WriteOptions,
cfs: Vec<Column>,
}
impl Database {
@@ -171,10 +166,35 @@ impl Database {
opts.set_block_based_table_factory(&block_opts);
}
let write_opts = WriteOptions::new();
//write_opts.disable_wal(true); // TODO: make sure this is safe
let mut write_opts = WriteOptions::new();
write_opts.disable_wal(true); // TODO: make sure this is safe
let db = match DB::open(&opts, path) {
let mut cfs: Vec<Column> = Vec::new();
let db = match config.columns {
Some(columns) => {
let cfnames: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect();
let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect();
match DB::open_cf(&opts, path, &cfnames) {
Ok(db) => {
cfs = cfnames.iter().map(|n| db.cf_handle(n).unwrap()).collect();
assert!(cfs.len() == columns as usize);
Ok(db)
}
Err(_) => {
// retry and create CFs
match DB::open_cf(&opts, path, &[]) {
Ok(mut db) => {
cfs = cfnames.iter().map(|n| db.create_cf(n, &opts).unwrap()).collect();
Ok(db)
},
err @ Err(_) => err,
}
}
}
},
None => DB::open(&opts, path)
};
let db = match db {
Ok(db) => db,
Err(ref s) if s.starts_with("Corruption:") => {
info!("{}", s);
@@ -184,17 +204,12 @@ impl Database {
},
Err(s) => { return Err(s); }
};
Ok(Database { db: db, write_opts: write_opts, })
Ok(Database { db: db, write_opts: write_opts, cfs: cfs })
}
/// Insert a key-value pair in the transaction. Any existing value will be overwritten.
pub fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
self.db.put_opt(key, value, &self.write_opts)
}
/// Delete value by key.
pub fn delete(&self, key: &[u8]) -> Result<(), String> {
self.db.delete_opt(key, &self.write_opts)
/// Creates new transaction for this database.
pub fn transaction(&self) -> DBTransaction {
DBTransaction::new(self)
}
/// Commit transaction to database.
@@ -203,13 +218,14 @@ impl Database {
}
/// Get value by key.
pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
self.db.get(key)
pub fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<DBVector>, String> {
col.map_or_else(|| self.db.get(key), |c| self.db.get_cf(self.cfs[c as usize], key))
}
/// Get value by partial key. Prefix size should match configured prefix size.
pub fn get_by_prefix(&self, prefix: &[u8]) -> Option<Box<[u8]>> {
let mut iter = self.db.iterator(IteratorMode::From(prefix, Direction::Forward));
pub fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
let mut iter = col.map_or_else(|| self.db.iterator(IteratorMode::From(prefix, Direction::Forward)),
|c| self.db.iterator_cf(self.cfs[c as usize], IteratorMode::From(prefix, Direction::Forward)).unwrap());
match iter.next() {
// TODO: use prefix_same_as_start read option (not available in C API currently)
Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None },
@@ -218,13 +234,14 @@ impl Database {
}
/// Check if there is anything in the database.
pub fn is_empty(&self) -> bool {
self.db.iterator(IteratorMode::Start).next().is_none()
pub fn is_empty(&self, col: Option<u32>) -> bool {
self.iter(col).next().is_none()
}
/// Check if there is anything in the database.
pub fn iter(&self) -> DatabaseIterator {
DatabaseIterator { iter: self.db.iterator(IteratorMode::Start) }
/// Get database iterator.
pub fn iter(&self, col: Option<u32>) -> DatabaseIterator {
col.map_or_else(|| DatabaseIterator { iter: self.db.iterator(IteratorMode::Start) },
|c| DatabaseIterator { iter: self.db.iterator_cf(self.cfs[c as usize], IteratorMode::Start).unwrap() })
}
}
@@ -243,38 +260,46 @@ mod tests {
let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
db.put(&key1, b"cat").unwrap();
db.put(&key2, b"dog").unwrap();
let batch = db.transaction();
batch.put(None, &key1, b"cat").unwrap();
batch.put(None, &key2, b"dog").unwrap();
db.write(batch).unwrap();
assert_eq!(db.get(&key1).unwrap().unwrap().deref(), b"cat");
assert_eq!(db.get(None, &key1).unwrap().unwrap().deref(), b"cat");
let contents: Vec<_> = db.iter().collect();
let contents: Vec<_> = db.iter(None).collect();
assert_eq!(contents.len(), 2);
assert_eq!(&*contents[0].0, key1.deref());
assert_eq!(&*contents[0].1, b"cat");
assert_eq!(&*contents[1].0, key2.deref());
assert_eq!(&*contents[1].1, b"dog");
db.delete(&key1).unwrap();
assert!(db.get(&key1).unwrap().is_none());
db.put(&key1, b"cat").unwrap();
let batch = db.transaction();
batch.delete(None, &key1).unwrap();
db.write(batch).unwrap();
let transaction = DBTransaction::new();
transaction.put(&key3, b"elephant").unwrap();
transaction.delete(&key1).unwrap();
assert!(db.get(None, &key1).unwrap().is_none());
let batch = db.transaction();
batch.put(None, &key1, b"cat").unwrap();
db.write(batch).unwrap();
let transaction = db.transaction();
transaction.put(None, &key3, b"elephant").unwrap();
transaction.delete(None, &key1).unwrap();
db.write(transaction).unwrap();
assert!(db.get(&key1).unwrap().is_none());
assert_eq!(db.get(&key3).unwrap().unwrap().deref(), b"elephant");
assert!(db.get(None, &key1).unwrap().is_none());
assert_eq!(db.get(None, &key3).unwrap().unwrap().deref(), b"elephant");
assert_eq!(db.get_by_prefix(&key3).unwrap().deref(), b"elephant");
assert_eq!(db.get_by_prefix(&key2).unwrap().deref(), b"dog");
assert_eq!(db.get_by_prefix(None, &key3).unwrap().deref(), b"elephant");
assert_eq!(db.get_by_prefix(None, &key2).unwrap().deref(), b"dog");
}
#[test]
fn kvdb() {
let path = RandomTempPath::create_dir();
let smoke = Database::open_default(path.as_path().to_str().unwrap()).unwrap();
assert!(smoke.is_empty());
assert!(smoke.is_empty(None));
test_db(&DatabaseConfig::default());
}
}

View File

@@ -46,14 +46,16 @@ impl Default for Config {
pub struct Batch {
inner: BTreeMap<Vec<u8>, Vec<u8>>,
batch_size: usize,
column: Option<u32>,
}
impl Batch {
/// Make a new batch with the given config.
pub fn new(config: &Config) -> Self {
pub fn new(config: &Config, col: Option<u32>) -> Self {
Batch {
inner: BTreeMap::new(),
batch_size: config.batch_size,
column: col,
}
}
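Each `Batch` now buffers writes for exactly one destination column. A sketch of the write path a migration takes, where `config`, `dest`, `key`, and `value` are assumed to be in scope:

let mut batch = Batch::new(&config, Some(0));  // every pair lands in column 0
try!(batch.insert(key, value, &mut dest));     // buffered, flushed in batch_size chunks
try!(batch.commit(&mut dest));                 // flush the remainder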
@@ -70,10 +72,10 @@ impl Batch {
pub fn commit(&mut self, dest: &mut Database) -> Result<(), Error> {
if self.inner.is_empty() { return Ok(()) }
let transaction = DBTransaction::new();
let transaction = DBTransaction::new(dest);
for keypair in &self.inner {
try!(transaction.put(&keypair.0, &keypair.1).map_err(Error::Custom));
try!(transaction.put(self.column, &keypair.0, &keypair.1).map_err(Error::Custom));
}
self.inner.clear();
@@ -102,14 +104,18 @@ impl From<::std::io::Error> for Error {
/// A generalized migration from the given db to a destination db.
pub trait Migration: 'static {
/// Number of columns in database after the migration.
fn columns(&self) -> Option<u32>;
/// Version of the database after the migration.
fn version(&self) -> u32;
/// Migrate a source to a destination.
fn migrate(&mut self, source: &Database, config: &Config, destination: &mut Database) -> Result<(), Error>;
fn migrate(&mut self, source: &Database, config: &Config, destination: &mut Database, col: Option<u32>) -> Result<(), Error>;
}
/// A simple migration over key-value pairs.
pub trait SimpleMigration: 'static {
/// Number of columns in database after the migration.
fn columns(&self) -> Option<u32>;
/// Version of database after the migration.
fn version(&self) -> u32;
/// Should migrate existing object to new database.
@@ -118,12 +124,14 @@ pub trait SimpleMigration: 'static {
}
impl<T: SimpleMigration> Migration for T {
fn columns(&self) -> Option<u32> { SimpleMigration::columns(self) }
fn version(&self) -> u32 { SimpleMigration::version(self) }
fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database) -> Result<(), Error> {
let mut batch = Batch::new(config);
fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config, col);
for (key, value) in source.iter() {
for (key, value) in source.iter(col) {
if let Some((key, value)) = self.simple_migrate(key.to_vec(), value.to_vec()) {
try!(batch.insert(key, value, dest));
}
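Concretely, the smallest conforming `SimpleMigration` under the new trait keeps the default column and copies every pair unchanged; the `NoopV10` name below is illustrative, not from the diff:

struct NoopV10;

impl SimpleMigration for NoopV10 {
    fn columns(&self) -> Option<u32> { None } // database keeps the single default column
    fn version(&self) -> u32 { 10 }
    fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
        Some((key, value)) // carry each pair over unchanged
    }
}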
@@ -197,12 +205,14 @@ impl Manager {
/// and producing a path where the final migration lives.
pub fn execute(&mut self, old_path: &Path, version: u32) -> Result<PathBuf, Error> {
let config = self.config.clone();
let mut migrations = self.migrations_from(version);
let columns = self.no_of_columns_at(version);
let migrations = self.migrations_from(version);
if migrations.is_empty() { return Err(Error::MigrationImpossible) };
let db_config = DatabaseConfig {
let mut db_config = DatabaseConfig {
max_open_files: 64,
cache_size: None,
compaction: config.compaction_profile.clone(),
compaction: config.compaction_profile,
columns: columns,
};
let db_root = database_path(old_path);
@@ -212,14 +222,28 @@ impl Manager {
// start with the old db.
let old_path_str = try!(old_path.to_str().ok_or(Error::MigrationImpossible));
let mut cur_db = try!(Database::open(&db_config, old_path_str).map_err(Error::Custom));
for migration in migrations {
// Change number of columns in new db
let current_columns = db_config.columns;
db_config.columns = migration.columns();
// open the target temporary database.
temp_path = temp_idx.path(&db_root);
let temp_path_str = try!(temp_path.to_str().ok_or(Error::MigrationImpossible));
let mut new_db = try!(Database::open(&db_config, temp_path_str).map_err(Error::Custom));
// perform the migration from cur_db to new_db.
try!(migration.migrate(&cur_db, &config, &mut new_db));
match current_columns {
// migrate only default column
None => try!(migration.migrate(&cur_db, &config, &mut new_db, None)),
Some(v) => {
// Migrate all columns in previous DB
for col in 0..v {
try!(migration.migrate(&cur_db, &config, &mut new_db, Some(col)))
}
}
}
// next iteration, we will migrate from this db into the other temp.
cur_db = new_db;
temp_idx.swap();
@@ -242,5 +266,38 @@ impl Manager {
fn migrations_from(&mut self, version: u32) -> Vec<&mut Box<Migration>> {
self.migrations.iter_mut().filter(|m| m.version() > version).collect()
}
fn no_of_columns_at(&self, version: u32) -> Option<u32> {
let migration = self.migrations.iter().find(|m| m.version() == version);
match migration {
Some(m) => m.columns(),
None => None
}
}
}
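End to end, a caller registers migrations in version order and runs everything newer than the on-disk version. A sketch using the `Migration0`/`Migration1` test migrations shown further down; `Manager::new` and `add_migration` are assumed from this module and are not shown in the diff:

let mut manager = Manager::new(Config::default());
manager.add_migration(Migration0).unwrap(); // to version 1
manager.add_migration(Migration1).unwrap(); // to version 2
let migrated = manager.execute(&old_path, 0).unwrap(); // path of the fully migrated db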
/// Prints a dot every `max` ticks.
pub struct Progress {
current: usize,
max: usize,
}
impl Default for Progress {
fn default() -> Self {
Progress {
current: 0,
max: 100_000,
}
}
}
impl Progress {
/// Tick progress meter.
pub fn tick(&mut self) {
self.current += 1;
if self.current == self.max {
self.current = 0;
flush!(".");
}
}
}
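A sketch of keeping the console alive from inside a long migration loop, where `source` is an assumed open `Database`:

let mut progress = Progress::default();
for (key, value) in source.iter(None) {
    progress.tick(); // emits "." once per 100_000 pairs
    // ... migrate (key, value) ...
}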

View File

@@ -20,7 +20,7 @@
use common::*;
use migration::{Config, SimpleMigration, Manager};
use kvdb::{Database, DBTransaction};
use kvdb::Database;
use devtools::RandomTempPath;
use std::path::PathBuf;
@@ -35,9 +35,9 @@ fn db_path(path: &Path) -> PathBuf {
fn make_db(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
let db = Database::open_default(path.to_str().unwrap()).expect("failed to open temp database");
{
let transaction = DBTransaction::new();
let transaction = db.transaction();
for (k, v) in pairs {
transaction.put(&k, &v).expect("failed to add pair to transaction");
transaction.put(None, &k, &v).expect("failed to add pair to transaction");
}
db.write(transaction).expect("failed to write db transaction");
@@ -49,7 +49,7 @@ fn verify_migration(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
let db = Database::open_default(path.to_str().unwrap()).unwrap();
for (k, v) in pairs {
let x = db.get(&k).unwrap().unwrap();
let x = db.get(None, &k).unwrap().unwrap();
assert_eq!(&x[..], &v[..]);
}
@@ -58,9 +58,9 @@ fn verify_migration(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
struct Migration0;
impl SimpleMigration for Migration0 {
fn version(&self) -> u32 {
1
}
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 1 }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
let mut key = key;
@@ -74,9 +74,9 @@ impl SimpleMigration for Migration0 {
struct Migration1;
impl SimpleMigration for Migration1 {
fn version(&self) -> u32 {
2
}
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 2 }
fn simple_migrate(&mut self, key: Vec<u8>, _value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
Some((key, vec![]))

View File

@@ -24,7 +24,6 @@ use hashdb::*;
use memorydb::*;
use std::ops::*;
use std::sync::*;
use std::env;
use std::collections::HashMap;
use kvdb::{Database, DBTransaction};
@@ -40,22 +39,29 @@ use kvdb::{Database, DBTransaction};
pub struct OverlayDB {
overlay: MemoryDB,
backing: Arc<Database>,
column: Option<u32>,
}
impl OverlayDB {
/// Create a new instance of OverlayDB given a `backing` database.
pub fn new(backing: Database) -> OverlayDB { Self::new_with_arc(Arc::new(backing)) }
/// Create a new instance of OverlayDB given a `backing` database.
pub fn new_with_arc(backing: Arc<Database>) -> OverlayDB {
OverlayDB{ overlay: MemoryDB::new(), backing: backing }
pub fn new(backing: Arc<Database>, col: Option<u32>) -> OverlayDB {
OverlayDB{ overlay: MemoryDB::new(), backing: backing, column: col }
}
/// Create a new instance of OverlayDB with an anonymous temporary database.
#[cfg(test)]
pub fn new_temp() -> OverlayDB {
let mut dir = env::temp_dir();
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
Self::new(Database::open_default(dir.to_str().unwrap()).unwrap())
Self::new(Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap()), None)
}
/// Commit all operations in a single batch.
#[cfg(test)]
pub fn commit(&mut self) -> Result<u32, UtilError> {
let batch = self.backing.transaction();
let res = try!(self.commit_to_batch(&batch));
self.backing.write(batch).map(|_| res).map_err(|e| e.into())
}
/// Commit all operations to given batch.
@@ -88,83 +94,8 @@ impl OverlayDB {
Ok(ret)
}
/// Commit all memory operations to the backing database.
///
/// Returns either an error or the number of items changed in the backing database.
///
/// Will return an error if the number of `remove()`s ever exceeds the number of
/// `insert()`s for any key. This will leave the database in an indeterminate
/// state. Don't ever let it happen.
///
/// # Example
/// ```
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::overlaydb::*;
/// fn main() {
/// let mut m = OverlayDB::new_temp();
/// let key = m.insert(b"foo"); // insert item.
/// assert!(m.contains(&key)); // key exists (in memory).
/// assert_eq!(m.commit().unwrap(), 1); // 1 item changed.
/// assert!(m.contains(&key)); // key still exists (in backing).
/// m.remove(&key); // delete item.
/// assert!(!m.contains(&key)); // key "doesn't exist" (though still does in backing).
/// m.remove(&key); // oh dear... more removes than inserts for the key...
/// //m.commit().unwrap(); // this commit/unwrap would cause a panic.
/// m.revert(); // revert both removes.
/// assert!(m.contains(&key)); // key now still exists.
/// }
/// ```
pub fn commit(&mut self) -> Result<u32, UtilError> {
let mut ret = 0u32;
let mut deletes = 0usize;
for i in self.overlay.drain().into_iter() {
let (key, (value, rc)) = i;
if rc != 0 {
match self.payload(&key) {
Some(x) => {
let (back_value, back_rc) = x;
let total_rc: i32 = back_rc as i32 + rc;
if total_rc < 0 {
return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
}
deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0};
}
None => {
if rc < 0 {
return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
}
self.put_payload(&key, (value, rc as u32));
}
};
ret += 1;
}
}
trace!("OverlayDB::commit() deleted {} nodes", deletes);
Ok(ret)
}
/// Revert all operations on this object (i.e. `insert()`s and `remove()`s) since the
/// last `commit()`.
///
/// # Example
/// ```
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::overlaydb::*;
/// fn main() {
/// let mut m = OverlayDB::new_temp();
/// let foo = m.insert(b"foo"); // insert foo.
/// m.commit().unwrap(); // commit - new operations begin here...
/// let bar = m.insert(b"bar"); // insert bar.
/// m.remove(&foo); // remove foo.
/// assert!(!m.contains(&foo)); // foo is gone.
/// assert!(m.contains(&bar)); // bar is here.
/// m.revert(); // revert the last two operations.
/// assert!(m.contains(&foo)); // foo is here.
/// assert!(!m.contains(&bar)); // bar is gone.
/// }
/// ```
pub fn revert(&mut self) { self.overlay.clear(); }
/// Get the number of references that would be committed.
@@ -172,7 +103,7 @@ impl OverlayDB {
/// Get the refs and value of the given key.
fn payload(&self, key: &H256) -> Option<(Bytes, u32)> {
self.backing.get(key)
self.backing.get(self.column, key)
.expect("Low-level database error. Some issue with your hard disk?")
.map(|d| {
let r = Rlp::new(&d);
@@ -186,24 +117,10 @@ impl OverlayDB {
let mut s = RlpStream::new_list(2);
s.append(&payload.1);
s.append(&payload.0);
batch.put(key, s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
batch.put(self.column, key, s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
false
} else {
batch.delete(key).expect("Low-level database error. Some issue with your hard disk?");
true
}
}
/// Put the refs and value of the given key, possibly deleting it from the db.
fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool {
if payload.1 > 0 {
let mut s = RlpStream::new_list(2);
s.append(&payload.1);
s.append(&payload.0);
self.backing.put(key, s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
false
} else {
self.backing.delete(key).expect("Low-level database error. Some issue with your hard disk?");
batch.delete(self.column, key).expect("Low-level database error. Some issue with your hard disk?");
true
}
}
@@ -212,7 +129,7 @@ impl OverlayDB {
impl HashDB for OverlayDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter() {
for (key, _) in self.backing.iter(self.column) {
let h = H256::from_slice(key.deref());
let r = self.payload(&h).unwrap().1;
ret.insert(h, r as i32);
@@ -274,6 +191,22 @@ impl HashDB for OverlayDB {
fn remove(&mut self, key: &H256) { self.overlay.remove(key); }
}
#[test]
fn overlaydb_revert() {
let mut m = OverlayDB::new_temp();
let foo = m.insert(b"foo"); // insert foo.
let batch = m.backing.transaction();
m.commit_to_batch(&batch).unwrap(); // commit - new operations begin here...
m.backing.write(batch).unwrap();
let bar = m.insert(b"bar"); // insert bar.
m.remove(&foo); // remove foo.
assert!(!m.contains(&foo)); // foo is gone.
assert!(m.contains(&bar)); // bar is here.
m.revert(); // revert the last two operations.
assert!(m.contains(&foo)); // foo is here.
assert!(!m.contains(&bar)); // bar is gone.
}
#[test]
fn overlaydb_overlay_insert_and_remove() {
let mut trie = OverlayDB::new_temp();
@@ -366,14 +299,18 @@ fn overlaydb_complex() {
fn playpen() {
use std::fs;
{
let db: Database = Database::open_default("/tmp/test").unwrap();
db.put(b"test", b"test2").unwrap();
match db.get(b"test") {
let db = Database::open_default("/tmp/test").unwrap();
let batch = db.transaction();
batch.put(None, b"test", b"test2").unwrap();
db.write(batch).unwrap();
match db.get(None, b"test") {
Ok(Some(value)) => println!("Got value {:?}", value.deref()),
Ok(None) => println!("No value for that key"),
Err(..) => println!("Gah"),
}
db.delete(b"test").unwrap();
let batch = db.transaction();
batch.delete(None, b"test").unwrap();
db.write(batch).unwrap();
}
fs::remove_dir_all("/tmp/test").unwrap();
}

View File

@@ -59,7 +59,7 @@ mod tests {
use kvdb::*;
let path = "db path".to_string();
let values: Vec<_> = Database::open_default(&path).unwrap().iter().map(|(_, v)| v).collect();
let values: Vec<_> = Database::open_default(&path).unwrap().iter(Some(2)).map(|(_, v)| v).collect();
let mut rlp_counts: HashMap<_, u32> = HashMap::new();
let mut rlp_sizes: HashMap<_, u32> = HashMap::new();

View File

@@ -228,7 +228,7 @@ mod tests {
fn test_compression() {
use kvdb::*;
let path = "db to test".to_string();
let values: Vec<_> = Database::open_default(&path).unwrap().iter().map(|(_, v)| v).collect();
let values: Vec<_> = Database::open_default(&path).unwrap().iter(Some(2)).map(|(_, v)| v).collect();
let mut decomp_size = 0;
let mut comp_size = 0;