remove impossible panickers related to infallible db transaction (#1947)
parent: 108024e98d
commit: 57dbdaada9
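The change in a nutshell: `DBTransaction::put`/`put_vec`/`put_compressed`/`delete` only buffer operations in memory, so they cannot fail. This commit drops their `Result<(), String>` return types and removes the `try!`, `.unwrap()`, and `.expect()` handlers (plus one `panic!` branch) that guarded errors that could never occur. Below is a minimal, self-contained sketch of the idea, using toy types modelled on the diff rather than Parity's actual kvdb code (the real `DBTransaction` keeps its op buffer behind a lock; this toy uses `RefCell` for brevity):

    use std::cell::RefCell;

    // Toy stand-ins for kvdb's types, simplified for illustration.
    enum DBOp {
        Insert { key: Vec<u8>, value: Vec<u8> },
        Delete { key: Vec<u8> },
    }

    struct DBTransaction {
        ops: RefCell<Vec<DBOp>>,
    }

    impl DBTransaction {
        fn new() -> Self {
            DBTransaction { ops: RefCell::new(Vec::new()) }
        }

        // Post-commit shape: no `Result`, because pushing onto an in-memory
        // Vec cannot fail. The old signature returned `Result<(), String>`
        // and ended in `Ok(())`, forcing callers to unwrap a non-error.
        fn put(&self, key: &[u8], value: &[u8]) {
            self.ops.borrow_mut().push(DBOp::Insert {
                key: key.to_vec(),
                value: value.to_vec(),
            });
        }

        fn delete(&self, key: &[u8]) {
            self.ops.borrow_mut().push(DBOp::Delete { key: key.to_vec() });
        }
    }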
@@ -366,12 +366,12 @@ impl BlockChain {
 		};
 
 		let batch = DBTransaction::new(&db);
-		batch.put(DB_COL_HEADERS, &hash, block.header_rlp().as_raw()).unwrap();
-		batch.put(DB_COL_BODIES, &hash, &Self::block_to_body(genesis)).unwrap();
+		batch.put(DB_COL_HEADERS, &hash, block.header_rlp().as_raw());
+		batch.put(DB_COL_BODIES, &hash, &Self::block_to_body(genesis));
 
 		batch.write(DB_COL_EXTRA, &hash, &details);
 		batch.write(DB_COL_EXTRA, &header.number(), &hash);
-		batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
+		batch.put(DB_COL_EXTRA, b"best", &hash);
 		bc.db.write(batch).expect("Low level database error. Some issue with disk?");
 
 		hash
 	}
@@ -415,7 +415,7 @@ impl BlockChain {
 		}
 		if let Some(extras) = self.db.read(DB_COL_EXTRA, &best_block_hash) as Option<BlockDetails> {
 			type DetailsKey = Key<BlockDetails, Target=H264>;
-			batch.delete(DB_COL_EXTRA, &(DetailsKey::key(&best_block_hash))).unwrap();
+			batch.delete(DB_COL_EXTRA, &(DetailsKey::key(&best_block_hash)));
 			let hash = extras.parent;
 			let range = extras.number as bc::Number .. extras.number as bc::Number;
 			let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
@@ -423,7 +423,7 @@ impl BlockChain {
 			for (k, v) in changes.into_iter() {
 				batch.write(DB_COL_EXTRA, &LogGroupPosition::from(k), &BloomGroup::from(v));
 			}
-			batch.put(DB_COL_EXTRA, b"best", &hash).unwrap();
+			batch.put(DB_COL_EXTRA, b"best", &hash);
 
 			let best_block_total_difficulty = self.block_details(&hash).unwrap().total_difficulty;
 			let best_block_rlp = self.block(&hash).unwrap();
@@ -566,8 +566,8 @@ impl BlockChain {
 		let compressed_body = UntrustedRlp::new(&Self::block_to_body(bytes)).compress(RlpType::Blocks);
 
 		// store block in db
-		batch.put(DB_COL_HEADERS, &hash, &compressed_header).unwrap();
-		batch.put(DB_COL_BODIES, &hash, &compressed_body).unwrap();
+		batch.put(DB_COL_HEADERS, &hash, &compressed_header);
+		batch.put(DB_COL_BODIES, &hash, &compressed_body);
 
 		let maybe_parent = self.block_details(&header.parent_hash());
 
@@ -669,8 +669,8 @@ impl BlockChain {
 		assert!(self.pending_best_block.read().is_none());
 
 		// store block in db
-		batch.put_compressed(DB_COL_HEADERS, &hash, block.header_rlp().as_raw().to_vec()).unwrap();
-		batch.put_compressed(DB_COL_BODIES, &hash, Self::block_to_body(bytes)).unwrap();
+		batch.put_compressed(DB_COL_HEADERS, &hash, block.header_rlp().as_raw().to_vec());
+		batch.put_compressed(DB_COL_BODIES, &hash, Self::block_to_body(bytes));
 
 		let info = self.block_info(&header);
 
@@ -768,7 +768,7 @@ impl BlockChain {
 		match update.info.location {
 			BlockLocation::Branch => (),
 			_ => if is_best {
-				batch.put(DB_COL_EXTRA, b"best", &update.info.hash).unwrap();
+				batch.put(DB_COL_EXTRA, b"best", &update.info.hash);
 				*best_block = Some(BestBlock {
 					hash: update.info.hash,
 					number: update.info.number,
@@ -454,7 +454,7 @@ impl Client {
 			retracted: route.retracted.len()
 		});
 		// Final commit to the DB
-		self.db.write_buffered(batch).expect("DB write failed.");
+		self.db.write_buffered(batch);
 		self.chain.commit();
 
 		self.update_last_hashes(&parent, hash);
@@ -149,10 +149,7 @@ pub trait Readable {
 
 impl Writable for DBTransaction {
 	fn write<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]> {
-		let result = self.put(col, &key.key(), &encode(value));
-		if let Err(err) = result {
-			panic!("db put failed, key: {:?}, err: {:?}", &key.key() as &[u8], err);
-		}
+		self.put(col, &key.key(), &encode(value));
 	}
 }
 
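The hunk above removes exactly the kind of "impossible panicker" the title refers to: once `put` returns `()`, there is no `Err` left to panic on, and the compiler enforces it. A toy version of the same pattern, reusing the sketch types from the top (the trait and method name here are simplified stand-ins, not the real `Writable` API):

    // Simplified Writable-style impl after the change: nothing to unwrap.
    trait Writable {
        fn write_kv(&self, key: &[u8], value: &[u8]);
    }

    impl Writable for DBTransaction {
        fn write_kv(&self, key: &[u8], value: &[u8]) {
            self.put(key, value); // previously: panic on an Err that could not occur
        }
    }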
@@ -144,8 +144,8 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
 		};
 
 		let batch = DBTransaction::new(&tracesdb);
-		batch.put(DB_COL_TRACE, b"enabled", &encoded_tracing).unwrap();
-		batch.put(DB_COL_TRACE, b"version", TRACE_DB_VER).unwrap();
+		batch.put(DB_COL_TRACE, b"enabled", &encoded_tracing);
+		batch.put(DB_COL_TRACE, b"version", TRACE_DB_VER);
 		tracesdb.write(batch).unwrap();
 
 		let db = TraceDB {
@@ -164,7 +164,7 @@ impl JournalDB for ArchiveDB {
 			let (key, (value, rc)) = i;
 			if rc > 0 {
 				assert!(rc == 1);
-				batch.put(self.column, &key, &value).expect("Low-level database error. Some issue with your hard disk?");
+				batch.put(self.column, &key, &value);
 				inserts += 1;
 			}
 			if rc < 0 {
@@ -175,11 +175,11 @@ impl JournalDB for ArchiveDB {
 
 		for (mut key, value) in self.overlay.drain_aux().into_iter() {
 			key.push(AUX_FLAG);
-			batch.put(self.column, &key, &value).expect("Low-level database error. Some issue with your hard disk?");
+			batch.put(self.column, &key, &value);
 		}
 
 		if self.latest_era.map_or(true, |e| now > e) {
-			try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)));
+			batch.put(self.column, &LATEST_ERA_KEY, &encode(&now));
 			self.latest_era = Some(now);
 		}
 		Ok((inserts + deletes) as u32)
@@ -196,7 +196,7 @@ impl JournalDB for ArchiveDB {
 				if try!(self.backing.get(self.column, &key)).is_some() {
 					return Err(BaseDataError::AlreadyExists(key).into());
 				}
-				try!(batch.put(self.column, &key, &value));
+				batch.put(self.column, &key, &value);
 				inserts += 1;
 			}
 			if rc < 0 {
@@ -204,14 +204,14 @@ impl JournalDB for ArchiveDB {
 				if try!(self.backing.get(self.column, &key)).is_none() {
 					return Err(BaseDataError::NegativelyReferencedHash(key).into());
 				}
-				try!(batch.delete(self.column, &key));
+				batch.delete(self.column, &key);
 				deletes += 1;
 			}
 		}
 
 		for (mut key, value) in self.overlay.drain_aux().into_iter() {
 			key.push(AUX_FLAG);
-			try!(batch.put(self.column, &key, &value));
+			batch.put(self.column, &key, &value);
 		}
 
 		Ok((inserts + deletes) as u32)
@@ -101,8 +101,8 @@ impl EarlyMergeDB {
 	}
 
 	// The next three are valid only as long as there is an insert operation of `key` in the journal.
-	fn set_already_in(batch: &DBTransaction, col: Option<u32>, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); }
-	fn reset_already_in(batch: &DBTransaction, col: Option<u32>, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); }
+	fn set_already_in(batch: &DBTransaction, col: Option<u32>, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]); }
+	fn reset_already_in(batch: &DBTransaction, col: Option<u32>, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)); }
 	fn is_already_in(backing: &Database, col: Option<u32>, key: &H256) -> bool {
 		backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
 	}
@@ -132,7 +132,7 @@ impl EarlyMergeDB {
 				// Gets removed when a key leaves the journal, so should never be set when we're placing a new key.
 				//Self::reset_already_in(&h);
 				assert!(!Self::is_already_in(backing, col, &h));
-				batch.put(col, h, d).expect("Low-level database error. Some issue with your hard disk?");
+				batch.put(col, h, d);
 				refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: false});
 				if trace {
 					trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h);
@@ -193,7 +193,7 @@ impl EarlyMergeDB {
 				}
 				Some(RefInfo{queue_refs: 1, in_archive: false}) => {
 					refs.remove(h);
-					batch.delete(col, h).expect("Low-level database error. Some issue with your hard disk?");
+					batch.delete(col, h);
 					if trace {
 						trace!(target: "jdb.fine", " remove({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h);
 					}
@@ -201,7 +201,7 @@ impl EarlyMergeDB {
 				None => {
 					// Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs.
 					//assert!(!Self::is_already_in(db, &h));
-					batch.delete(col, h).expect("Low-level database error. Some issue with your hard disk?");
+					batch.delete(col, h);
 					if trace {
 						trace!(target: "jdb.fine", " remove({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h);
 					}
@@ -436,9 +436,9 @@ impl JournalDB for EarlyMergeDB {
 				trace!(target: "jdb.ops", " Inserts: {:?}", ins);
 				trace!(target: "jdb.ops", " Deletes: {:?}", removes);
 			}
-			try!(batch.put(self.column, &last, r.as_raw()));
+			batch.put(self.column, &last, r.as_raw());
 			if self.latest_era.map_or(true, |e| now > e) {
-				try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)));
+				batch.put(self.column, &LATEST_ERA_KEY, &encode(&now));
 				self.latest_era = Some(now);
 			}
 		}
@@ -499,7 +499,7 @@ impl JournalDB for EarlyMergeDB {
 					Self::remove_keys(&inserts, &mut refs, batch, self.column, RemoveFrom::Queue, trace);
 				}
 
-				try!(batch.delete(self.column, &last));
+				batch.delete(self.column, &last);
 				index += 1;
 			}
 			if trace {
@@ -525,13 +525,13 @@ impl JournalDB for EarlyMergeDB {
 					if try!(self.backing.get(self.column, &key)).is_some() {
 						return Err(BaseDataError::AlreadyExists(key).into());
 					}
-					try!(batch.put(self.column, &key, &value))
+					batch.put(self.column, &key, &value)
 				}
 				-1 => {
 					if try!(self.backing.get(self.column, &key)).is_none() {
 						return Err(BaseDataError::NegativelyReferencedHash(key).into());
 					}
-					try!(batch.delete(self.column, &key))
+					batch.delete(self.column, &key)
 				}
 				_ => panic!("Attempted to inject invalid state."),
 			}
@@ -250,9 +250,9 @@ impl JournalDB for OverlayRecentDB {
 			k.append(&now);
 			k.append(&index);
 			k.append(&&PADDING[..]);
-			try!(batch.put_vec(self.column, &k.drain(), r.out()));
+			batch.put_vec(self.column, &k.drain(), r.out());
 			if journal_overlay.latest_era.map_or(true, |e| now > e) {
-				try!(batch.put_vec(self.column, &LATEST_ERA_KEY, encode(&now).to_vec()));
+				batch.put_vec(self.column, &LATEST_ERA_KEY, encode(&now).to_vec());
 				journal_overlay.latest_era = Some(now);
 			}
 			journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys });
@@ -272,7 +272,7 @@ impl JournalDB for OverlayRecentDB {
 				r.append(&end_era);
 				r.append(&index);
 				r.append(&&PADDING[..]);
-				try!(batch.delete(self.column, &r.drain()));
+				batch.delete(self.column, &r.drain());
 				trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len());
 				{
 					if canon_id == journal.id {
@@ -291,7 +291,7 @@ impl JournalDB for OverlayRecentDB {
 					}
 					// apply canon inserts first
 					for (k, v) in canon_insertions {
-						try!(batch.put(self.column, &k, &v));
+						batch.put(self.column, &k, &v);
 						journal_overlay.pending_overlay.insert(to_short_key(&k), v);
 					}
 					// update the overlay
@@ -301,7 +301,7 @@ impl JournalDB for OverlayRecentDB {
 					// apply canon deletions
 					for k in canon_deletions {
 						if !journal_overlay.backing_overlay.contains(&to_short_key(&k)) {
-							try!(batch.delete(self.column, &k));
+							batch.delete(self.column, &k);
 						}
 					}
 				}
@@ -325,13 +325,13 @@ impl JournalDB for OverlayRecentDB {
 					if try!(self.backing.get(self.column, &key)).is_some() {
 						return Err(BaseDataError::AlreadyExists(key).into());
 					}
-					try!(batch.put(self.column, &key, &value))
+					batch.put(self.column, &key, &value)
 				}
 				-1 => {
 					if try!(self.backing.get(self.column, &key)).is_none() {
 						return Err(BaseDataError::NegativelyReferencedHash(key).into());
 					}
-					try!(batch.delete(self.column, &key))
+					batch.delete(self.column, &key)
 				}
 				_ => panic!("Attempted to inject invalid state."),
 			}
@@ -141,7 +141,7 @@ impl JournalDB for RefCountedDB {
 			r.append(id);
 			r.append(&self.inserts);
 			r.append(&self.removes);
-			try!(batch.put(self.column, &last, r.as_raw()));
+			batch.put(self.column, &last, r.as_raw());
 
 			trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, index, id, self.inserts, self.removes);
 
@@ -149,7 +149,7 @@ impl JournalDB for RefCountedDB {
 			self.removes.clear();
 
 			if self.latest_era.map_or(true, |e| now > e) {
-				try!(batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)));
+				batch.put(self.column, &LATEST_ERA_KEY, &encode(&now));
 				self.latest_era = Some(now);
 			}
 		}
@@ -176,7 +176,7 @@ impl JournalDB for RefCountedDB {
 				for i in &to_remove {
 					self.forward.remove(i);
 				}
-				try!(batch.delete(self.column, &last));
+				batch.delete(self.column, &last);
 				index += 1;
 			}
 		}
@@ -57,7 +57,7 @@ impl DBTransaction {
 	}
 
 	/// Insert a key-value pair in the transaction. Any existing value value will be overwritten upon write.
-	pub fn put(&self, col: Option<u32>, key: &[u8], value: &[u8]) -> Result<(), String> {
+	pub fn put(&self, col: Option<u32>, key: &[u8], value: &[u8]) {
 		let mut ekey = ElasticArray32::new();
 		ekey.append_slice(key);
 		self.ops.lock().push(DBOp::Insert {
@@ -65,11 +65,10 @@ impl DBTransaction {
 			key: ekey,
 			value: value.to_vec(),
 		});
-		Ok(())
 	}
 
 	/// Insert a key-value pair in the transaction. Any existing value value will be overwritten upon write.
-	pub fn put_vec(&self, col: Option<u32>, key: &[u8], value: Bytes) -> Result<(), String> {
+	pub fn put_vec(&self, col: Option<u32>, key: &[u8], value: Bytes) {
 		let mut ekey = ElasticArray32::new();
 		ekey.append_slice(key);
 		self.ops.lock().push(DBOp::Insert {
@@ -77,12 +76,11 @@ impl DBTransaction {
 			key: ekey,
 			value: value,
 		});
-		Ok(())
 	}
 
 	/// Insert a key-value pair in the transaction. Any existing value value will be overwritten upon write.
 	/// Value will be RLP-compressed on flush
-	pub fn put_compressed(&self, col: Option<u32>, key: &[u8], value: Bytes) -> Result<(), String> {
+	pub fn put_compressed(&self, col: Option<u32>, key: &[u8], value: Bytes) {
 		let mut ekey = ElasticArray32::new();
 		ekey.append_slice(key);
 		self.ops.lock().push(DBOp::InsertCompressed {
@@ -90,18 +88,16 @@ impl DBTransaction {
 			key: ekey,
 			value: value,
 		});
-		Ok(())
 	}
 
 	/// Delete value by key.
-	pub fn delete(&self, col: Option<u32>, key: &[u8]) -> Result<(), String> {
+	pub fn delete(&self, col: Option<u32>, key: &[u8]) {
 		let mut ekey = ElasticArray32::new();
 		ekey.append_slice(key);
 		self.ops.lock().push(DBOp::Delete {
 			col: col,
 			key: ekey,
 		});
-		Ok(())
 	}
 }
 
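A side note on the `inject`-style hunks earlier, where the replacement `batch.put(...)` keeps no trailing semicolon: with the new signatures these calls evaluate to `()`, so they still type-check as the tail expression of a `match` arm. A small illustration, reusing the toy `DBTransaction` from the sketch at the top (hypothetical code, not from the commit):

    // `rc` plays the role of the reference-count delta matched on in `inject`.
    fn apply(batch: &DBTransaction, rc: i32, key: &[u8], value: &[u8]) {
        match rc {
            1 => batch.put(key, value), // arm's value is `()`, matching the fn's return
            -1 => batch.delete(key),    // likewise
            _ => panic!("Attempted to inject invalid state."),
        }
    }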
@@ -292,7 +288,7 @@ impl Database {
 	}
 
 	/// Commit transaction to database.
-	pub fn write_buffered(&self, tr: DBTransaction) -> Result<(), String> {
+	pub fn write_buffered(&self, tr: DBTransaction) {
 		let mut overlay = self.overlay.write();
 		let ops = tr.ops.into_inner();
 		for op in ops {
@@ -311,7 +307,6 @@ impl Database {
 				},
 			}
 		};
-		Ok(())
 	}
 
 	/// Commit buffered changes to database.
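`write_buffered` is the same story one level up: it only merges the transaction's ops into the `Database`'s in-memory overlay, so its `Result` (and the caller's `.expect("DB write failed.")` in `Client`) could never fire. A sketch of that split, again with toy types; the overlay layout here is an assumption, only the signatures mirror the diff:

    use std::cell::RefCell;
    use std::collections::HashMap;

    struct Database {
        // None marks a buffered delete, Some(v) a buffered insert.
        overlay: RefCell<HashMap<Vec<u8>, Option<Vec<u8>>>>,
    }

    impl Database {
        // Infallible: mutates memory only.
        fn write_buffered(&self, tr: DBTransaction) {
            let mut overlay = self.overlay.borrow_mut();
            for op in tr.ops.into_inner() {
                match op {
                    DBOp::Insert { key, value } => { overlay.insert(key, Some(value)); }
                    DBOp::Delete { key } => { overlay.insert(key, None); }
                }
            }
        }

        // The one place real I/O would happen, so the Result stays.
        fn flush(&self) -> Result<(), String> {
            self.overlay.borrow_mut().clear(); // stand-in for writing to disk
            Ok(())
        }
    }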
@@ -422,8 +417,8 @@ mod tests {
 		let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
 
 		let batch = db.transaction();
-		batch.put(None, &key1, b"cat").unwrap();
-		batch.put(None, &key2, b"dog").unwrap();
+		batch.put(None, &key1, b"cat");
+		batch.put(None, &key2, b"dog");
 		db.write(batch).unwrap();
 
 		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"cat");
@@ -436,18 +431,18 @@ mod tests {
 		assert_eq!(&*contents[1].1, b"dog");
 
 		let batch = db.transaction();
-		batch.delete(None, &key1).unwrap();
+		batch.delete(None, &key1);
 		db.write(batch).unwrap();
 
 		assert!(db.get(None, &key1).unwrap().is_none());
 
 		let batch = db.transaction();
-		batch.put(None, &key1, b"cat").unwrap();
+		batch.put(None, &key1, b"cat");
 		db.write(batch).unwrap();
 
 		let transaction = db.transaction();
-		transaction.put(None, &key3, b"elephant").unwrap();
-		transaction.delete(None, &key1).unwrap();
+		transaction.put(None, &key3, b"elephant");
+		transaction.delete(None, &key1);
 		db.write(transaction).unwrap();
 		assert!(db.get(None, &key1).unwrap().is_none());
 		assert_eq!(&*db.get(None, &key3).unwrap().unwrap(), b"elephant");
@@ -456,9 +451,9 @@ mod tests {
 		assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog");
 
 		let transaction = db.transaction();
-		transaction.put(None, &key1, b"horse").unwrap();
-		transaction.delete(None, &key3).unwrap();
-		db.write_buffered(transaction).unwrap();
+		transaction.put(None, &key1, b"horse");
+		transaction.delete(None, &key3);
+		db.write_buffered(transaction);
 		assert!(db.get(None, &key3).unwrap().is_none());
 		assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
 
@@ -75,7 +75,7 @@ impl Batch {
 		let transaction = DBTransaction::new(dest);
 
 		for keypair in &self.inner {
-			try!(transaction.put(self.column, &keypair.0, &keypair.1).map_err(Error::Custom));
+			transaction.put(self.column, &keypair.0, &keypair.1);
 		}
 
 		self.inner.clear();
@@ -37,7 +37,7 @@ fn make_db(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
 	{
 		let transaction = db.transaction();
 		for (k, v) in pairs {
-			transaction.put(None, &k, &v).expect("failed to add pair to transaction");
+			transaction.put(None, &k, &v);
 		}
 
 		db.write(transaction).expect("failed to write db transaction");
@@ -116,10 +116,10 @@ impl OverlayDB {
 					let mut s = RlpStream::new_list(2);
 					s.append(&payload.1);
 					s.append(&payload.0);
-					batch.put(self.column, key, s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
+					batch.put(self.column, key, s.as_raw());
 					false
 				} else {
-					batch.delete(self.column, key).expect("Low-level database error. Some issue with your hard disk?");
+					batch.delete(self.column, key);
 					true
 				}
 			}
@@ -301,7 +301,7 @@ fn playpen() {
 	{
 		let db = Database::open_default("/tmp/test").unwrap();
 		let batch = db.transaction();
-		batch.put(None, b"test", b"test2").unwrap();
+		batch.put(None, b"test", b"test2");
 		db.write(batch).unwrap();
 		match db.get(None, b"test") {
 			Ok(Some(value)) => println!("Got value {:?}", &*value),
@@ -309,7 +309,7 @@ fn playpen() {
 			Err(..) => println!("Gah"),
 		}
 		let batch = db.transaction();
-		batch.delete(None, b"test").unwrap();
+		batch.delete(None, b"test");
 		db.write(batch).unwrap();
 	}
 	fs::remove_dir_all("/tmp/test").unwrap();
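Stitching the toy pieces above together, the post-commit calling convention looks like this; errors surface only at the genuinely fallible commit point:

    fn main() {
        let db = Database { overlay: RefCell::new(HashMap::new()) };

        let batch = DBTransaction::new();
        batch.put(b"best", b"some-hash"); // no .unwrap(): cannot fail
        batch.delete(b"stale");

        db.write_buffered(batch);         // infallible buffering
        db.flush().expect("Low-level database error. Some issue with your hard disk?");
    }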