diff --git a/Cargo.lock b/Cargo.lock
index 7f20bcbb8..da3c29005 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -595,7 +595,6 @@ dependencies = [
"triehash 0.1.0",
"unexpected 0.1.0",
"using_queue 0.1.0",
- "util-error 0.1.0",
"vm 0.1.0",
"wasm 0.1.0",
]
@@ -1419,7 +1418,6 @@ dependencies = [
"parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"plain_hasher 0.2.0",
"rlp 0.2.1",
- "util-error 0.1.0",
]
[[package]]
@@ -3712,16 +3710,6 @@ name = "utf8-ranges"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-[[package]]
-name = "util-error"
-version = "0.1.0"
-dependencies = [
- "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rlp 0.2.1",
- "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
[[package]]
name = "vec_map"
version = "0.8.0"
diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml
index 756c9a8b1..9c25cdccb 100644
--- a/ethcore/Cargo.toml
+++ b/ethcore/Cargo.toml
@@ -52,7 +52,6 @@ rlp_compress = { path = "../util/rlp_compress" }
rlp_derive = { path = "../util/rlp_derive" }
kvdb = { path = "../util/kvdb" }
kvdb-memorydb = { path = "../util/kvdb-memorydb" }
-util-error = { path = "../util/error" }
snappy = { git = "https://github.com/paritytech/rust-snappy" }
stop-guard = { path = "../util/stop-guard" }
macros = { path = "../util/macros" }
diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs
index 10da80902..bdd00a273 100644
--- a/ethcore/src/client/client.rs
+++ b/ethcore/src/client/client.rs
@@ -28,7 +28,6 @@ use itertools::Itertools;
use journaldb;
use trie::{TrieSpec, TrieFactory, Trie};
use kvdb::{DBValue, KeyValueDB, DBTransaction};
-use util_error::UtilError;
// other
use ethereum_types::{H256, Address, U256};
@@ -442,7 +441,7 @@ impl Importer {
{
trace_time!("import_old_block");
// verify the block, passing the chain for updating the epoch verifier.
- let mut rng = OsRng::new().map_err(UtilError::from)?;
+ let mut rng = OsRng::new()?;
self.ancient_verifier.verify(&mut rng, &header, &chain)?;
// Commit results
diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs
index c8b931dee..9e811d3d1 100644
--- a/ethcore/src/client/config.rs
+++ b/ethcore/src/client/config.rs
@@ -152,7 +152,7 @@ impl Default for ClientConfig {
}
#[cfg(test)]
mod test {
- use super::{DatabaseCompactionProfile};
+ use super::DatabaseCompactionProfile;
#[test]
fn test_default_compaction_profile() {
diff --git a/ethcore/src/client/error.rs b/ethcore/src/client/error.rs
index d40fd261c..6851a4057 100644
--- a/ethcore/src/client/error.rs
+++ b/ethcore/src/client/error.rs
@@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt::{Display, Formatter, Error as FmtError};
-use util_error::UtilError;
+use std::io;
use ethtrie::TrieError;
/// Client configuration errors.
@@ -23,8 +23,8 @@ use ethtrie::TrieError;
pub enum Error {
/// TrieDB-related error.
Trie(TrieError),
- /// Util error
- Util(UtilError),
+ /// Io error.
+ Io(io::Error),
}
impl From<TrieError> for Error {
@@ -33,9 +33,9 @@ impl From<TrieError> for Error {
}
}
-impl From<UtilError> for Error {
- fn from(err: UtilError) -> Self {
- Error::Util(err)
+impl From<io::Error> for Error {
+ fn from(err: io::Error) -> Self {
+ Error::Io(err)
}
}
@@ -49,7 +49,7 @@ impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
match *self {
Error::Trie(ref err) => write!(f, "{}", err),
- Error::Util(ref err) => write!(f, "{}", err),
+ Error::Io(ref err) => write!(f, "{}", err),
}
}
}
diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs
index 020eea8a6..88b50f813 100644
--- a/ethcore/src/error.rs
+++ b/ethcore/src/error.rs
@@ -19,7 +19,6 @@
use std::{fmt, error};
use std::time::SystemTime;
use ethereum_types::{H256, U256, Address, Bloom};
-use util_error::{self, UtilError};
use snappy::InvalidInput;
use unexpected::{Mismatch, OutOfBounds};
use ethtrie::TrieError;
@@ -206,7 +205,6 @@ impl From<Error> for BlockImportError {
match e {
Error(ErrorKind::Block(block_error), _) => BlockImportErrorKind::Block(block_error).into(),
Error(ErrorKind::Import(import_error), _) => BlockImportErrorKind::Import(import_error.into()).into(),
- Error(ErrorKind::Util(util_error::ErrorKind::Decoder(decoder_err)), _) => BlockImportErrorKind::Decoder(decoder_err).into(),
_ => BlockImportErrorKind::Other(format!("other block import error: {:?}", e)).into(),
}
}
@@ -236,7 +234,6 @@ error_chain! {
}
links {
- Util(UtilError, util_error::ErrorKind) #[doc = "Error concerning a utility"];
Import(ImportError, ImportErrorKind) #[doc = "Error concerning block import." ];
}
@@ -326,7 +323,6 @@ impl From<BlockImportError> for Error {
match err {
BlockImportError(BlockImportErrorKind::Block(e), _) => ErrorKind::Block(e).into(),
BlockImportError(BlockImportErrorKind::Import(e), _) => ErrorKind::Import(e).into(),
- BlockImportError(BlockImportErrorKind::Other(s), _) => UtilError::from(s).into(),
_ => ErrorKind::Msg(format!("other block import error: {:?}", err)).into(),
}
}
diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs
index 6e3cae4a4..70044bd09 100644
--- a/ethcore/src/lib.rs
+++ b/ethcore/src/lib.rs
@@ -100,7 +100,6 @@ extern crate patricia_trie_ethereum as ethtrie;
extern crate triehash;
extern crate ansi_term;
extern crate unexpected;
-extern crate util_error;
extern crate snappy;
extern crate ethabi;
extern crate rustc_hex;
diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs
index 14d9be8f4..2e76153cf 100644
--- a/ethcore/src/snapshot/service.rs
+++ b/ethcore/src/snapshot/service.rs
@@ -37,7 +37,6 @@ use io::IoChannel;
use ethereum_types::H256;
use parking_lot::{Mutex, RwLock, RwLockReadGuard};
-use util_error::UtilError;
use bytes::Bytes;
use journaldb::Algorithm;
use snappy;
@@ -621,7 +620,7 @@ impl Service {
match is_done {
true => {
- db.key_value().flush().map_err(UtilError::from)?;
+ db.key_value().flush()?;
drop(db);
return self.finalize_restoration(&mut *restoration);
},
@@ -634,7 +633,10 @@ impl Service {
}
}
};
- result.and_then(|_| db.key_value().flush().map_err(|e| UtilError::from(e).into()))
+
+ result?;
+ db.key_value().flush()?;
+ Ok(())
}
/// Feed a state chunk to be processed synchronously.
diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs
index 95cf73328..d614fe42f 100644
--- a/ethcore/src/state_db.rs
+++ b/ethcore/src/state_db.rs
@@ -16,6 +16,10 @@
//! State database abstraction. For more info, see the doc for `StateDB`
+use std::collections::{VecDeque, HashSet};
+use std::io;
+use std::sync::Arc;
+
use bloom_journal::{Bloom, BloomJournal};
use byteorder::{LittleEndian, ByteOrder};
use db::COL_ACCOUNT_BLOOM;
@@ -30,9 +34,6 @@ use lru_cache::LruCache;
use memory_cache::MemoryLruCache;
use parking_lot::Mutex;
use state::{self, Account};
-use std::collections::{VecDeque, HashSet};
-use std::sync::Arc;
-use util_error::UtilError;
/// Value used to initialize bloom bitmap size.
///
@@ -181,7 +182,7 @@ impl StateDB {
}
/// Commit blooms journal to the database transaction
- pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> Result<(), UtilError> {
+ pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> io::Result<()> {
assert!(journal.hash_functions <= 255);
batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &[journal.hash_functions as u8]);
let mut key = [0u8; 8];
@@ -196,7 +197,7 @@ impl StateDB {
}
/// Journal all recent operations under the given era and ID.
- pub fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> {
+ pub fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
{
let mut bloom_lock = self.account_bloom.lock();
Self::commit_bloom(batch, bloom_lock.drain_journal())?;
@@ -209,7 +210,7 @@ impl StateDB {
/// Mark a given candidate from an ancient era as canonical, enacting its removals from the
/// backing database and reverting any non-canonical historical commit's insertions.
- pub fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
+ pub fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> {
self.db.mark_canonical(batch, end_era, canon_id)
}
diff --git a/util/error/Cargo.toml b/util/error/Cargo.toml
deleted file mode 100644
index 66901f169..000000000
--- a/util/error/Cargo.toml
+++ /dev/null
@@ -1,10 +0,0 @@
-[package]
-name = "util-error"
-version = "0.1.0"
-authors = ["Parity Technologies <admin@parity.io>"]
-
-[dependencies]
-rlp = { path = "../rlp" }
-ethereum-types = "0.3"
-error-chain = { version = "0.12", default-features = false }
-rustc-hex = "1.0"
diff --git a/util/error/src/lib.rs b/util/error/src/lib.rs
deleted file mode 100644
index 86b8f4439..000000000
--- a/util/error/src/lib.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2015-2018 Parity Technologies (UK) Ltd.
-// This file is part of Parity.
-
-// Parity is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-
-//! General error types for use in ethcore.
-
-#![allow(missing_docs)]
-#![allow(unknown_lints)]
-
-#[macro_use]
-extern crate error_chain;
-
-extern crate ethereum_types;
-extern crate rlp;
-extern crate rustc_hex;
-
-use std::fmt;
-use rustc_hex::FromHexError;
-use rlp::DecoderError;
-use ethereum_types::H256;
-
-#[derive(Debug)]
-/// Error in database subsystem.
-pub enum BaseDataError {
- /// An entry was removed more times than inserted.
- NegativelyReferencedHash(H256),
- /// A committed value was inserted more than once.
- AlreadyExists(H256),
-}
-
-impl fmt::Display for BaseDataError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match *self {
- BaseDataError::NegativelyReferencedHash(hash) =>
- write!(f, "Entry {} removed from database more times than it was added.", hash),
- BaseDataError::AlreadyExists(hash) =>
- write!(f, "Committed key already exists in database: {}", hash),
- }
- }
-}
-
-impl std::error::Error for BaseDataError {
- fn description(&self) -> &str {
- "Error in database subsystem"
- }
-}
-
-error_chain! {
- types {
- UtilError, ErrorKind, ResultExt, Result;
- }
-
- foreign_links {
- Io(::std::io::Error);
- FromHex(FromHexError);
- Decoder(DecoderError);
- BaseData(BaseDataError);
- }
-}
diff --git a/util/journaldb/Cargo.toml b/util/journaldb/Cargo.toml
index f6adedf6d..bd6c83415 100644
--- a/util/journaldb/Cargo.toml
+++ b/util/journaldb/Cargo.toml
@@ -17,7 +17,6 @@ memorydb = { version = "0.2.0", path = "../memorydb" }
parking_lot = "0.6"
plain_hasher = { path = "../plain_hasher" }
rlp = { path = "../rlp" }
-util-error = { path = "../error" }
[dev-dependencies]
ethcore-logger = { path = "../../logger" }
diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs
index 2978e86c2..3993887e4 100644
--- a/util/journaldb/src/archivedb.rs
+++ b/util/journaldb/src/archivedb.rs
@@ -18,16 +18,16 @@
use std::collections::HashMap;
use std::collections::hash_map::Entry;
+use std::io;
use std::sync::Arc;
use bytes::Bytes;
-use error::{BaseDataError, UtilError};
use ethereum_types::H256;
use hashdb::*;
use keccak_hasher::KeccakHasher;
use kvdb::{KeyValueDB, DBTransaction};
use rlp::{encode, decode};
-use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
+use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, error_key_already_exists, error_negatively_reference_hash};
use super::memorydb::*;
use traits::JournalDB;
@@ -127,7 +127,7 @@ impl JournalDB for ArchiveDB {
self.latest_era.is_none()
}
- fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, _id: &H256) -> Result<u32, UtilError> {
+ fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, _id: &H256) -> io::Result<u32> {
let mut inserts = 0usize;
let mut deletes = 0usize;
@@ -150,12 +150,12 @@ impl JournalDB for ArchiveDB {
Ok((inserts + deletes) as u32)
}
- fn mark_canonical(&mut self, _batch: &mut DBTransaction, _end_era: u64, _canon_id: &H256) -> Result<u32, UtilError> {
+ fn mark_canonical(&mut self, _batch: &mut DBTransaction, _end_era: u64, _canon_id: &H256) -> io::Result<u32> {
// keep everything! it's an archive, after all.
Ok(0)
}
- fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError> {
+ fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
let mut inserts = 0usize;
let mut deletes = 0usize;
@@ -163,7 +163,7 @@ impl JournalDB for ArchiveDB {
let (key, (value, rc)) = i;
if rc > 0 {
if self.backing.get(self.column, &key)?.is_some() {
- return Err(BaseDataError::AlreadyExists(key).into());
+ return Err(error_key_already_exists(&key));
}
batch.put(self.column, &key, &value);
inserts += 1;
@@ -171,7 +171,7 @@ impl JournalDB for ArchiveDB {
if rc < 0 {
assert!(rc == -1);
if self.backing.get(self.column, &key)?.is_none() {
- return Err(BaseDataError::NegativelyReferencedHash(key).into());
+ return Err(error_negatively_reference_hash(&key));
}
batch.delete(self.column, &key);
deletes += 1;
diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs
index bee63ae7f..68b8675af 100644
--- a/util/journaldb/src/earlymergedb.rs
+++ b/util/journaldb/src/earlymergedb.rs
@@ -18,10 +18,10 @@
use std::collections::HashMap;
use std::collections::hash_map::Entry;
+use std::io;
use std::sync::Arc;
use bytes::Bytes;
-use error::{BaseDataError, UtilError};
use ethereum_types::H256;
use hashdb::*;
use heapsize::HeapSizeOf;
@@ -30,7 +30,7 @@ use kvdb::{KeyValueDB, DBTransaction};
use memorydb::*;
use parking_lot::RwLock;
use rlp::{encode, decode};
-use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
+use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, error_negatively_reference_hash, error_key_already_exists};
use super::traits::JournalDB;
use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef};
@@ -362,7 +362,7 @@ impl JournalDB for EarlyMergeDB {
self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec())
}
- fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> {
+ fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
// record new commit's details.
let mut refs = match self.refs.as_ref() {
Some(refs) => refs.write(),
@@ -426,7 +426,7 @@ impl JournalDB for EarlyMergeDB {
}
}
- fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
+ fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> {
let mut refs = self.refs.as_ref().unwrap().write();
// apply old commits' details
@@ -488,7 +488,7 @@ impl JournalDB for EarlyMergeDB {
Ok(0)
}
- fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError> {
+ fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
let mut ops = 0;
for (key, (value, rc)) in self.overlay.drain() {
if rc != 0 { ops += 1 }
@@ -497,13 +497,13 @@ impl JournalDB for EarlyMergeDB {
0 => {}
1 => {
if self.backing.get(self.column, &key)?.is_some() {
- return Err(BaseDataError::AlreadyExists(key).into());
+ return Err(error_key_already_exists(&key));
}
batch.put(self.column, &key, &value)
}
-1 => {
if self.backing.get(self.column, &key)?.is_none() {
- return Err(BaseDataError::NegativelyReferencedHash(key).into());
+ return Err(error_negatively_reference_hash(&key));
}
batch.delete(self.column, &key)
}
diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs
index 4814ac868..5b2381f2d 100644
--- a/util/journaldb/src/lib.rs
+++ b/util/journaldb/src/lib.rs
@@ -29,7 +29,6 @@ extern crate memorydb;
extern crate parking_lot;
extern crate plain_hasher;
extern crate rlp;
-extern crate util_error as error;
#[cfg(test)]
extern crate ethcore_logger;
@@ -38,7 +37,7 @@ extern crate keccak_hash as keccak;
#[cfg(test)]
extern crate kvdb_memorydb;
-use std::{fmt, str};
+use std::{fmt, str, io};
use std::sync::Arc;
/// Export the journaldb module.
@@ -151,6 +150,14 @@ pub fn new(backing: Arc<::kvdb::KeyValueDB>, algorithm: Algorithm, col: Option<u32>) -> Box<JournalDB> {
+fn error_key_already_exists(hash: &ethereum_types::H256) -> io::Error {
+ io::Error::new(io::ErrorKind::AlreadyExists, hash.to_string())
+}
+
+fn error_negatively_reference_hash(hash: &ethereum_types::H256) -> io::Error {
+ io::Error::new(io::ErrorKind::Other, format!("Entry {} removed from database more times than it was added.", hash))
+}
+
#[cfg(test)]
mod tests {
use super::Algorithm;
diff --git a/util/journaldb/src/overlaydb.rs b/util/journaldb/src/overlaydb.rs
index 1e01edfea..f4b20b219 100644
--- a/util/journaldb/src/overlaydb.rs
+++ b/util/journaldb/src/overlaydb.rs
@@ -16,16 +16,18 @@
//! Disk-backed `HashDB` implementation.
-use std::sync::Arc;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
-use error::{Result, BaseDataError};
+use std::io;
+use std::sync::Arc;
+
use ethereum_types::H256;
use rlp::{Rlp, RlpStream, Encodable, DecoderError, Decodable, encode, decode};
use hashdb::*;
use keccak_hasher::KeccakHasher;
use memorydb::*;
use kvdb::{KeyValueDB, DBTransaction};
+use super::error_negatively_reference_hash;
/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay.
///
@@ -65,7 +67,7 @@ impl Encodable for Payload {
}
impl Decodable for Payload {
- fn decode(rlp: &Rlp) -> ::std::result::Result<Self, DecoderError> {
+ fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
let payload = Payload {
count: rlp.val_at(0)?,
value: DBValue::from_slice(rlp.at(1)?.data()?),
@@ -90,14 +92,14 @@ impl OverlayDB {
/// Commit all operations in a single batch.
#[cfg(test)]
- pub fn commit(&mut self) -> Result<u32> {
+ pub fn commit(&mut self) -> io::Result<u32> {
let mut batch = self.backing.transaction();
let res = self.commit_to_batch(&mut batch)?;
self.backing.write(batch).map(|_| res).map_err(|e| e.into())
}
/// Commit all operations to given batch.
- pub fn commit_to_batch(&mut self, batch: &mut DBTransaction) -> Result<u32> {
+ pub fn commit_to_batch(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
let mut ret = 0u32;
let mut deletes = 0usize;
for i in self.overlay.drain() {
@@ -107,14 +109,14 @@ impl OverlayDB {
Some(x) => {
let total_rc: i32 = x.count as i32 + rc;
if total_rc < 0 {
- return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
+ return Err(error_negatively_reference_hash(&key));
}
let payload = Payload::new(total_rc as u32, x.value);
deletes += if self.put_payload_in_batch(batch, &key, &payload) {1} else {0};
}
None => {
if rc < 0 {
- return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
+ return Err(error_negatively_reference_hash(&key));
}
let payload = Payload::new(rc as u32, value);
self.put_payload_in_batch(batch, &key, &payload);
diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs
index d38f91c7c..b63168e54 100644
--- a/util/journaldb/src/overlayrecentdb.rs
+++ b/util/journaldb/src/overlayrecentdb.rs
@@ -18,10 +18,10 @@
use std::collections::HashMap;
use std::collections::hash_map::Entry;
+use std::io;
use std::sync::Arc;
use bytes::Bytes;
-use error::{BaseDataError, UtilError};
use ethereum_types::H256;
use hashdb::*;
use heapsize::HeapSizeOf;
@@ -31,8 +31,7 @@ use memorydb::*;
use parking_lot::RwLock;
use plain_hasher::H256FastMap;
use rlp::{Rlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable};
-use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
-use super::JournalDB;
+use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, JournalDB, error_negatively_reference_hash};
use util::DatabaseKey;
/// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay
@@ -284,7 +283,7 @@ impl JournalDB for OverlayRecentDB {
.or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.into_vec()))
}
- fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> {
+ fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
trace!(target: "journaldb", "entry: #{} ({})", now, id);
let mut journal_overlay = self.journal_overlay.write();
@@ -340,7 +339,7 @@ impl JournalDB for OverlayRecentDB {
Ok(ops as u32)
}
- fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
+ fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> {
trace!(target: "journaldb", "canonical: #{} ({})", end_era, canon_id);
let mut journal_overlay = self.journal_overlay.write();
@@ -412,7 +411,7 @@ impl JournalDB for OverlayRecentDB {
self.journal_overlay.write().pending_overlay.clear();
}
- fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError> {
+ fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
let mut ops = 0;
for (key, (value, rc)) in self.transaction_overlay.drain() {
if rc != 0 { ops += 1 }
@@ -424,7 +423,7 @@ impl JournalDB for OverlayRecentDB {
}
-1 => {
if cfg!(debug_assertions) && self.backing.get(self.column, &key)?.is_none() {
- return Err(BaseDataError::NegativelyReferencedHash(key).into());
+ return Err(error_negatively_reference_hash(&key));
}
batch.delete(self.column, &key)
}
diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs
index bca8d9305..7cbe9022d 100644
--- a/util/journaldb/src/refcounteddb.rs
+++ b/util/journaldb/src/refcounteddb.rs
@@ -17,10 +17,10 @@
//! Disk-backed, ref-counted `JournalDB` implementation.
use std::collections::HashMap;
+use std::io;
use std::sync::Arc;
use bytes::Bytes;
-use error::UtilError;
use ethereum_types::H256;
use hashdb::*;
use heapsize::HeapSizeOf;
@@ -119,7 +119,7 @@ impl JournalDB for RefCountedDB {
self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec())
}
- fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> {
+ fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
// record new commit's details.
let mut db_key = DatabaseKey {
era: now,
@@ -159,7 +159,7 @@ impl JournalDB for RefCountedDB {
Ok(ops as u32)
}
- fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
+ fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> {
// apply old commits' details
let mut db_key = DatabaseKey {
era: end_era,
@@ -191,7 +191,7 @@ impl JournalDB for RefCountedDB {
Ok(r)
}
- fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError> {
+ fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
self.inserts.clear();
for remove in self.removes.drain(..) {
self.forward.remove(&remove);
diff --git a/util/journaldb/src/traits.rs b/util/journaldb/src/traits.rs
index 470761614..075a54600 100644
--- a/util/journaldb/src/traits.rs
+++ b/util/journaldb/src/traits.rs
@@ -16,13 +16,14 @@
//! Disk-backed `HashDB` implementation.
+use std::io;
+use std::sync::Arc;
+
use bytes::Bytes;
-use error::UtilError;
use ethereum_types::H256;
use hashdb::*;
use keccak_hasher::KeccakHasher;
use kvdb::{self, DBTransaction};
-use std::sync::Arc;
/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually
/// exclusive actions.
@@ -49,10 +50,10 @@ pub trait JournalDB: HashDB {
/// Journal recent database operations as being associated with a given era and id.
// TODO: give the overlay to this function so journaldbs don't manage the overlays themeselves.
- fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError>;
+ fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32>;
/// Mark a given block as canonical, indicating that competing blocks' states may be pruned out.
- fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> Result<u32, UtilError>;
+ fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> io::Result<u32>;
/// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions
/// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated.
@@ -61,7 +62,7 @@ pub trait JournalDB: HashDB {
/// by any previous `commit` operations. Essentially, this means that `inject` can be used
/// either to restore a state to a fresh database, or to insert data which may only be journalled
/// from this point onwards.
- fn inject(&mut self, batch: &mut DBTransaction) -> Result<u32, UtilError>;
+ fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32>;
/// State data query
fn state(&self, _id: &H256) -> Option;
@@ -81,7 +82,7 @@ pub trait JournalDB: HashDB {
/// Commit all changes in a single batch
#[cfg(test)]
- fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
+ fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> io::Result<u32> {
let mut batch = self.backing().transaction();
let mut ops = self.journal_under(&mut batch, now, id)?;
@@ -96,7 +97,7 @@ pub trait JournalDB: HashDB {
/// Inject all changes in a single batch.
#[cfg(test)]
- fn inject_batch(&mut self) -> Result<u32, UtilError> {
+ fn inject_batch(&mut self) -> io::Result<u32> {
let mut batch = self.backing().transaction();
let res = self.inject(&mut batch)?;
self.backing().write(batch).map(|_| res).map_err(Into::into)