Update to latest kvdb-*: no default column, DBValue is Vec (#11312)

* Only use kvdb "column families"

This PR contains the changes necessary to use the `kvdb-*` crates from https://github.com/paritytech/parity-common/pull/278 (so a synchronized merge is required). That change drops support for the old-style rocksdb "default" column in favour of a smaller and less complex API.

As it stands, this PR works correctly except for secret-store, which still needs to be migrated to use a new column family.
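For orientation, a minimal sketch of the new API shape on the caller side (illustrative only — the column constant, key/value literals and helper function are not taken from this diff):

```rust
use std::sync::Arc;
use kvdb::{DBTransaction, KeyValueDB};

// Columns are now plain `u32` indexes; there is no `None`/"default" column any more.
const COL_STATE: u32 = 0;

fn roundtrip(db: Arc<dyn KeyValueDB>) -> std::io::Result<()> {
    // Writes go through a transaction keyed by the numeric column.
    let mut tx: DBTransaction = db.transaction();
    // `DBValue` is now a plain `Vec<u8>`, so there is no `DBValue::from_slice` wrapping.
    tx.put(COL_STATE, b"key", b"value");
    db.write(tx)?;
    // `get` returns `Option<DBValue>`, i.e. `Option<Vec<u8>>`.
    let value: Option<Vec<u8>> = db.get(COL_STATE, b"key")?;
    assert_eq!(value, Some(b"value".to_vec()));
    Ok(())
}
```

The same pattern appears throughout the diff below, e.g. `batch.put(0, …)` in secret-store and `NULL_RLP.to_vec()` in account-db.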

* Fix secretstore build

* Fix secretstore build: include ethkey when building with the "accounts" feature

* typos

* Restore state test commit

* Override all of parity-common from git

* Be precise about version requirement to migrate secretstore code

* Update ethcore/db/src/db.rs

Co-Authored-By: Niklas Adolfsson <niklasadolfsson1@gmail.com>

* Address review grumbles

* Review grumbles

* Cleanup

Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com>
David 2019-12-20 12:27:38 +01:00 committed by GitHub
parent 860ef19e95
commit b9f9d11929
68 changed files with 694 additions and 755 deletions

Cargo.lock (generated, 538 changed lines): diff suppressed because it is too large.


@ -39,8 +39,8 @@ futures = "0.1"
journaldb = { path = "util/journaldb" }
jsonrpc-core = "14.0.3"
keccak-hash = "0.4.0"
kvdb = "0.1"
kvdb-rocksdb = "0.2.0"
kvdb = "0.2"
kvdb-rocksdb = "0.3.0"
log = "0.4"
migration-rocksdb = { path = "util/migration-rocksdb" }
node-filter = { path = "ethcore/node-filter" }
@ -57,7 +57,7 @@ parity-path = "0.1"
parity-rpc = { path = "rpc" }
parity-runtime = { path = "util/runtime" }
parity-updater = { path = "updater" }
parity-util-mem = { version = "0.2.0", features = ["jemalloc-global"] }
parity-util-mem = { version = "0.3.0", features = ["jemalloc-global"] }
parity-version = { path = "util/version" }
parking_lot = "0.9"
regex = "1.0"
@ -135,4 +135,3 @@ members = [
"evmbin",
"parity-clib",
]


@ -31,9 +31,9 @@ hash-db = "0.15.0"
itertools = "0.5"
journaldb = { path = "../util/journaldb" }
keccak-hash = "0.4.0"
kvdb = "0.1"
kvdb-memorydb = { version = "0.1.2", optional = true }
kvdb-rocksdb = { version = "0.2.0", optional = true }
kvdb = "0.2"
kvdb-memorydb = { version = "0.2.0", optional = true }
kvdb-rocksdb = { version = "0.3.0", optional = true }
lazy_static = { version = "1.3", optional = true }
log = "0.4"
macros = { path = "../util/macros", optional = true }
@ -42,7 +42,7 @@ memory-cache = { path = "../util/memory-cache" }
parity-bytes = "0.1"
parking_lot = "0.9"
pod = { path = "pod", optional = true }
trie-db = "0.16.0"
trie-db = "0.18.0"
parity-crypto = { version = "0.4.2", features = ["publickey"], optional = true }
patricia-trie-ethereum = { path = "../util/patricia-trie-ethereum" }
rand = "0.7"
@ -78,8 +78,8 @@ ethcore-builtin = { path = "./builtin" }
ethjson = { path = "../json", features = ["test-helpers"] }
parity-crypto = { version = "0.4.2", features = ["publickey"] }
fetch = { path = "../util/fetch" }
kvdb-memorydb = "0.1.2"
kvdb-rocksdb = "0.2.0"
kvdb-memorydb = "0.2.0"
kvdb-rocksdb = "0.3.0"
lazy_static = "1.3"
machine = { path = "./machine", features = ["test-helpers"] }
macros = { path = "../util/macros" }


@ -11,5 +11,5 @@ ethereum-types = "0.8.0"
hash-db = "0.15.0"
keccak-hash = "0.4.0"
keccak-hasher = { path = "../../util/keccak-hasher" }
kvdb = "0.1"
kvdb = "0.2"
rlp = "0.4"


@ -93,7 +93,7 @@ impl<'db> AsHashDB<KeccakHasher, DBValue> for AccountDB<'db> {
impl<'db> HashDB<KeccakHasher, DBValue> for AccountDB<'db> {
fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> {
if key == &KECCAK_NULL_RLP {
return Some(DBValue::from_slice(&NULL_RLP));
return Some(NULL_RLP.to_vec());
}
self.db.get(&combine_key(&self.address_hash, key), prefix)
}
@ -139,7 +139,7 @@ impl<'db> AccountDBMut<'db> {
impl<'db> HashDB<KeccakHasher, DBValue> for AccountDBMut<'db>{
fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> {
if key == &KECCAK_NULL_RLP {
return Some(DBValue::from_slice(&NULL_RLP));
return Some(NULL_RLP.to_vec());
}
self.db.get(&combine_key(&self.address_hash, key), prefix)
}
@ -157,7 +157,7 @@ impl<'db> HashDB<KeccakHasher, DBValue> for AccountDBMut<'db>{
}
let k = keccak(value);
let ak = combine_key(&self.address_hash, &k);
self.db.emplace(ak, prefix, DBValue::from_slice(value));
self.db.emplace(ak, prefix, value.to_vec());
k
}
@ -193,7 +193,7 @@ impl<'db> AsHashDB<KeccakHasher, DBValue> for Wrapping<'db> {
impl<'db> HashDB<KeccakHasher, DBValue> for Wrapping<'db> {
fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> {
if key == &KECCAK_NULL_RLP {
return Some(DBValue::from_slice(&NULL_RLP));
return Some(NULL_RLP.to_vec());
}
self.0.get(key, prefix)
}
@ -227,7 +227,7 @@ impl<'db> AsHashDB<KeccakHasher, DBValue> for WrappingMut<'db> {
impl<'db> HashDB<KeccakHasher, DBValue> for WrappingMut<'db>{
fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> {
if key == &KECCAK_NULL_RLP {
return Some(DBValue::from_slice(&NULL_RLP));
return Some(NULL_RLP.to_vec());
}
self.0.get(key, prefix)
}


@ -16,18 +16,18 @@ hash-db = "0.15.0"
journaldb = { path = "../../util/journaldb" }
keccak-hash = "0.4.0"
keccak-hasher = { path = "../../util/keccak-hasher" }
kvdb = "0.1"
kvdb = "0.2"
log = "0.4"
lru-cache = "0.1.2"
memory-db = "0.15.0"
memory-db = "0.18.0"
parity-bytes = "0.1.0"
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
parking_lot = "0.9"
pod = { path = "../pod" }
rlp = "0.4.0"
serde = { version = "1.0", features = ["derive"] }
trace = { path = "../trace" }
trie-db = "0.16.0"
trie-db = "0.18.0"
[dev-dependencies]
account-db = { path = "../account-db" }


@ -390,7 +390,7 @@ impl Account {
match db.get(&self.code_hash, hash_db::EMPTY_PREFIX) {
Some(x) => {
self.code_size = Some(x.len());
self.code_cache = Arc::new(x.into_vec());
self.code_cache = Arc::new(x);
Some(self.code_cache.clone())
},
_ => {
@ -530,7 +530,7 @@ impl Account {
self.code_filth = Filth::Clean;
},
(true, false) => {
db.emplace(self.code_hash.clone(), hash_db::EMPTY_PREFIX, DBValue::from_slice(&*self.code_cache));
db.emplace(self.code_hash.clone(), hash_db::EMPTY_PREFIX, self.code_cache.to_vec());
self.code_size = Some(self.code_cache.len());
self.code_filth = Filth::Clean;
},


@ -14,9 +14,9 @@ common-types = { path = "../types" }
ethcore-db = { path = "../db" }
ethereum-types = "0.8.0"
keccak-hash = "0.4.0"
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
itertools = "0.5"
kvdb = "0.1"
kvdb = "0.2"
log = "0.4"
parity-bytes = "0.1"
rand = "0.7"
@ -32,4 +32,4 @@ env_logger = "0.5"
parity-crypto = { version = "0.4.2", features = ["publickey"] }
rustc-hex = "1.0"
tempdir = "0.3"
kvdb-memorydb = "0.1.2"
kvdb-memorydb = "0.2.0"


@ -628,8 +628,7 @@ impl BlockChain {
let best_block_number = bc.best_block.read().header.number();
// Fetch first and best ancient block details
let raw_first = bc.db.key_value().get(db::COL_EXTRA, b"first")
.expect("Low level database error when fetching 'first' block. Some issue with disk?")
.map(|v| v.into_vec());
.expect("Low level database error when fetching 'first' block. Some issue with disk?");
let mut best_ancient = bc.db.key_value().get(db::COL_EXTRA, b"ancient")
.expect("Low level database error when fetching 'best ancient' block. Some issue with disk?")
.map(|h| H256::from_slice(&h));
@ -1665,7 +1664,7 @@ mod tests {
trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
_blooms_dir: blooms_dir,
_trace_blooms_dir: trace_blooms_dir,
key_value: Arc::new(kvdb_memorydb::create(ethcore_db::NUM_COLUMNS.unwrap()))
key_value: Arc::new(kvdb_memorydb::create(ethcore_db::NUM_COLUMNS))
};
Arc::new(db)


@ -15,7 +15,7 @@ common-types = { path = "../types" }
ethcore-db = { path = "../db" }
ethcore-miner = { path = "../../miner" }
ethereum-types = "0.8.0"
kvdb = "0.1"
kvdb = "0.2"
registrar = { path = "../../util/registrar" }
stats = { path = "../../util/stats" }
trace = { path = "../trace" }


@ -10,8 +10,8 @@ edition = "2018"
[dependencies]
common-types = { path = "../types" }
ethereum-types = "0.8.0"
kvdb = "0.1"
parity-util-mem = "0.2.0"
kvdb = "0.2"
parity-util-mem = "0.3.0"
parking_lot = "0.9"
rlp = "0.4.0"
rlp_derive = { path = "../../util/rlp-derive" }


@ -24,27 +24,27 @@ use kvdb::{DBTransaction, KeyValueDB};
use rlp;
// database columns
// Database column indexes.
/// Column for State
pub const COL_STATE: Option<u32> = Some(0);
pub const COL_STATE: u32 = 0;
/// Column for Block headers
pub const COL_HEADERS: Option<u32> = Some(1);
pub const COL_HEADERS: u32 = 1;
/// Column for Block bodies
pub const COL_BODIES: Option<u32> = Some(2);
pub const COL_BODIES: u32 = 2;
/// Column for Extras
pub const COL_EXTRA: Option<u32> = Some(3);
pub const COL_EXTRA: u32 = 3;
/// Column for Traces
pub const COL_TRACE: Option<u32> = Some(4);
pub const COL_TRACE: u32 = 4;
/// Column for the empty accounts bloom filter.
pub const COL_ACCOUNT_BLOOM: Option<u32> = Some(5);
pub const COL_ACCOUNT_BLOOM: u32 = 5;
/// Column for general information from the local node which can persist.
pub const COL_NODE_INFO: Option<u32> = Some(6);
pub const COL_NODE_INFO: u32 = 6;
/// Column for the light client chain.
pub const COL_LIGHT_CHAIN: Option<u32> = Some(7);
pub const COL_LIGHT_CHAIN: u32 = 7;
/// Column for the private transactions state.
pub const COL_PRIVATE_TRANSACTIONS_STATE: Option<u32> = Some(8);
pub const COL_PRIVATE_TRANSACTIONS_STATE: u32 = 8;
/// Number of columns in DB
pub const NUM_COLUMNS: Option<u32> = Some(9);
pub const NUM_COLUMNS: u32 = 9;
/// Modes for updating caches.
#[derive(Clone, Copy)]
@ -93,16 +93,25 @@ pub trait Key<T> {
/// Should be used to write value into database.
pub trait Writable {
/// Writes the value into the database.
fn write<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]>;
fn write<T, R>(&mut self, col: u32, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]>;
/// Deletes key from the databse.
fn delete<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]>;
/// Deletes key from the database.
fn delete<T, R>(&mut self, col: u32, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]>;
/// Writes the value into the database and updates the cache.
fn write_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut dyn Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
fn write_with_cache<K, T, R>(
&mut self,
col: u32,
cache: &mut dyn Cache<K, T>,
key: K,
value: T,
policy: CacheUpdatePolicy
)
where
K: Key<T, Target = R> + Hash + Eq,
T: rlp::Encodable,
R: AsRef<[u8]> {
R: AsRef<[u8]>
{
self.write(col, &key, &value);
match policy {
CacheUpdatePolicy::Overwrite => {
@ -115,10 +124,18 @@ pub trait Writable {
}
/// Writes the values into the database and updates the cache.
fn extend_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut dyn Cache<K, T>, values: HashMap<K, T>, policy: CacheUpdatePolicy) where
fn extend_with_cache<K, T, R>(
&mut self,
col: u32,
cache: &mut dyn Cache<K, T>,
values: HashMap<K, T>,
policy: CacheUpdatePolicy
)
where
K: Key<T, Target = R> + Hash + Eq,
T: rlp::Encodable,
R: AsRef<[u8]> {
R: AsRef<[u8]>
{
match policy {
CacheUpdatePolicy::Overwrite => {
for (key, value) in values {
@ -136,10 +153,18 @@ pub trait Writable {
}
/// Writes and removes the values into the database and updates the cache.
fn extend_with_option_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut dyn Cache<K, Option<T>>, values: HashMap<K, Option<T>>, policy: CacheUpdatePolicy) where
fn extend_with_option_cache<K, T, R>(
&mut self,
col: u32,
cache: &mut dyn Cache<K, Option<T>>,
values: HashMap<K, Option<T>>,
policy: CacheUpdatePolicy
)
where
K: Key<T, Target = R> + Hash + Eq,
T: rlp::Encodable,
R: AsRef<[u8]> {
R: AsRef<[u8]>
{
match policy {
CacheUpdatePolicy::Overwrite => {
for (key, value) in values {
@ -167,12 +192,12 @@ pub trait Writable {
/// Should be used to read values from database.
pub trait Readable {
/// Returns value for given key.
fn read<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> Option<T> where
fn read<T, R>(&self, col: u32, key: &dyn Key<T, Target = R>) -> Option<T> where
T: rlp::Decodable,
R: AsRef<[u8]>;
/// Returns value for given key either in cache or in database.
fn read_with_cache<K, T, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> Option<T> where
fn read_with_cache<K, T, C>(&self, col: u32, cache: &RwLock<C>, key: &K) -> Option<T> where
K: Key<T> + Eq + Hash + Clone,
T: Clone + rlp::Decodable,
C: Cache<K, T> {
@ -191,10 +216,18 @@ pub trait Readable {
}
/// Returns value for given key either in two-layered cache or in database.
fn read_with_two_layer_cache<K, T, C>(&self, col: Option<u32>, l1_cache: &RwLock<C>, l2_cache: &RwLock<C>, key: &K) -> Option<T> where
fn read_with_two_layer_cache<K, T, C>(
&self,
col: u32,
l1_cache: &RwLock<C>,
l2_cache: &RwLock<C>,
key: &K
) -> Option<T>
where
K: Key<T> + Eq + Hash + Clone,
T: Clone + rlp::Decodable,
C: Cache<K, T> {
C: Cache<K, T>
{
{
let read = l1_cache.read();
if let Some(v) = read.get(key) {
@ -206,10 +239,10 @@ pub trait Readable {
}
/// Returns true if given value exists.
fn exists<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]>;
fn exists<T, R>(&self, col: u32, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]>;
/// Returns true if given value exists either in cache or in database.
fn exists_with_cache<K, T, R, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> bool where
fn exists_with_cache<K, T, R, C>(&self, col: u32, cache: &RwLock<C>, key: &K) -> bool where
K: Eq + Hash + Key<T, Target = R>,
R: AsRef<[u8]>,
C: Cache<K, T> {
@ -225,17 +258,17 @@ pub trait Readable {
}
impl Writable for DBTransaction {
fn write<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]> {
fn write<T, R>(&mut self, col: u32, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]> {
self.put(col, key.key().as_ref(), &rlp::encode(value));
}
fn delete<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]> {
fn delete<T, R>(&mut self, col: u32, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]> {
self.delete(col, key.key().as_ref());
}
}
impl<KVDB: KeyValueDB + ?Sized> Readable for KVDB {
fn read<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> Option<T>
fn read<T, R>(&self, col: u32, key: &dyn Key<T, Target = R>) -> Option<T>
where T: rlp::Decodable, R: AsRef<[u8]> {
self.get(col, key.key().as_ref())
.expect(&format!("db get failed, key: {:?}", key.key().as_ref()))
@ -243,7 +276,7 @@ impl<KVDB: KeyValueDB + ?Sized> Readable for KVDB {
}
fn exists<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]> {
fn exists<T, R>(&self, col: u32, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]> {
let result = self.get(col, key.key().as_ref());
match result {


@ -17,13 +17,13 @@ ethereum-types = "0.8.0"
ethjson = { path = "../../../json" }
executive-state = { path = "../../executive-state" }
keccak-hash = "0.4.0"
kvdb = "0.1"
kvdb = "0.2"
lazy_static = "1.3.0"
log = "0.4.8"
machine = { path = "../../machine" }
memory-cache = { path = "../../../util/memory-cache" }
parity-bytes = "0.1.0"
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
parking_lot = "0.9"
rlp = "0.4.2"
triehash = { package = "triehash-ethereum", version = "0.2", path = "../../../util/triehash-ethereum" }


@ -154,11 +154,10 @@ fn check_first_proof(machine: &Machine, contract_address: Address, old_header: H
fn decode_first_proof(rlp: &Rlp) -> Result<(Header, Vec<DBValue>), EthcoreError> {
let header = rlp.val_at(0)?;
let state_items = rlp.at(1)?.iter().map(|x| {
let mut val = DBValue::new();
val.append_slice(x.data()?);
Ok(val)
}).collect::<Result<_, EthcoreError>>()?;
let state_items = rlp.at(1)?
.iter()
.map(|x| Ok(x.data()?.to_vec()) )
.collect::<Result<_, EthcoreError>>()?;
Ok((header, state_items))
}


@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
bit-set = "0.4"
parity-bytes = "0.1"
ethereum-types = "0.8.0"
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
lazy_static = "1.0"
log = "0.4"
vm = { path = "../vm" }


@ -14,7 +14,7 @@ common-types = { path = "../types" }
ethereum-types = "0.8.0"
hash-db = "0.15.0"
keccak-hasher = { path = "../../util/keccak-hasher" }
kvdb = "0.1"
kvdb = "0.2"
log = "0.4.8"
machine = { path = "../machine" }
trace = { path = "../trace" }
@ -30,5 +30,5 @@ keccak-hash = "0.4.0"
pod = { path = "../pod" }
rustc-hex = "1.0"
spec = { path = "../spec" }
trie-db = "0.16.0"
trie-db = "0.18.0"
ethtrie = { package = "patricia-trie-ethereum", path = "../../util/patricia-trie-ethereum" }


@ -18,14 +18,14 @@ ethcore-blockchain = { path = "../blockchain" }
ethereum-types = "0.8.0"
executive-state = { path = "../executive-state" }
machine = { path = "../machine" }
memory-db = "0.15.0"
trie-db = "0.16.0"
memory-db = "0.18.0"
trie-db = "0.18.0"
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
ethcore-network = { path = "../../util/network" }
ethcore-miner = { path = "../../miner" }
ethcore-io = { path = "../../util/io" }
hash-db = "0.15.0"
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
vm = { path = "../vm" }
fastmap = { path = "../../util/fastmap" }
failsafe = { version = "0.3.0", default-features = false, features = ["parking_lot_mutex"] }
@ -43,14 +43,14 @@ stats = { path = "../../util/stats" }
keccak-hash = "0.4.0"
keccak-hasher = { path = "../../util/keccak-hasher" }
triehash-ethereum = { version = "0.2", path = "../../util/triehash-ethereum" }
kvdb = "0.1"
kvdb = "0.2"
memory-cache = { path = "../../util/memory-cache" }
journaldb = { path = "../../util/journaldb" }
verification = { path = "../verification" }
[dev-dependencies]
ethcore = { path = "..", features = ["test-helpers"] }
kvdb-memorydb = "0.1.2"
kvdb-memorydb = "0.2.0"
tempdir = "0.3"
[features]


@ -220,7 +220,7 @@ pub struct HeaderChain {
#[ignore_malloc_size_of = "ignored for performance reason"]
db: Arc<dyn KeyValueDB>,
#[ignore_malloc_size_of = "ignored for performance reason"]
col: Option<u32>,
col: u32,
#[ignore_malloc_size_of = "ignored for performance reason"]
cache: Arc<Mutex<Cache>>,
}
@ -229,7 +229,7 @@ impl HeaderChain {
/// Create a new header chain given this genesis block and database to read from.
pub fn new(
db: Arc<dyn KeyValueDB>,
col: Option<u32>,
col: u32,
spec: &Spec,
cache: Arc<Mutex<Cache>>,
allow_hs: HardcodedSync,
@ -259,7 +259,7 @@ impl HeaderChain {
live_epoch_proofs.insert(c.hash, EpochTransition {
block_hash: c.hash,
block_number: cur_number,
proof: proof.into_vec(),
proof,
});
}
}
@ -667,7 +667,8 @@ impl HeaderChain {
None => {
match self.db.get(self.col, hash.as_bytes()) {
Ok(db_value) => {
db_value.map(|x| x.into_vec()).map(encoded::Header::new)
db_value
.map(encoded::Header::new)
.and_then(|header| {
cache.insert_block_header(hash, header.clone());
Some(header)
@ -886,7 +887,7 @@ mod tests {
use parking_lot::Mutex;
fn make_db() -> Arc<dyn KeyValueDB> {
Arc::new(kvdb_memorydb::create(0))
Arc::new(kvdb_memorydb::create(1))
}
#[test]
@ -897,7 +898,7 @@ mod tests {
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();
let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).unwrap();
let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
@ -930,7 +931,7 @@ mod tests {
let db = make_db();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();
let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).unwrap();
let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
@ -1012,7 +1013,7 @@ mod tests {
let db = make_db();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();
let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).unwrap();
assert!(chain.block_header(BlockId::Earliest).is_some());
assert!(chain.block_header(BlockId::Latest).is_some());
@ -1026,7 +1027,7 @@ mod tests {
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
{
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
HardcodedSync::Allow).unwrap();
let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
@ -1047,7 +1048,7 @@ mod tests {
}
}
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
HardcodedSync::Allow).unwrap();
assert!(chain.block_header(BlockId::Number(10)).is_none());
assert!(chain.block_header(BlockId::Number(9000)).is_some());
@ -1064,7 +1065,7 @@ mod tests {
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
{
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
HardcodedSync::Allow).unwrap();
let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
@ -1107,7 +1108,7 @@ mod tests {
}
// after restoration, non-canonical eras should still be loaded.
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
HardcodedSync::Allow).unwrap();
assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10);
assert!(chain.candidates.read().get(&100).is_some())
@ -1120,7 +1121,7 @@ mod tests {
let db = make_db();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
HardcodedSync::Allow).unwrap();
assert!(chain.block_header(BlockId::Earliest).is_some());
@ -1135,7 +1136,7 @@ mod tests {
let db = make_db();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();
let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).unwrap();
let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();
@ -1202,7 +1203,7 @@ mod tests {
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).expect("failed to instantiate a new HeaderChain");
let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).expect("failed to instantiate a new HeaderChain");
let mut parent_hash = genesis_header.hash();
let mut rolling_timestamp = genesis_header.timestamp();


@ -61,7 +61,7 @@ pub struct Config {
/// Verification queue config.
pub queue: queue::Config,
/// Chain column in database.
pub chain_column: Option<u32>,
pub chain_column: u32,
/// Should it do full verification of blocks?
pub verify_full: bool,
/// Should it check the seal of blocks?
@ -74,7 +74,7 @@ impl Default for Config {
fn default() -> Config {
Config {
queue: Default::default(),
chain_column: None,
chain_column: 0,
verify_full: true,
check_seal: true,
no_hardcoded_sync: false,
@ -182,7 +182,7 @@ impl<T: ChainDataFetcher> Client<T> {
pub fn new(
config: Config,
db: Arc<dyn KeyValueDB>,
chain_col: Option<u32>,
chain_col: u32,
spec: &Spec,
fetcher: T,
io_channel: IoChannel<ClientIoMessage<()>>,


@ -1507,9 +1507,7 @@ pub mod execution {
fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
let mut items = Vec::new();
for raw_item in rlp.iter() {
let mut item = DBValue::new();
item.append_slice(raw_item.data()?);
items.push(item);
items.push(raw_item.data()?.to_vec());
}
Ok(Response { items })
@ -1839,8 +1837,6 @@ mod tests {
#[test]
fn execution_roundtrip() {
use kvdb::DBValue;
let req = IncompleteExecutionRequest {
block_hash: Field::Scalar(Default::default()),
from: Default::default(),
@ -1852,13 +1848,7 @@ mod tests {
};
let full_req = Request::Execution(req.clone());
let res = ExecutionResponse {
items: vec![DBValue::new(), {
let mut value = DBValue::new();
value.append_slice(&[1, 1, 1, 2, 3]);
value
}],
};
let res = ExecutionResponse { items: vec![vec![], vec![1, 1, 1, 2, 3]] };
let full_res = Response::Execution(res.clone());
check_roundtrip(req);


@ -22,7 +22,7 @@ lru-cache = "0.1"
[dev-dependencies]
ethcore = { path = "..", features = ["test-helpers"] }
kvdb-memorydb = "0.1.2"
kvdb-memorydb = "0.2.0"
ethcore-io = { path = "../../util/io" }
spec = { path = "../spec" }
tempdir = "0.3"


@ -15,13 +15,13 @@ hash-db = "0.15.0"
itertools = "0.8"
keccak-hash = "0.4.0"
keccak-hasher = { path = "../../util/keccak-hasher" }
kvdb = "0.1"
kvdb = "0.2"
log = "0.4"
parity-bytes = "0.1.0"
rlp = "0.4"
rustc-hex = "1"
serde = { version = "1.0", features = ["derive"] }
trie-db = "0.16.0"
trie-db = "0.18.0"
triehash = { package = "triehash-ethereum", version = "0.2", path = "../../util/triehash-ethereum" }
[dev-dependencies]


@ -22,18 +22,18 @@ ethereum-types = "0.8.0"
ethjson = { path = "../../json" }
fetch = { path = "../../util/fetch" }
futures = "0.1"
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
hash-db = "0.15.0"
keccak-hash = "0.4.0"
keccak-hasher = { path = "../../util/keccak-hasher" }
kvdb = "0.1"
kvdb = "0.2"
log = "0.4"
machine = { path = "../machine" }
journaldb = { path = "../../util/journaldb" }
parity-bytes = "0.1"
parity-crypto = { version = "0.4.2", features = ["publickey"] }
parking_lot = "0.9"
trie-db = "0.16.0"
trie-db = "0.18.0"
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
registrar = { path = "../../util/registrar" }
rlp = "0.4.0"


@ -14,7 +14,7 @@ ethcore-io = { path = "../../util/io" }
ethcore-private-tx = { path = "../private-tx" }
ethcore-sync = { path = "../sync" }
ethereum-types = "0.8.0"
kvdb = "0.1"
kvdb = "0.2"
log = "0.4"
snapshot = { path = "../snapshot" }
spec = { path = "../spec" }
@ -23,5 +23,5 @@ trace-time = "0.1"
[dev-dependencies]
ethcore = { path = "..", features = ["test-helpers"] }
ethcore-db = { path = "../db" }
kvdb-rocksdb = "0.2.0"
kvdb-rocksdb = "0.3.0"
tempdir = "0.3"


@ -29,7 +29,7 @@ itertools = "0.5"
journaldb = { path = "../../util/journaldb" }
keccak-hash = "0.4.0"
keccak-hasher = { path = "../../util/keccak-hasher" }
kvdb = "0.1"
kvdb = "0.2"
log = "0.4.8"
num_cpus = "1.10.1"
rand = "0.7"
@ -40,7 +40,7 @@ rlp_derive = { path = "../../util/rlp-derive" }
scopeguard = "1.0.0"
snappy = { package = "parity-snappy", version ="0.1.0" }
state-db = { path = "../state-db" }
trie-db = "0.16.0"
trie-db = "0.18.0"
triehash = { package = "triehash-ethereum", version = "0.2", path = "../../util/triehash-ethereum" }
[dev-dependencies]
@ -53,7 +53,7 @@ ethabi-contract = "9.0.0"
ethabi-derive = "9.0.1"
ethcore = { path = "..", features = ["test-helpers"] }
ethkey = { path = "../../accounts/ethkey" }
kvdb-rocksdb = "0.2.0"
kvdb-rocksdb = "0.3.0"
lazy_static = { version = "1.3" }
spec = { path = "../spec" }
tempdir = "0.3"


@ -23,8 +23,8 @@ hash-db = "0.15.0"
journaldb = { path = "../../../util/journaldb" }
keccak-hash = "0.4.0"
keccak-hasher = { path = "../../../util/keccak-hasher" }
kvdb = "0.1"
kvdb-rocksdb = "0.2.0"
kvdb = "0.2"
kvdb-rocksdb = "0.3.0"
log = "0.4.8"
parking_lot = "0.9"
parity-crypto = { version = "0.4.2", features = ["publickey"] }
@ -35,7 +35,7 @@ snappy = { package = "parity-snappy", version ="0.1.0" }
snapshot = { path = "../../snapshot", features = ["test-helpers"] }
spec = { path = "../../spec" }
tempdir = "0.3"
trie-db = "0.16.0"
trie-db = "0.18.0"
trie-standardmap = "0.15.0"
ethabi = "9.0.1"
ethabi-contract = "9.0.0"


@ -27,7 +27,6 @@ use ethcore::test_helpers::get_temp_state_db;
use ethereum_types::{H256, Address};
use hash_db::{HashDB, EMPTY_PREFIX};
use keccak_hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak};
use kvdb::DBValue;
use parking_lot::RwLock;
use rlp::Rlp;
use snapshot::test_helpers::{ACC_EMPTY, to_fat_rlps, from_fat_rlp};
@ -151,7 +150,7 @@ fn encoding_code() {
{
let mut acct_db = AccountDBMut::from_hash(db.as_hash_db_mut(), keccak(addr2));
acct_db.emplace(code_hash.clone(), EMPTY_PREFIX, DBValue::from_slice(b"this is definitely code"));
acct_db.emplace(code_hash.clone(), EMPTY_PREFIX, b"this is definitely code".to_vec());
}
let account1 = BasicAccount {


@ -91,7 +91,7 @@ impl StateProducer {
let mut account: BasicAccount = rlp::decode(&*account_data).expect("error decoding basic account");
let acct_db = AccountDBMut::from_hash(db, *address_hash);
fill_storage(acct_db, &mut account.storage_root, &mut self.storage_seed);
*account_data = DBValue::from_vec(rlp::encode(&account));
*account_data = rlp::encode(&account);
}
// sweep again to alter account trie.


@ -421,7 +421,7 @@ impl StateRebuilder {
for (code_hash, code, first_with) in status.new_code {
for addr_hash in self.missing_code.remove(&code_hash).unwrap_or_else(Vec::new) {
let mut db = AccountDBMut::from_hash(self.db.as_hash_db_mut(), addr_hash);
db.emplace(code_hash, hash_db::EMPTY_PREFIX, DBValue::from_slice(&code));
db.emplace(code_hash, hash_db::EMPTY_PREFIX, code.to_vec());
}
self.known_code.insert(code_hash, first_with);


@ -25,7 +25,7 @@ hash-db = "0.15.0"
instant-seal = { path = "../engines/instant-seal" }
journaldb = { path = "../../util/journaldb" }
keccak-hash = "0.4.0"
kvdb-memorydb = "0.1.2"
kvdb-memorydb = "0.2.0"
log = "0.4.8"
machine = { path = "../machine" }
null-engine = { path = "../engines/null-engine" }


@ -510,9 +510,9 @@ impl Spec {
let factories = Default::default();
let mut db = journaldb::new(
Arc::new(kvdb_memorydb::create(0)),
Arc::new(kvdb_memorydb::create(1)),
journaldb::Algorithm::Archive,
None,
0,
);
self.ensure_db_good(BasicBackend(db.as_hash_db_mut()), &factories)
@ -540,18 +540,14 @@ impl Spec {
data: d,
}.fake_sign(from);
let res = executive_state::prove_transaction_virtual(
executive_state::prove_transaction_virtual(
db.as_hash_db_mut(),
*genesis.state_root(),
&tx,
self.engine.machine(),
&env_info,
factories.clone(),
);
res.map(|(out, proof)| {
(out, proof.into_iter().map(|x| x.into_vec()).collect())
}).ok_or_else(|| "Failed to prove call: insufficient state".into())
).ok_or_else(|| "Failed to prove call: insufficient state".into())
};
self.engine.genesis_epoch_data(&genesis, &call)


@ -647,15 +647,13 @@ impl Importer {
let res = Executive::new(&mut state, &env_info, &machine, &schedule)
.transact(&transaction, options);
let res = match res {
match res {
Err(e) => {
trace!(target: "client", "Proved call failed: {}", e);
Err(e.to_string())
}
Ok(res) => Ok((res.output, state.drop().1.extract_proof())),
};
res.map(|(output, proof)| (output, proof.into_iter().map(|x| x.into_vec()).collect()))
}
};
match with_state.generate_proof(&call) {


@ -165,7 +165,7 @@ impl<'a> EvmTestClient<'a> {
}
fn state_from_spec(spec: &'a spec::Spec, factories: &Factories) -> Result<State<state_db::StateDB>, EvmTestError> {
let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS));
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
state_db = spec.ensure_db_good(state_db, factories)?;
@ -187,7 +187,7 @@ impl<'a> EvmTestClient<'a> {
}
fn state_from_pod(spec: &'a spec::Spec, factories: &Factories, pod_state: PodState) -> Result<State<state_db::StateDB>, EvmTestError> {
let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS));
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
let state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
let mut state = State::new(


@ -312,7 +312,7 @@ pub fn new_db() -> Arc<dyn BlockChainDB> {
trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
_blooms_dir: blooms_dir,
_trace_blooms_dir: trace_blooms_dir,
key_value: Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap()))
key_value: Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS))
};
Arc::new(db)


@ -399,7 +399,7 @@ impl TestBlockChainClient {
}
pub fn get_temp_state_db() -> StateDB {
let db = kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0));
let db = kvdb_memorydb::create(NUM_COLUMNS);
let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE);
StateDB::new(journal_db, 1024 * 1024)
}


@ -16,7 +16,7 @@ hash-db = "0.15.0"
keccak-hash = "0.4.0"
keccak-hasher = { path = "../../util/keccak-hasher" }
journaldb = { path = "../../util/journaldb" }
kvdb = "0.1"
kvdb = "0.2"
log = "0.4.6"
lru-cache = "0.1.2"
memory-cache = { path = "../../util/memory-cache" }


@ -27,7 +27,7 @@ macros = { path = "../../util/macros" }
network = { package = "ethcore-network", path = "../../util/network" }
parity-runtime = { path = "../../util/runtime" }
parity-crypto = { version = "0.4.2", features = ["publickey"] }
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
rand = "0.7"
parking_lot = "0.9"
rlp = "0.4.0"
@ -40,7 +40,7 @@ env_logger = "0.5"
engine = { path = "../engine" }
ethcore = { path = "..", features = ["test-helpers"] }
ethcore-io = { path = "../../util/io", features = ["mio"] }
kvdb-memorydb = "0.1.2"
kvdb-memorydb = "0.2.0"
machine = { path = "../machine" }
rand_xorshift = "0.2"
rustc-hex = "1.0"


@ -235,11 +235,11 @@ impl TestNet<Peer> {
// skip full verification because the blocks are bad.
config.verify_full = false;
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
let db = kvdb_memorydb::create(0);
let db = kvdb_memorydb::create(1);
let client = LightClient::new(
config,
Arc::new(db),
None,
0,
&spec::new_test(),
fetch::unavailable(), // TODO: allow fetch from full nodes.
IoChannel::disconnected(),


@ -11,10 +11,10 @@ ethcore-blockchain = { path = "../blockchain" }
ethcore-db = { path = "../db" }
ethereum-types = "0.8.0"
evm = { path = "../evm" }
kvdb = "0.1"
kvdb = "0.2"
log = "0.4"
parity-bytes = "0.1.0"
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
parking_lot = "0.9"
rlp = "0.4.0"
rlp_derive = { path = "../../util/rlp-derive" }


@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
[dependencies]
trie-db = "0.16.0"
trie-db = "0.18.0"
ethtrie = { package = "patricia-trie-ethereum", path = "../../util/patricia-trie-ethereum" }
account-db = { path = "../account-db" }
evm = { path = "../evm" }


@ -13,7 +13,7 @@ ethjson = { path = "../../json" }
keccak-hash = "0.4.0"
parity-bytes = "0.1"
parity-crypto = { version = "0.4.2", features = ["publickey"] }
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
parity-snappy = "0.1"
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
rlp = "0.4.0"


@ -24,7 +24,7 @@ len-caching-lock = { path = "../../util/len-caching-lock" }
log = "0.4"
num_cpus = "1.2"
parity-bytes = "0.1.0"
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
parking_lot = "0.9"
rlp = "0.4.2"
time-utils = { path = "../../util/time-utils" }


@ -22,7 +22,7 @@ ethabi-contract = "9.0.0"
ethcore-call-contract = { path = "../ethcore/call-contract" }
ethereum-types = "0.8.0"
futures = "0.1"
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
keccak-hash = "0.4.0"
linked-hash-map = "0.5"
log = "0.4"


@ -8,7 +8,7 @@ edition = "2018"
[dependencies]
common-types = { path = "../../ethcore/types" }
ethcore-io = { path = "../../util/io" }
kvdb = "0.1"
kvdb = "0.2"
log = "0.4"
rlp = "0.4.0"
serde = "1.0"
@ -18,4 +18,4 @@ serde_json = "1.0"
[dev-dependencies]
ethkey = { path = "../../accounts/ethkey" }
parity-crypto = { version = "0.4.2", features = ["publickey"] }
kvdb-memorydb = "0.1.2"
kvdb-memorydb = "0.2.0"


@ -107,7 +107,7 @@ pub trait NodeInfo: Send + Sync {
/// Create a new local data store, given a database, a column to write to, and a node.
/// Attempts to read data out of the store, and move it into the node.
pub fn create<T: NodeInfo>(db: Arc<dyn KeyValueDB>, col: Option<u32>, node: T) -> LocalDataStore<T> {
pub fn create<T: NodeInfo>(db: Arc<dyn KeyValueDB>, col: u32, node: T) -> LocalDataStore<T> {
LocalDataStore {
db,
col,
@ -121,7 +121,7 @@ pub fn create<T: NodeInfo>(db: Arc<dyn KeyValueDB>, col: Option<u32>, node: T) -
/// and the node security level.
pub struct LocalDataStore<T: NodeInfo> {
db: Arc<dyn KeyValueDB>,
col: Option<u32>,
col: u32,
node: T,
}
@ -214,15 +214,15 @@ mod tests {
#[test]
fn twice_empty() {
let db = Arc::new(::kvdb_memorydb::create(0));
let db = Arc::new(::kvdb_memorydb::create(1));
{
let store = super::create(db.clone(), None, Dummy(vec![]));
let store = super::create(db.clone(), 0, Dummy(vec![]));
assert_eq!(store.pending_transactions().unwrap(), vec![])
}
{
let store = super::create(db.clone(), None, Dummy(vec![]));
let store = super::create(db.clone(), 0, Dummy(vec![]));
assert_eq!(store.pending_transactions().unwrap(), vec![])
}
}
@ -243,21 +243,21 @@ mod tests {
PendingTransaction::new(signed, condition)
}).collect();
let db = Arc::new(::kvdb_memorydb::create(0));
let db = Arc::new(::kvdb_memorydb::create(1));
{
// nothing written yet, will write pending.
let store = super::create(db.clone(), None, Dummy(transactions.clone()));
let store = super::create(db.clone(), 0, Dummy(transactions.clone()));
assert_eq!(store.pending_transactions().unwrap(), vec![])
}
{
// pending written, will write nothing.
let store = super::create(db.clone(), None, Dummy(vec![]));
let store = super::create(db.clone(), 0, Dummy(vec![]));
assert_eq!(store.pending_transactions().unwrap(), transactions)
}
{
// pending removed, will write nothing.
let store = super::create(db.clone(), None, Dummy(vec![]));
let store = super::create(db.clone(), 0, Dummy(vec![]));
assert_eq!(store.pending_transactions().unwrap(), vec![])
}
}
@ -282,15 +282,15 @@ mod tests {
PendingTransaction::new(signed, None)
});
let db = Arc::new(::kvdb_memorydb::create(0));
let db = Arc::new(::kvdb_memorydb::create(1));
{
// nothing written, will write bad.
let store = super::create(db.clone(), None, Dummy(transactions.clone()));
let store = super::create(db.clone(), 0, Dummy(transactions.clone()));
assert_eq!(store.pending_transactions().unwrap(), vec![])
}
{
// try to load transactions. The last transaction, which is invalid, will be skipped.
let store = super::create(db.clone(), None, Dummy(vec![]));
let store = super::create(db.clone(), 0, Dummy(vec![]));
let loaded = store.pending_transactions().unwrap();
transactions.pop();
assert_eq!(loaded, transactions);


@ -37,7 +37,7 @@ pub fn migrate_blooms<P: AsRef<Path>>(path: P, config: &DatabaseConfig) -> Resul
// 3u8 -> ExtrasIndex::BlocksBlooms
// 0u8 -> level 0
let blooms_iterator = db.key_value()
.iter_from_prefix(Some(3), &[3u8, 0u8])
.iter_from_prefix(3, &[3u8, 0u8])
.filter(|(key, _)| key.len() == 6)
.take_while(|(key, _)| {
key[0] == 3u8 && key[1] == 0u8
@ -63,7 +63,7 @@ pub fn migrate_blooms<P: AsRef<Path>>(path: P, config: &DatabaseConfig) -> Resul
// 1u8 -> TraceDBIndex::BloomGroups
// 0u8 -> level 0
let trace_blooms_iterator = db.key_value()
.iter_from_prefix(Some(4), &[1u8, 0u8])
.iter_from_prefix(4, &[1u8, 0u8])
.filter(|(key, _)| key.len() == 6)
.take_while(|(key, _)| {
key[0] == 1u8 && key[1] == 0u8


@ -29,32 +29,36 @@ pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -
/// Spreads the `total` (in MiB) memory budget across the db columns.
/// If it's `None`, the default memory budget will be used for each column.
pub fn memory_per_column(total: Option<usize>) -> HashMap<Option<u32>, usize> {
/// 90% of the memory budget is assigned to the first column, `col0`, which is where we store the
/// state.
pub fn memory_per_column(total: Option<usize>) -> HashMap<u32, usize> {
let mut memory_per_column = HashMap::new();
if let Some(budget) = total {
// spend 90% of the memory budget on the state column, but at least 256 MiB
memory_per_column.insert(ethcore_db::COL_STATE, std::cmp::max(budget * 9 / 10, 256));
let num_columns = ethcore_db::NUM_COLUMNS.expect("NUM_COLUMNS is Some; qed");
// spread the remaining 10% evenly across columns
let rest_budget = budget / 10 / (num_columns as usize - 1);
for i in 1..num_columns {
let rest_budget = budget / 10 / (ethcore_db::NUM_COLUMNS as usize - 1);
for i in 1..ethcore_db::NUM_COLUMNS {
// but at least 16 MiB for each column
memory_per_column.insert(Some(i), std::cmp::max(rest_budget, 16));
memory_per_column.insert(i, std::cmp::max(rest_budget, 16));
}
}
memory_per_column
}
/// Spreads the `total` (in MiB) memory budget across the light db columns.
pub fn memory_per_column_light(total: usize) -> HashMap<Option<u32>, usize> {
pub fn memory_per_column_light(total: usize) -> HashMap<u32, usize> {
let mut memory_per_column = HashMap::new();
let num_columns = ethcore_db::NUM_COLUMNS.expect("NUM_COLUMNS is Some; qed");
// spread the memory budget evenly across columns
// light client doesn't use the state column
let per_column = total / (num_columns as usize - 1);
for i in 1..num_columns {
let per_column = total / (ethcore_db::NUM_COLUMNS as usize - 1);
// Note: `col0` (State) is not used for the light client so setting it to a low value.
memory_per_column.insert(0, 1);
for i in 1..ethcore_db::NUM_COLUMNS {
// but at least 4 MiB for each column
memory_per_column.insert(Some(i), std::cmp::max(per_column, 4));
memory_per_column.insert(i, std::cmp::max(per_column, 4));
}
memory_per_column
}
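As a quick sanity check of the split above (a worked example, not part of the diff — the 1000 MiB budget is hypothetical):

```rust
fn main() {
    // Hypothetical total budget of 1000 MiB spread over the 9 ethcore-db columns.
    let budget = 1000usize;
    let num_columns = 9usize;
    // col0 (state) gets 90% of the budget, but never less than 256 MiB.
    let state_col = std::cmp::max(budget * 9 / 10, 256);
    // The remaining 10% is spread evenly over the other columns, at least 16 MiB each.
    let other_cols = std::cmp::max(budget / 10 / (num_columns - 1), 16);
    assert_eq!((state_col, other_cols), (900, 16));
}
```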


@ -29,24 +29,24 @@ use super::blooms::migrate_blooms;
/// The migration from v10 to v11.
/// Adds a column for node info.
pub const TO_V11: ChangeColumns = ChangeColumns {
pre_columns: Some(6),
post_columns: Some(7),
pre_columns: 6,
post_columns: 7,
version: 11,
};
/// The migration from v11 to v12.
/// Adds a column for light chain storage.
pub const TO_V12: ChangeColumns = ChangeColumns {
pre_columns: Some(7),
post_columns: Some(8),
pre_columns: 7,
post_columns: 8,
version: 12,
};
/// The migration from v12 to v14.
/// Adds a column for private transactions state storage.
pub const TO_V14: ChangeColumns = ChangeColumns {
pre_columns: Some(8),
post_columns: Some(9),
pre_columns: 8,
post_columns: 9,
version: 14,
};


@ -114,7 +114,7 @@ fn take_spec_name_override() -> Option<String> {
#[cfg(windows)]
fn global_cleanup() {
// We need to cleanup all sockets before spawning another Parity process. This makes sure everything is cleaned up.
// We need to clean up all sockets before spawning another Parity process. This makes sure everything is cleaned up.
// The loop is required because of internal reference counter for winsock dll. We don't know how many crates we use do
// initialize it. There's at least 2 now.
for _ in 0.. 10 {


@ -21,8 +21,8 @@ ethkey = { path = "../accounts/ethkey", optional = true }
futures = "0.1"
hyper = { version = "0.12", default-features = false }
keccak-hash = "0.4.0"
kvdb = "0.1"
kvdb-rocksdb = "0.2.0"
kvdb = "0.2"
kvdb-rocksdb = "0.3.0"
lazy_static = "1.0"
log = "0.4"
parity-bytes = "0.1"
@ -47,7 +47,7 @@ env_logger = "0.5"
ethkey = { path = "../accounts/ethkey" }
ethcore = { path = "../ethcore", features = ["test-helpers"] }
tempdir = "0.3"
kvdb-rocksdb = "0.2.0"
kvdb-rocksdb = "0.3.0"
[features]
accounts = ["ethcore-accounts", "ethkey"]


@ -34,7 +34,7 @@ const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checke
/// ACL storage of Secret Store
pub trait AclStorage: Send + Sync {
/// Check if requestor can access document with hash `document`
/// Check if requester can access document with hash `document`
fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error>;
}
@ -132,7 +132,7 @@ impl CachedContract {
}
impl DummyAclStorage {
/// Prohibit given requestor access to given documents
/// Prohibit given requester access to given documents
#[cfg(test)]
pub fn prohibit(&self, requester: Address, document: ServerKeyId) {
self.prohibited.write()


@ -120,7 +120,7 @@ impl KeyStorage for PersistentKeyStorage {
let key: SerializableDocumentKeyShareV3 = key.into();
let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?;
let mut batch = self.db.transaction();
batch.put(Some(0), document.as_bytes(), &key);
batch.put(0, document.as_bytes(), &key);
self.db.write(batch).map_err(Into::into)
}
@ -129,7 +129,7 @@ impl KeyStorage for PersistentKeyStorage {
}
fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error> {
self.db.get(Some(0), document.as_bytes())
self.db.get(0, document.as_bytes())
.map_err(|e| Error::Database(e.to_string()))
.and_then(|key| match key {
None => Ok(None),
@ -142,28 +142,28 @@ impl KeyStorage for PersistentKeyStorage {
fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
let mut batch = self.db.transaction();
batch.delete(Some(0), document.as_bytes());
batch.delete(0, document.as_bytes());
self.db.write(batch).map_err(Into::into)
}
fn clear(&self) -> Result<(), Error> {
let mut batch = self.db.transaction();
for (key, _) in self.iter() {
batch.delete(Some(0), key.as_bytes());
batch.delete(0, key.as_bytes());
}
self.db.write(batch)
.map_err(|e| Error::Database(e.to_string()))
}
fn contains(&self, document: &ServerKeyId) -> bool {
self.db.get(Some(0), document.as_bytes())
self.db.get(0, document.as_bytes())
.map(|k| k.is_some())
.unwrap_or(false)
}
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
Box::new(PersistentKeyStorageIterator {
iter: self.db.iter(Some(0)),
iter: self.db.iter(0),
})
}
}
@ -350,7 +350,7 @@ pub mod tests {
};
let key3 = ServerKeyId::from_low_u64_be(3);
let db_config = DatabaseConfig::with_columns(Some(1));
let db_config = DatabaseConfig::with_columns(1);
let db = Database::open(&db_config, &tempdir.path().display().to_string()).unwrap();
let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap();


@ -104,7 +104,7 @@ pub fn open_secretstore_db(data_path: &str) -> Result<Arc<dyn KeyValueDB>, Strin
db_path.push("db");
let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?;
let config = DatabaseConfig::with_columns(Some(1));
let config = DatabaseConfig::with_columns(1);
Ok(Arc::new(Database::open(&config, &db_path).map_err(|e| format!("Error opening database: {:?}", e))?))
}


@ -19,14 +19,9 @@
use std::fmt::{Display, Error as FmtError, Formatter};
use std::fs;
use std::io::{Error as IoError, ErrorKind as IoErrorKind, Read as _, Write as _};
use std::io::{Error as IoError, ErrorKind as IoErrorKind, Read as _};
use std::path::PathBuf;
use kvdb::DBTransaction;
use kvdb_rocksdb::{Database, DatabaseConfig};
/// We used to store the version in the database (until version 4).
const LEGACY_DB_META_KEY_VERSION: &[u8; 7] = b"version";
/// Current db version.
const CURRENT_VERSION: u8 = 4;
/// Database is assumed to be at the default version, when no version file is found.
@ -34,14 +29,16 @@ const DEFAULT_VERSION: u8 = 3;
/// Version file name.
const VERSION_FILE_NAME: &str = "db_version";
/// Migration related erorrs.
/// Migration related errors.
#[derive(Debug)]
pub enum Error {
/// Returned when current version cannot be read or guessed.
UnknownDatabaseVersion,
/// Existing DB is newer than the known one.
FutureDBVersion,
/// Migration was completed succesfully,
/// Migration using parity-ethereum 2.6.7 is required.
MigrationWithLegacyVersionRequired,
/// Migration was completed successfully,
/// but there was a problem with io.
Io(IoError),
}
@ -54,6 +51,9 @@ impl Display for Error {
Error::FutureDBVersion =>
"Secret Store database was created with newer client version.\
Upgrade your client or delete DB and resync.".into(),
Error::MigrationWithLegacyVersionRequired =>
"Secret Store database was created with an older client version.\
To migrate, use parity-ethereum v2.6.7, then retry using the latest.".into(),
Error::Io(ref err) =>
format!("Unexpected io error on Secret Store database migration: {}.", err),
};
@ -67,75 +67,17 @@ impl From<IoError> for Error {
}
}
// Moves "default" column to column 0 in preparation for a kvdb-rocksdb 0.3 migration.
fn migrate_to_v4(parent_dir: &str) -> Result<(), Error> {
// Naïve implementation until
// https://github.com/facebook/rocksdb/issues/6130 is resolved
let old_db_config = DatabaseConfig::with_columns(Some(1));
let new_db_config = DatabaseConfig::with_columns(Some(1));
const BATCH_SIZE: usize = 1024;
let old_dir = db_dir(parent_dir);
let new_dir = migration_dir(parent_dir);
let old_db = Database::open(&old_db_config, &old_dir)?;
let new_db = Database::open(&new_db_config, &new_dir)?;
const OLD_COLUMN: Option<u32> = None;
const NEW_COLUMN: Option<u32> = Some(0);
// remove legacy version key
{
let mut batch = DBTransaction::with_capacity(1);
batch.delete(OLD_COLUMN, LEGACY_DB_META_KEY_VERSION);
if let Err(err) = old_db.write(batch) {
error!(target: "migration", "Failed to delete db version {}", &err);
return Err(err.into());
}
}
let mut batch = DBTransaction::with_capacity(BATCH_SIZE);
for (i, (key, value)) in old_db.iter(OLD_COLUMN).enumerate() {
batch.put(NEW_COLUMN, &key, &value);
if i % BATCH_SIZE == 0 {
new_db.write(batch)?;
batch = DBTransaction::with_capacity(BATCH_SIZE);
info!(target: "migration", "Migrating Secret Store DB: {} keys written", i);
}
}
new_db.write(batch)?;
drop(new_db);
old_db.restore(&new_dir)?;
info!(target: "migration", "Secret Store migration finished");
Ok(())
}
/// Apply all migrations if possible.
pub fn upgrade_db(db_path: &str) -> Result<(), Error> {
match current_version(db_path)? {
old_version if old_version < CURRENT_VERSION => {
migrate_to_v4(db_path)?;
update_version(db_path)?;
Ok(())
Err(Error::MigrationWithLegacyVersionRequired)
},
CURRENT_VERSION => Ok(()),
_ => Err(Error::FutureDBVersion),
}
}
fn db_dir(path: &str) -> String {
let mut dir = PathBuf::from(path);
dir.push("db");
dir.to_string_lossy().to_string()
}
fn migration_dir(path: &str) -> String {
let mut dir = PathBuf::from(path);
dir.push("migration");
dir.to_string_lossy().to_string()
}
/// Returns the version file path.
fn version_file_path(path: &str) -> PathBuf {
let mut file_path = PathBuf::from(path);
@ -157,42 +99,3 @@ fn current_version(path: &str) -> Result<u8, Error> {
}
}
/// Writes current database version to the file.
/// Creates a new file if the version file does not exist yet.
fn update_version(path: &str) -> Result<(), Error> {
let mut file = fs::File::create(version_file_path(path))?;
file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use tempdir::TempDir;
#[test]
fn migration_works() -> Result<(), Error> {
let parent = TempDir::new("secret_store_migration")?.into_path();
let mut db_path = parent.clone();
db_path.push("db");
let db_path = db_path.to_str().unwrap();
let parent_path = parent.to_str().unwrap();
let old_db = Database::open(&DatabaseConfig::with_columns(None), db_path)?;
let mut batch = old_db.transaction();
batch.put(None, b"key1", b"value1");
batch.put(None, b"key2", b"value2");
old_db.write(batch)?;
drop(old_db);
upgrade_db(parent_path)?;
let migrated = Database::open(&DatabaseConfig::with_columns(Some(1)), db_path)?;
assert_eq!(migrated.get(Some(0), b"key1")?.expect("key1"), b"value1".to_vec());
assert_eq!(migrated.get(Some(0), b"key2")?.expect("key2"), b"value2".to_vec());
Ok(())
}
}


@ -10,11 +10,11 @@ edition = "2018"
parity-bytes = "0.1"
ethereum-types = "0.8.0"
hash-db = "0.15.0"
malloc_size_of = { version = "0.2", package = "parity-util-mem" }
malloc_size_of = { version = "0.3.0", package = "parity-util-mem" }
keccak-hasher = { path = "../keccak-hasher" }
kvdb = "0.1"
kvdb = "0.2"
log = "0.4"
memory-db = "0.15.0"
memory-db = "0.18.0"
parking_lot = "0.9"
fastmap = { path = "../../util/fastmap" }
rlp = "0.4.0"
@ -22,4 +22,4 @@ rlp = "0.4.0"
[dev-dependencies]
env_logger = "0.5"
keccak-hash = "0.4.0"
kvdb-memorydb = "0.1.2"
kvdb-memorydb = "0.2.0"


@ -46,12 +46,12 @@ pub struct ArchiveDB {
overlay: super::MemoryDB,
backing: Arc<dyn KeyValueDB>,
latest_era: Option<u64>,
column: Option<u32>,
column: u32,
}
impl ArchiveDB {
/// Create a new instance from a key-value db.
pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> ArchiveDB {
pub fn new(backing: Arc<dyn KeyValueDB>, column: u32) -> ArchiveDB {
let latest_era = backing.get(column, &LATEST_ERA_KEY)
.expect("Low-level database error.")
.map(|val| decode::<u64>(&val).expect("decoding db value failed"));
@ -214,7 +214,7 @@ mod tests {
#[test]
fn insert_same_in_fork() {
// history is 1
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
let x = jdb.insert(EMPTY_PREFIX, b"X");
commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap();
@ -236,7 +236,7 @@ mod tests {
#[test]
fn long_history() {
// history is 3
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
let h = jdb.insert(EMPTY_PREFIX, b"foo");
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
assert!(jdb.contains(&h, EMPTY_PREFIX));
@ -254,7 +254,7 @@ mod tests {
#[test]
#[should_panic]
fn multiple_owed_removal_not_allowed() {
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
let h = jdb.insert(EMPTY_PREFIX, b"foo");
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
assert!(jdb.contains(&h, EMPTY_PREFIX));
@ -268,7 +268,7 @@ mod tests {
#[test]
fn complex() {
// history is 1
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
@ -300,7 +300,7 @@ mod tests {
#[test]
fn fork() {
// history is 1
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
@ -326,7 +326,7 @@ mod tests {
#[test]
fn overwrite() {
// history is 1
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
@ -345,7 +345,7 @@ mod tests {
#[test]
fn fork_same_key() {
// history is 1
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
@ -361,26 +361,26 @@ mod tests {
#[test]
fn reopen() {
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let bar = H256::random();
let foo = {
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
// history is 1
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
jdb.emplace(bar.clone(), EMPTY_PREFIX, DBValue::from_slice(b"bar"));
jdb.emplace(bar.clone(), EMPTY_PREFIX, b"bar".to_vec());
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
foo
};
{
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
jdb.remove(&foo, EMPTY_PREFIX);
commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
}
{
let mut jdb = ArchiveDB::new(shared_db, None);
let mut jdb = ArchiveDB::new(shared_db, 0);
assert!(jdb.contains(&foo, EMPTY_PREFIX));
assert!(jdb.contains(&bar, EMPTY_PREFIX));
commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@ -389,10 +389,10 @@ mod tests {
#[test]
fn reopen_remove() {
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let foo = {
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
// history is 1
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
@ -406,7 +406,7 @@ mod tests {
};
{
let mut jdb = ArchiveDB::new(shared_db, None);
let mut jdb = ArchiveDB::new(shared_db, 0);
jdb.remove(&foo, EMPTY_PREFIX);
commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap();
assert!(jdb.contains(&foo, EMPTY_PREFIX));
@ -418,9 +418,9 @@ mod tests {
#[test]
fn reopen_fork() {
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let (foo, _, _) = {
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
// history is 1
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
@ -435,7 +435,7 @@ mod tests {
};
{
let mut jdb = ArchiveDB::new(shared_db, None);
let mut jdb = ArchiveDB::new(shared_db, 0);
commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
assert!(jdb.contains(&foo, EMPTY_PREFIX));
}
@ -443,17 +443,17 @@ mod tests {
#[test]
fn returns_state() {
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let key = {
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
let key = jdb.insert(EMPTY_PREFIX, b"foo");
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
key
};
{
let jdb = ArchiveDB::new(shared_db, None);
let jdb = ArchiveDB::new(shared_db, 0);
let state = jdb.state(&key);
assert!(state.is_some());
}
@ -461,11 +461,11 @@ mod tests {
#[test]
fn inject() {
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
let key = jdb.insert(EMPTY_PREFIX, b"dog");
inject_batch(&mut jdb).unwrap();
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog"));
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
jdb.remove(&key, EMPTY_PREFIX);
inject_batch(&mut jdb).unwrap();
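Taken together, the ArchiveDB changes above reduce to a small construction-and-read pattern; a minimal sketch, assuming the re-exports used here (ArchiveDB from this crate, HashDB/EMPTY_PREFIX from hash-db):

use std::sync::Arc;
use hash_db::{HashDB, EMPTY_PREFIX};
use journaldb::ArchiveDB; // path assumed; the type lives in this crate's archivedb module

let backing = Arc::new(kvdb_memorydb::create(1)); // the in-memory backing now needs at least one column family
let mut jdb = ArchiveDB::new(backing, 0);         // plain u32 column index instead of Option<u32>

let key = jdb.insert(EMPTY_PREFIX, b"dog");
// DBValue is now an alias for Vec<u8>, so reads compare against `.to_vec()` rather than DBValue::from_slice.
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());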


@ -111,20 +111,20 @@ pub struct EarlyMergeDB {
backing: Arc<dyn KeyValueDB>,
refs: Option<Arc<RwLock<HashMap<H256, RefInfo>>>>,
latest_era: Option<u64>,
column: Option<u32>,
column: u32,
}
impl EarlyMergeDB {
/// Create a new instance from file
pub fn new(backing: Arc<dyn KeyValueDB>, col: Option<u32>) -> EarlyMergeDB {
let (latest_era, refs) = EarlyMergeDB::read_refs(&*backing, col);
pub fn new(backing: Arc<dyn KeyValueDB>, column: u32) -> EarlyMergeDB {
let (latest_era, refs) = EarlyMergeDB::read_refs(&*backing, column);
let refs = Some(Arc::new(RwLock::new(refs)));
EarlyMergeDB {
overlay: new_memory_db(),
backing: backing,
refs: refs,
latest_era: latest_era,
column: col,
backing,
refs,
latest_era,
column,
}
}
@ -135,13 +135,13 @@ impl EarlyMergeDB {
}
// The next three are valid only as long as there is an insert operation of `key` in the journal.
fn set_already_in(batch: &mut DBTransaction, col: Option<u32>, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]); }
fn reset_already_in(batch: &mut DBTransaction, col: Option<u32>, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)); }
fn is_already_in(backing: &dyn KeyValueDB, col: Option<u32>, key: &H256) -> bool {
fn set_already_in(batch: &mut DBTransaction, col: u32, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]); }
fn reset_already_in(batch: &mut DBTransaction, col: u32, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)); }
fn is_already_in(backing: &dyn KeyValueDB, col: u32, key: &H256) -> bool {
backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
}
fn insert_keys(inserts: &[(H256, DBValue)], backing: &dyn KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction) {
fn insert_keys(inserts: &[(H256, DBValue)], backing: &dyn KeyValueDB, col: u32, refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction) {
for &(ref h, ref d) in inserts {
match refs.entry(*h) {
Entry::Occupied(mut entry) => {
@ -174,7 +174,7 @@ impl EarlyMergeDB {
}
}
fn replay_keys(inserts: &[H256], backing: &dyn KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>) {
fn replay_keys(inserts: &[H256], backing: &dyn KeyValueDB, col: u32, refs: &mut HashMap<H256, RefInfo>) {
trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs);
for h in inserts {
match refs.entry(*h) {
@ -195,7 +195,7 @@ impl EarlyMergeDB {
trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs);
}
fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction, col: Option<u32>, from: RemoveFrom) {
fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction, col: u32, from: RemoveFrom) {
// with a remove on {queue_refs: 1, in_archive: true}, we have two options:
// - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive)
// - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue)
@ -264,7 +264,7 @@ impl EarlyMergeDB {
.expect("Low-level database error. Some issue with your hard disk?")
}
fn read_refs(db: &dyn KeyValueDB, col: Option<u32>) -> (Option<u64>, HashMap<H256, RefInfo>) {
fn read_refs(db: &dyn KeyValueDB, col: u32) -> (Option<u64>, HashMap<H256, RefInfo>) {
let mut refs = HashMap::new();
let mut latest_era = None;
if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") {
@ -788,34 +788,34 @@ mod tests {
}
fn new_db() -> EarlyMergeDB {
let backing = Arc::new(kvdb_memorydb::create(0));
EarlyMergeDB::new(backing, None)
let backing = Arc::new(kvdb_memorydb::create(1));
EarlyMergeDB::new(backing, 0)
}
#[test]
fn reopen() {
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let bar = H256::random();
let foo = {
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
// history is 1
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
jdb.emplace(bar.clone(), EMPTY_PREFIX, DBValue::from_slice(b"bar"));
jdb.emplace(bar.clone(), EMPTY_PREFIX, b"bar".to_vec());
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
assert!(jdb.can_reconstruct_refs());
foo
};
{
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
jdb.remove(&foo, EMPTY_PREFIX);
commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
{
let mut jdb = EarlyMergeDB::new(shared_db, None);
let mut jdb = EarlyMergeDB::new(shared_db, 0);
assert!(jdb.contains(&foo, EMPTY_PREFIX));
assert!(jdb.contains(&bar, EMPTY_PREFIX));
commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@ -964,11 +964,11 @@ mod tests {
fn reopen_remove_three() {
let _ = ::env_logger::try_init();
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let foo = keccak(b"foo");
{
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
// history is 1
jdb.insert(EMPTY_PREFIX, b"foo");
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
@ -990,7 +990,7 @@ mod tests {
// incantation to reopen the db
}; {
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
jdb.remove(&foo, EMPTY_PREFIX);
commit_batch(&mut jdb, 4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
@ -999,7 +999,7 @@ mod tests {
// incantation to reopen the db
}; {
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
commit_batch(&mut jdb, 5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
assert!(jdb.can_reconstruct_refs());
@ -1007,7 +1007,7 @@ mod tests {
// incantation to reopen the db
}; {
let mut jdb = EarlyMergeDB::new(shared_db, None);
let mut jdb = EarlyMergeDB::new(shared_db, 0);
commit_batch(&mut jdb, 6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
assert!(jdb.can_reconstruct_refs());
@ -1017,10 +1017,10 @@ mod tests {
#[test]
fn reopen_fork() {
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let (foo, bar, baz) = {
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
// history is 1
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
@ -1038,7 +1038,7 @@ mod tests {
};
{
let mut jdb = EarlyMergeDB::new(shared_db, None);
let mut jdb = EarlyMergeDB::new(shared_db, 0);
commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo, EMPTY_PREFIX));
@ -1053,7 +1053,7 @@ mod tests {
let key = jdb.insert(EMPTY_PREFIX, b"dog");
inject_batch(&mut jdb).unwrap();
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog"));
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
jdb.remove(&key, EMPTY_PREFIX);
inject_batch(&mut jdb).unwrap();
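The same shift applies to the raw kvdb calls the ref-tracking helpers above make: columns are plain u32 indices on both DBTransaction and KeyValueDB. A hedged sketch of the put/get round trip, mirroring set_already_in / is_already_in (the key and flag byte are illustrative):

use kvdb::{DBTransaction, KeyValueDB};

let db = kvdb_memorydb::create(1);
let col = 0u32;

// Write a one-byte "already in" flag, the way set_already_in does.
let mut batch = DBTransaction::new();
batch.put(col, b"morph-key", &[1u8]);
db.write(batch).expect("in-memory write should not fail");

// is_already_in-style check: any value under the key counts as present.
assert!(db.get(col, b"morph-key").expect("low-level database error").is_some());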


@ -185,7 +185,7 @@ impl fmt::Display for Algorithm {
}
/// Create a new `JournalDB` trait object over a generic key-value database.
pub fn new(backing: Arc<dyn (::kvdb::KeyValueDB)>, algorithm: Algorithm, col: Option<u32>) -> Box<dyn JournalDB> {
pub fn new(backing: Arc<dyn (::kvdb::KeyValueDB)>, algorithm: Algorithm, col: u32) -> Box<dyn JournalDB> {
match algorithm {
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)),
Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)),
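Callers of this factory now pass the column index directly; a minimal sketch, with the column constant made up for illustration:

use std::sync::Arc;
use journaldb::{Algorithm, JournalDB};

const STATE_COL: u32 = 0; // illustrative; real callers pass their own column constant

let backing = Arc::new(kvdb_memorydb::create(1));
let journal_db: Box<dyn JournalDB> = journaldb::new(backing, Algorithm::OverlayRecent, STATE_COL);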


@ -43,7 +43,7 @@ use crate::{error_negatively_reference_hash, new_memory_db};
pub struct OverlayDB {
overlay: super::MemoryDB,
backing: Arc<dyn KeyValueDB>,
column: Option<u32>,
column: u32,
}
struct Payload {
@ -72,7 +72,7 @@ impl Decodable for Payload {
fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
let payload = Payload {
count: rlp.val_at(0)?,
value: DBValue::from_slice(rlp.at(1)?.data()?),
value: rlp.at(1)?.data()?.to_vec(),
};
Ok(payload)
@ -81,7 +81,7 @@ impl Decodable for Payload {
impl OverlayDB {
/// Create a new instance of OverlayDB given a `backing` database.
pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> OverlayDB {
pub fn new(backing: Arc<dyn KeyValueDB>, column: u32) -> OverlayDB {
OverlayDB {
overlay: new_memory_db(),
backing,
@ -92,8 +92,8 @@ impl OverlayDB {
/// Create a new instance of OverlayDB with an anonymous temporary database.
#[cfg(test)]
pub fn new_temp() -> OverlayDB {
let backing = Arc::new(::kvdb_memorydb::create(0));
Self::new(backing, None)
let backing = Arc::new(::kvdb_memorydb::create(1));
Self::new(backing, 0)
}
/// Commit all operations in a single batch.
@ -251,7 +251,7 @@ mod tests {
fn overlaydb_overlay_insert_and_remove() {
let mut trie = OverlayDB::new_temp();
let h = trie.insert(EMPTY_PREFIX, b"hello world");
assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world"));
assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), b"hello world".to_vec());
trie.remove(&h, EMPTY_PREFIX);
assert_eq!(trie.get(&h, EMPTY_PREFIX), None);
}
@ -260,9 +260,9 @@ mod tests {
fn overlaydb_backing_insert_revert() {
let mut trie = OverlayDB::new_temp();
let h = trie.insert(EMPTY_PREFIX, b"hello world");
assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world"));
assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), b"hello world".to_vec());
trie.commit().unwrap();
assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world"));
assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), b"hello world".to_vec());
}
#[test]
@ -300,29 +300,29 @@ mod tests {
fn overlaydb_complex() {
let mut trie = OverlayDB::new_temp();
let hfoo = trie.insert(EMPTY_PREFIX, b"foo");
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
let hbar = trie.insert(EMPTY_PREFIX, b"bar");
assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar"));
assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), b"bar".to_vec());
trie.commit().unwrap();
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar"));
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), b"bar".to_vec());
trie.insert(EMPTY_PREFIX, b"foo"); // two refs
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
trie.commit().unwrap();
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar"));
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), b"bar".to_vec());
trie.remove(&hbar, EMPTY_PREFIX); // zero refs - delete
assert_eq!(trie.get(&hbar, EMPTY_PREFIX), None);
trie.remove(&hfoo, EMPTY_PREFIX); // one ref - keep
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
trie.commit().unwrap();
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
trie.remove(&hfoo, EMPTY_PREFIX); // zero ref - would delete, but...
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None);
trie.insert(EMPTY_PREFIX, b"foo"); // one ref - keep after all.
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
trie.commit().unwrap();
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
trie.remove(&hfoo, EMPTY_PREFIX); // zero ref - delete
assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None);
trie.commit().unwrap(); //
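A short sketch of the resulting OverlayDB round trip, with values read back as plain Vec<u8> (construction mirrors new_temp above; trait imports assumed):

use std::sync::Arc;
use hash_db::{HashDB, EMPTY_PREFIX};

let backing = Arc::new(kvdb_memorydb::create(1));
let mut db = OverlayDB::new(backing, 0); // same column convention as new_temp above

let h = db.insert(EMPTY_PREFIX, b"hello world");
// Served from the overlay before commit...
assert_eq!(db.get(&h, EMPTY_PREFIX).unwrap(), b"hello world".to_vec());

db.commit().expect("flushing the overlay into the backing column");
// ...and from the backing column afterwards, still a plain Vec<u8>.
assert_eq!(db.get(&h, EMPTY_PREFIX).unwrap(), b"hello world".to_vec());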


@ -74,7 +74,7 @@ pub struct OverlayRecentDB {
transaction_overlay: super::MemoryDB,
backing: Arc<dyn KeyValueDB>,
journal_overlay: Arc<RwLock<JournalOverlay>>,
column: Option<u32>,
column: u32,
}
struct DatabaseValue {
@ -88,7 +88,7 @@ impl Decodable for DatabaseValue {
let id = rlp.val_at(0)?;
let inserts = rlp.at(1)?.iter().map(|r| {
let k = r.val_at(0)?;
let v = DBValue::from_slice(r.at(1)?.data()?);
let v = r.at(1)?.data()?.to_vec();
Ok((k, v))
}).collect::<Result<Vec<_>, _>>()?;
let deletes = rlp.list_at(2)?;
@ -153,12 +153,12 @@ impl Clone for OverlayRecentDB {
impl OverlayRecentDB {
/// Create a new instance.
pub fn new(backing: Arc<dyn KeyValueDB>, col: Option<u32>) -> OverlayRecentDB {
pub fn new(backing: Arc<dyn KeyValueDB>, col: u32) -> OverlayRecentDB {
let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&*backing, col)));
OverlayRecentDB {
transaction_overlay: new_memory_db(),
backing: backing,
journal_overlay: journal_overlay,
backing,
journal_overlay,
column: col,
}
}
@ -180,7 +180,7 @@ impl OverlayRecentDB {
.expect("Low-level database error. Some issue with your hard disk?")
}
fn read_overlay(db: &dyn KeyValueDB, col: Option<u32>) -> JournalOverlay {
fn read_overlay(db: &dyn KeyValueDB, col: u32) -> JournalOverlay {
let mut journal = HashMap::new();
let mut overlay = new_memory_db();
let mut count = 0;
@ -281,9 +281,9 @@ impl JournalDB for OverlayRecentDB {
fn state(&self, key: &H256) -> Option<Bytes> {
let journal_overlay = self.journal_overlay.read();
let key = to_short_key(key);
journal_overlay.backing_overlay.get(&key, EMPTY_PREFIX).map(|v| v.into_vec())
.or_else(|| journal_overlay.pending_overlay.get(&key).map(|d| d.clone().into_vec()))
.or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.into_vec()))
journal_overlay.backing_overlay.get(&key, EMPTY_PREFIX)
.or_else(|| journal_overlay.pending_overlay.get(&key).map(|d| d.clone()))
.or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.to_vec()))
}
fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
@ -500,8 +500,8 @@ mod tests {
use crate::{JournalDB, inject_batch, commit_batch};
fn new_db() -> OverlayRecentDB {
let backing = Arc::new(kvdb_memorydb::create(0));
OverlayRecentDB::new(backing, None)
let backing = Arc::new(kvdb_memorydb::create(1));
OverlayRecentDB::new(backing, 0)
}
#[test]
@ -742,28 +742,28 @@ mod tests {
#[test]
fn reopen() {
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let bar = H256::random();
let foo = {
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
// history is 1
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
jdb.emplace(bar.clone(), EMPTY_PREFIX, DBValue::from_slice(b"bar"));
jdb.emplace(bar.clone(), EMPTY_PREFIX, b"bar".to_vec());
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
assert!(jdb.can_reconstruct_refs());
foo
};
{
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
jdb.remove(&foo, EMPTY_PREFIX);
commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
assert!(jdb.can_reconstruct_refs());
}
{
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
assert!(jdb.contains(&foo, EMPTY_PREFIX));
assert!(jdb.contains(&bar, EMPTY_PREFIX));
commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@ -909,11 +909,11 @@ mod tests {
fn reopen_remove_three() {
let _ = ::env_logger::try_init();
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let foo = keccak(b"foo");
{
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
// history is 1
jdb.insert(EMPTY_PREFIX, b"foo");
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
@ -935,7 +935,7 @@ mod tests {
// incantation to reopen the db
}; {
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
jdb.remove(&foo, EMPTY_PREFIX);
commit_batch(&mut jdb, 4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
@ -944,7 +944,7 @@ mod tests {
// incantation to reopen the db
}; {
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
commit_batch(&mut jdb, 5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
assert!(jdb.can_reconstruct_refs());
@ -952,7 +952,7 @@ mod tests {
// incantation to reopen the db
}; {
let mut jdb = OverlayRecentDB::new(shared_db, None);
let mut jdb = OverlayRecentDB::new(shared_db, 0);
commit_batch(&mut jdb, 6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
assert!(jdb.can_reconstruct_refs());
@ -962,10 +962,10 @@ mod tests {
#[test]
fn reopen_fork() {
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
let (foo, bar, baz) = {
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
// history is 1
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
@ -983,7 +983,7 @@ mod tests {
};
{
let mut jdb = OverlayRecentDB::new(shared_db, None);
let mut jdb = OverlayRecentDB::new(shared_db, 0);
commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
assert!(jdb.can_reconstruct_refs());
assert!(jdb.contains(&foo, EMPTY_PREFIX));
@ -1018,7 +1018,7 @@ mod tests {
let key = jdb.insert(EMPTY_PREFIX, b"dog");
inject_batch(&mut jdb).unwrap();
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog"));
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
jdb.remove(&key, EMPTY_PREFIX);
inject_batch(&mut jdb).unwrap();
@ -1027,10 +1027,10 @@ mod tests {
#[test]
fn earliest_era() {
let shared_db = Arc::new(kvdb_memorydb::create(0));
let shared_db = Arc::new(kvdb_memorydb::create(1));
// empty DB
let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
assert!(jdb.earliest_era().is_none());
// single journalled era.
@ -1064,7 +1064,7 @@ mod tests {
// reconstructed: no journal entries.
drop(jdb);
let jdb = OverlayRecentDB::new(shared_db, None);
let jdb = OverlayRecentDB::new(shared_db, 0);
assert_eq!(jdb.earliest_era(), None);
}
}
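For callers, `state` therefore yields an owned Vec<u8> with no `into_vec` conversions; a hedged caller-side sketch, with jdb and node_hash as illustrative names:

// `jdb` is an OverlayRecentDB, `node_hash` the H256 key of a journalled node.
if let Some(bytes) = jdb.state(&node_hash) {
	// `bytes: Vec<u8>` came from the backing overlay, the pending overlay,
	// or a prefix lookup in the backing column, in that order.
	let _len = bytes.len();
}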


@ -63,12 +63,12 @@ pub struct RefCountedDB {
latest_era: Option<u64>,
inserts: Vec<H256>,
removes: Vec<H256>,
column: Option<u32>,
column: u32,
}
impl RefCountedDB {
/// Create a new instance given a `backing` database.
pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB {
pub fn new(backing: Arc<dyn KeyValueDB>, column: u32) -> RefCountedDB {
let latest_era = backing.get(column, &LATEST_ERA_KEY)
.expect("Low-level database error.")
.map(|v| decode::<u64>(&v).expect("decoding db value failed"));
@ -229,8 +229,8 @@ mod tests {
use crate::{JournalDB, inject_batch, commit_batch};
fn new_db() -> RefCountedDB {
let backing = Arc::new(kvdb_memorydb::create(0));
RefCountedDB::new(backing, None)
let backing = Arc::new(kvdb_memorydb::create(1));
RefCountedDB::new(backing, 0)
}
#[test]
@ -342,7 +342,7 @@ mod tests {
let key = jdb.insert(EMPTY_PREFIX, b"dog");
inject_batch(&mut jdb).unwrap();
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog"));
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
jdb.remove(&key, EMPTY_PREFIX);
inject_batch(&mut jdb).unwrap();


@ -6,5 +6,5 @@ description = "An LRU-cache which operates on memory used"
license = "GPL3"
[dependencies]
parity-util-mem = "0.2.0"
parity-util-mem = "0.3.0"
lru-cache = "0.1"


@ -6,8 +6,8 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
log = "0.4"
macros = { path = "../macros" }
kvdb = "0.1"
kvdb-rocksdb = "0.2.0"
kvdb = "0.2"
kvdb-rocksdb = "0.3.0"
[dev-dependencies]
tempdir = "0.3"


@ -58,16 +58,16 @@ impl Default for Config {
pub struct Batch {
inner: BTreeMap<Vec<u8>, Vec<u8>>,
batch_size: usize,
column: Option<u32>,
column: u32,
}
impl Batch {
/// Make a new batch with the given config.
pub fn new(config: &Config, col: Option<u32>) -> Self {
pub fn new(config: &Config, column: u32) -> Self {
Batch {
inner: BTreeMap::new(),
batch_size: config.batch_size,
column: col,
column,
}
}
@ -98,39 +98,39 @@ impl Batch {
/// A generalized migration from the given db to a destination db.
pub trait Migration: 'static {
/// Number of columns in the database before the migration.
fn pre_columns(&self) -> Option<u32> { self.columns() }
fn pre_columns(&self) -> u32 { self.columns() }
/// Number of columns in database after the migration.
fn columns(&self) -> Option<u32>;
fn columns(&self) -> u32;
/// Whether this migration alters any existing columns.
/// if not, then column families will simply be added and `migrate` will never be called.
fn alters_existing(&self) -> bool { true }
/// Version of the database after the migration.
fn version(&self) -> u32;
/// Migrate a source to a destination.
fn migrate(&mut self, source: Arc<Database>, config: &Config, destination: &mut Database, col: Option<u32>) -> io::Result<()>;
fn migrate(&mut self, source: Arc<Database>, config: &Config, destination: &mut Database, col: u32) -> io::Result<()>;
}
/// A simple migration over key-value pairs of a single column.
pub trait SimpleMigration: 'static {
/// Number of columns in database after the migration.
fn columns(&self) -> Option<u32>;
fn columns(&self) -> u32;
/// Version of database after the migration.
fn version(&self) -> u32;
/// Index of column which should be migrated.
fn migrated_column_index(&self) -> Option<u32>;
fn migrated_column_index(&self) -> u32;
/// Should migrate existing object to new database.
/// Returns `None` if the object does not exist in new version of database.
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)>;
}
impl<T: SimpleMigration> Migration for T {
fn columns(&self) -> Option<u32> { SimpleMigration::columns(self) }
fn version(&self) -> u32 { SimpleMigration::version(self) }
fn columns(&self) -> u32 { SimpleMigration::columns(self) }
fn alters_existing(&self) -> bool { true }
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> io::Result<()> {
fn version(&self) -> u32 { SimpleMigration::version(self) }
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: u32) -> io::Result<()> {
let migration_needed = col == SimpleMigration::migrated_column_index(self);
let mut batch = Batch::new(config, col);
@ -151,19 +151,19 @@ impl<T: SimpleMigration> Migration for T {
/// An even simpler migration which just changes the number of columns.
pub struct ChangeColumns {
/// The amount of columns before this migration.
pub pre_columns: Option<u32>,
pub pre_columns: u32,
/// The amount of columns after this migration.
pub post_columns: Option<u32>,
pub post_columns: u32,
/// The version after this migration.
pub version: u32,
}
impl Migration for ChangeColumns {
fn pre_columns(&self) -> Option<u32> { self.pre_columns }
fn columns(&self) -> Option<u32> { self.post_columns }
fn version(&self) -> u32 { self.version }
fn pre_columns(&self) -> u32 { self.pre_columns }
fn columns(&self) -> u32 { self.post_columns }
fn alters_existing(&self) -> bool { false }
fn migrate(&mut self, _: Arc<Database>, _: &Config, _: &mut Database, _: Option<u32>) -> io::Result<()> {
fn version(&self) -> u32 { self.version }
fn migrate(&mut self, _: Arc<Database>, _: &Config, _: &mut Database, _: u32) -> io::Result<()> {
Ok(())
}
}
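A minimal sketch of an implementor under the new u32-only trait shape (the struct name and the value tweak are illustrative), plus the in-place column-count variant:

// Rewrites every entry of column 0 and bumps the database version to 2.
struct AppendMarker;

impl SimpleMigration for AppendMarker {
	fn columns(&self) -> u32 { 1 }
	fn version(&self) -> u32 { 2 }
	fn migrated_column_index(&self) -> u32 { 0 }

	fn simple_migrate(&mut self, key: Vec<u8>, mut value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
		// Returning None would drop the entry from the migrated database.
		value.push(0xff);
		Some((key, value))
	}
}

// Column-count-only migrations stay declarative and run in place: grow from 1 to 4 column families.
let _change = ChangeColumns { pre_columns: 1, post_columns: 4, version: 1 };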
@ -211,7 +211,7 @@ impl Manager {
/// Creates new migration manager with given configuration.
pub fn new(config: Config) -> Self {
Manager {
config: config,
config,
migrations: vec![],
}
}
@ -239,9 +239,8 @@ impl Manager {
return Err(other_io_err("Migration impossible"));
};
let columns = migrations.get(0).and_then(|m| m.pre_columns());
trace!(target: "migration", "Expecting database to contain {:?} columns", columns);
let columns = migrations.first().expect("checked empty above; qed").pre_columns();
trace!(target: "migration", "Expecting database to contain {} columns", columns);
let mut db_config = DatabaseConfig {
max_open_files: 64,
compaction: config.compaction_profile,
@ -271,16 +270,10 @@ impl Manager {
let temp_path_str = temp_path.to_str().ok_or_else(|| other_io_err("Migration impossible."))?;
let mut new_db = Database::open(&db_config, temp_path_str)?;
match current_columns {
// migrate only default column
None => migration.migrate(cur_db.clone(), &config, &mut new_db, None)?,
Some(v) => {
// Migrate all columns in previous DB
for col in 0..v {
migration.migrate(cur_db.clone(), &config, &mut new_db, Some(col))?
}
}
for col in 0..current_columns {
migration.migrate(cur_db.clone(), &config, &mut new_db, col)?
}
// next iteration, we will migrate from this db into the other temp.
cur_db = Arc::new(new_db);
temp_idx.swap();
@ -290,13 +283,13 @@ impl Manager {
} else {
// migrations which simply add or remove column families.
// we can do this in-place.
let goal_columns = migration.columns().unwrap_or(0);
let goal_columns = migration.columns();
while cur_db.num_columns() < goal_columns {
cur_db.add_column().map_err(other_io_err)?;
}
while cur_db.num_columns() > goal_columns {
cur_db.drop_column().map_err(other_io_err)?;
cur_db.remove_last_column().map_err(other_io_err)?;
}
}
}


@ -29,7 +29,7 @@ use std::io;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tempdir::TempDir;
use kvdb_rocksdb::Database;
use kvdb_rocksdb::{Database, DatabaseConfig};
use migration::{Batch, Config, SimpleMigration, Migration, Manager, ChangeColumns};
#[inline]
@ -39,11 +39,11 @@ fn db_path(path: &Path) -> PathBuf {
// initialize a database at the given directory with the given values.
fn make_db(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
let db = Database::open_default(path.to_str().unwrap()).expect("failed to open temp database");
let db = Database::open(&DatabaseConfig::default(), path.to_str().unwrap()).expect("failed to open temp database");
{
let mut transaction = db.transaction();
for (k, v) in pairs {
transaction.put(None, &k, &v);
transaction.put(0, &k, &v);
}
db.write(transaction).expect("failed to write db transaction");
@ -52,10 +52,12 @@ fn make_db(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
// helper for verifying a migrated database.
fn verify_migration(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
let db = Database::open_default(path.to_str().unwrap()).unwrap();
let db = Database::open(&DatabaseConfig::default(), path.to_str().expect("valid path")).expect("database should be there");
for (k, v) in pairs {
let x = db.get(None, &k).unwrap().unwrap();
let x = db.get(0, &k)
.expect("database IO should work")
.expect(&format!("key={:?} should be in column 0 in the db", &k));
assert_eq!(&x[..], &v[..]);
}
@ -64,18 +66,9 @@ fn verify_migration(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
struct Migration0;
impl SimpleMigration for Migration0 {
fn columns(&self) -> Option<u32> {
None
}
fn version(&self) -> u32 {
1
}
fn migrated_column_index(&self) -> Option<u32> {
None
}
fn columns(&self) -> u32 { 1 }
fn version(&self) -> u32 { 1 }
fn migrated_column_index(&self) -> u32 { 0 }
fn simple_migrate(&mut self, mut key: Vec<u8>, mut value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
key.push(0x11);
value.push(0x22);
@ -87,18 +80,9 @@ impl SimpleMigration for Migration0 {
struct Migration1;
impl SimpleMigration for Migration1 {
fn columns(&self) -> Option<u32> {
None
}
fn version(&self) -> u32 {
2
}
fn migrated_column_index(&self) -> Option<u32> {
None
}
fn columns(&self) -> u32 { 1 }
fn version(&self) -> u32 { 2 }
fn migrated_column_index(&self) -> u32 { 0 }
fn simple_migrate(&mut self, key: Vec<u8>, _value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
Some((key, vec![]))
}
@ -107,20 +91,17 @@ impl SimpleMigration for Migration1 {
struct AddsColumn;
impl Migration for AddsColumn {
fn pre_columns(&self) -> Option<u32> { None }
fn columns(&self) -> Option<u32> { Some(1) }
fn pre_columns(&self) -> u32 { 1 }
fn columns(&self) -> u32 { 1 }
fn version(&self) -> u32 { 1 }
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> io::Result<()> {
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: u32) -> io::Result<()> {
let mut batch = Batch::new(config, col);
for (key, value) in source.iter(col) {
batch.insert(key.into_vec(), value.into_vec(), dest)?;
}
if col == Some(1) {
if col == 1 {
batch.insert(vec![1, 2, 3], vec![4, 5, 6], dest)?;
}
@ -204,8 +185,8 @@ fn first_and_noop_migration() {
make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]);
let expected = map![vec![0x11] => vec![0x22], vec![1, 0x11] => vec![1, 0x22]];
manager.add_migration(Migration0).unwrap();
let end_path = manager.execute(&db_path, 0).unwrap();
manager.add_migration(Migration0).expect("Migration0 can be added");
let end_path = manager.execute(&db_path, 0).expect("Migration0 runs clean");
verify_migration(&end_path, expected);
}
@ -254,8 +235,8 @@ fn change_columns() {
let mut manager = Manager::new(Config::default());
manager.add_migration(ChangeColumns {
pre_columns: None,
post_columns: Some(4),
pre_columns: 1,
post_columns: 4,
version: 1,
}).unwrap();
@ -266,7 +247,7 @@ fn change_columns() {
assert_eq!(db_path, new_path, "Changing columns is an in-place migration.");
let config = DatabaseConfig::with_columns(Some(4));
let config = DatabaseConfig::with_columns(4);
let db = Database::open(&config, new_path.to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 4);
}


@ -6,7 +6,7 @@ description = "Merkle-Patricia Trie (Ethereum Style)"
license = "GPL-3.0"
[dependencies]
trie-db = "0.16.0"
trie-db = "0.18.0"
keccak-hasher = { version = "0.1.1", path = "../keccak-hasher" }
hash-db = "0.15.0"
rlp = "0.4.4"
@ -15,7 +15,7 @@ ethereum-types = "0.8.0"
elastic-array = "0.10"
[dev-dependencies]
memory-db = "0.15.0"
memory-db = "0.18.0"
keccak-hash = "0.4.0"
journaldb = { path = "../journaldb" }
criterion = "0.3"


@ -78,7 +78,7 @@ impl trie_db::TrieLayout for Layout {
/// TrieDBMut::new(&mut memdb, &mut root).insert(b"foo", b"bar").unwrap();
/// let t = TrieDB::new(&memdb, &root).unwrap();
/// assert!(t.contains(b"foo").unwrap());
/// assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar"));
/// assert_eq!(t.get(b"foo").unwrap().unwrap(), b"bar".to_vec());
/// }
/// ```
pub type TrieDB<'db> = trie::TrieDB<'db, Layout>;
@ -125,7 +125,7 @@ pub type FatDB<'db> = trie::FatDB<'db, Layout>;
/// assert_eq!(*t.root(), KECCAK_NULL_RLP);
/// t.insert(b"foo", b"bar").unwrap();
/// assert!(t.contains(b"foo").unwrap());
/// assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar"));
/// assert_eq!(t.get(b"foo").unwrap().unwrap(), b"bar".to_vec());
/// t.remove(b"foo").unwrap();
/// assert!(!t.contains(b"foo").unwrap());
/// }


@ -6,6 +6,6 @@ description = "Trie-root helpers, ethereum style"
license = "GPL-3.0"
[dependencies]
triehash = "0.8.0"
triehash = "0.8.2"
ethereum-types = "0.8.0"
keccak-hasher = { path = "../keccak-hasher" }