Update to latest kvdb-*: no default column, DBValue is Vec (#11312)

* Only use kvdb "column families". This PR contains the changes necessary to use the `kvdb-*` crates from https://github.com/paritytech/parity-common/pull/278 (so a synchronized merge is required), which drop support for the old-style rocksdb "default" column in order to get a smaller and less complex API. As it stands this PR is working correctly except for secret-store; we need to migrate it to use a new column family.
* Fix secretstore build
* Fix secretstore build: include ethkey when building with the "accounts" feature
* Fix typos
* Restore state test commit
* Override all of parity-common from git
* Be precise about version requirement to migrate secretstore code
* Update ethcore/db/src/db.rs (Co-Authored-By: Niklas Adolfsson <niklasadolfsson1@gmail.com>)
* Address review grumbles
* Review grumbles
* Cleanup

Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com>
This commit is contained in:
parent
860ef19e95
commit
b9f9d11929
538 changed lines in Cargo.lock (generated). File diff suppressed because it is too large.
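Most of the diff below is mechanical fallout from two API changes in the new kvdb-* releases: column identifiers change from Option<u32> (where None meant the old rocksdb "default" column) to plain u32, and DBValue becomes a plain Vec<u8>, so the ElasticArray-style helpers (DBValue::from_slice, DBValue::from_vec, into_vec, append_slice) go away. A minimal, self-contained sketch of the pattern; the constants here are illustrative stand-ins, not a file from this repository:

// Sketch of the migration pattern applied throughout this diff.

/// In the new kvdb, DBValue is a plain byte vector (previously an ElasticArray).
type DBValue = Vec<u8>;

/// Columns are now plain u32 indexes; the Option<u32> "default column" is gone.
const COL_STATE: u32 = 0;

/// RLP encoding of the empty string (0x80), as used for NULL_RLP.
const NULL_RLP: [u8; 1] = [0x80];

fn main() {
	// Old: Some(DBValue::from_slice(&NULL_RLP))
	let v: Option<DBValue> = Some(NULL_RLP.to_vec());

	// Old: v.map(|x| x.into_vec()) is no longer needed; a DBValue already is a Vec<u8>.
	assert_eq!(v, Some(vec![0x80]));
	let _ = COL_STATE;
}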
@@ -39,8 +39,8 @@ futures = "0.1"
 journaldb = { path = "util/journaldb" }
 jsonrpc-core = "14.0.3"
 keccak-hash = "0.4.0"
-kvdb = "0.1"
-kvdb-rocksdb = "0.2.0"
+kvdb = "0.2"
+kvdb-rocksdb = "0.3.0"
 log = "0.4"
 migration-rocksdb = { path = "util/migration-rocksdb" }
 node-filter = { path = "ethcore/node-filter" }
@@ -57,7 +57,7 @@ parity-path = "0.1"
 parity-rpc = { path = "rpc" }
 parity-runtime = { path = "util/runtime" }
 parity-updater = { path = "updater" }
-parity-util-mem = { version = "0.2.0", features = ["jemalloc-global"] }
+parity-util-mem = { version = "0.3.0", features = ["jemalloc-global"] }
 parity-version = { path = "util/version" }
 parking_lot = "0.9"
 regex = "1.0"
@@ -135,4 +135,3 @@ members = [
 	"evmbin",
 	"parity-clib",
 ]
-
@@ -31,9 +31,9 @@ hash-db = "0.15.0"
 itertools = "0.5"
 journaldb = { path = "../util/journaldb" }
 keccak-hash = "0.4.0"
-kvdb = "0.1"
-kvdb-memorydb = { version = "0.1.2", optional = true }
-kvdb-rocksdb = { version = "0.2.0", optional = true }
+kvdb = "0.2"
+kvdb-memorydb = { version = "0.2.0", optional = true }
+kvdb-rocksdb = { version = "0.3.0", optional = true }
 lazy_static = { version = "1.3", optional = true }
 log = "0.4"
 macros = { path = "../util/macros", optional = true }
@@ -42,7 +42,7 @@ memory-cache = { path = "../util/memory-cache" }
 parity-bytes = "0.1"
 parking_lot = "0.9"
 pod = { path = "pod", optional = true }
-trie-db = "0.16.0"
+trie-db = "0.18.0"
 parity-crypto = { version = "0.4.2", features = ["publickey"], optional = true }
 patricia-trie-ethereum = { path = "../util/patricia-trie-ethereum" }
 rand = "0.7"
@@ -78,8 +78,8 @@ ethcore-builtin = { path = "./builtin" }
 ethjson = { path = "../json", features = ["test-helpers"] }
 parity-crypto = { version = "0.4.2", features = ["publickey"] }
 fetch = { path = "../util/fetch" }
-kvdb-memorydb = "0.1.2"
-kvdb-rocksdb = "0.2.0"
+kvdb-memorydb = "0.2.0"
+kvdb-rocksdb = "0.3.0"
 lazy_static = "1.3"
 machine = { path = "./machine", features = ["test-helpers"] }
 macros = { path = "../util/macros" }
@@ -11,5 +11,5 @@ ethereum-types = "0.8.0"
 hash-db = "0.15.0"
 keccak-hash = "0.4.0"
 keccak-hasher = { path = "../../util/keccak-hasher" }
-kvdb = "0.1"
+kvdb = "0.2"
 rlp = "0.4"
@@ -93,7 +93,7 @@ impl<'db> AsHashDB<KeccakHasher, DBValue> for AccountDB<'db> {
 impl<'db> HashDB<KeccakHasher, DBValue> for AccountDB<'db> {
 	fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> {
 		if key == &KECCAK_NULL_RLP {
-			return Some(DBValue::from_slice(&NULL_RLP));
+			return Some(NULL_RLP.to_vec());
 		}
 		self.db.get(&combine_key(&self.address_hash, key), prefix)
 	}
@@ -139,7 +139,7 @@ impl<'db> AccountDBMut<'db> {
 impl<'db> HashDB<KeccakHasher, DBValue> for AccountDBMut<'db>{
 	fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> {
 		if key == &KECCAK_NULL_RLP {
-			return Some(DBValue::from_slice(&NULL_RLP));
+			return Some(NULL_RLP.to_vec());
 		}
 		self.db.get(&combine_key(&self.address_hash, key), prefix)
 	}
@@ -157,7 +157,7 @@ impl<'db> HashDB<KeccakHasher, DBValue> for AccountDBMut<'db>{
 		}
 		let k = keccak(value);
 		let ak = combine_key(&self.address_hash, &k);
-		self.db.emplace(ak, prefix, DBValue::from_slice(value));
+		self.db.emplace(ak, prefix, value.to_vec());
 		k
 	}

@@ -193,7 +193,7 @@ impl<'db> AsHashDB<KeccakHasher, DBValue> for Wrapping<'db> {
 impl<'db> HashDB<KeccakHasher, DBValue> for Wrapping<'db> {
 	fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> {
 		if key == &KECCAK_NULL_RLP {
-			return Some(DBValue::from_slice(&NULL_RLP));
+			return Some(NULL_RLP.to_vec());
 		}
 		self.0.get(key, prefix)
 	}
@@ -227,7 +227,7 @@ impl<'db> AsHashDB<KeccakHasher, DBValue> for WrappingMut<'db> {
 impl<'db> HashDB<KeccakHasher, DBValue> for WrappingMut<'db>{
 	fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> {
 		if key == &KECCAK_NULL_RLP {
-			return Some(DBValue::from_slice(&NULL_RLP));
+			return Some(NULL_RLP.to_vec());
 		}
 		self.0.get(key, prefix)
 	}
@@ -16,18 +16,18 @@ hash-db = "0.15.0"
 journaldb = { path = "../../util/journaldb" }
 keccak-hash = "0.4.0"
 keccak-hasher = { path = "../../util/keccak-hasher" }
-kvdb = "0.1"
+kvdb = "0.2"
 log = "0.4"
 lru-cache = "0.1.2"
-memory-db = "0.15.0"
+memory-db = "0.18.0"
 parity-bytes = "0.1.0"
-parity-util-mem = "0.2.0"
+parity-util-mem = "0.3.0"
 parking_lot = "0.9"
 pod = { path = "../pod" }
 rlp = "0.4.0"
 serde = { version = "1.0", features = ["derive"] }
 trace = { path = "../trace" }
-trie-db = "0.16.0"
+trie-db = "0.18.0"

 [dev-dependencies]
 account-db = { path = "../account-db" }
@@ -390,7 +390,7 @@ impl Account {
 		match db.get(&self.code_hash, hash_db::EMPTY_PREFIX) {
 			Some(x) => {
 				self.code_size = Some(x.len());
-				self.code_cache = Arc::new(x.into_vec());
+				self.code_cache = Arc::new(x);
 				Some(self.code_cache.clone())
 			},
 			_ => {
@@ -530,7 +530,7 @@ impl Account {
 				self.code_filth = Filth::Clean;
 			},
 			(true, false) => {
-				db.emplace(self.code_hash.clone(), hash_db::EMPTY_PREFIX, DBValue::from_slice(&*self.code_cache));
+				db.emplace(self.code_hash.clone(), hash_db::EMPTY_PREFIX, self.code_cache.to_vec());
 				self.code_size = Some(self.code_cache.len());
 				self.code_filth = Filth::Clean;
 			},
@@ -14,9 +14,9 @@ common-types = { path = "../types" }
 ethcore-db = { path = "../db" }
 ethereum-types = "0.8.0"
 keccak-hash = "0.4.0"
-parity-util-mem = "0.2.0"
+parity-util-mem = "0.3.0"
 itertools = "0.5"
-kvdb = "0.1"
+kvdb = "0.2"
 log = "0.4"
 parity-bytes = "0.1"
 rand = "0.7"
@@ -32,4 +32,4 @@ env_logger = "0.5"
 parity-crypto = { version = "0.4.2", features = ["publickey"] }
 rustc-hex = "1.0"
 tempdir = "0.3"
-kvdb-memorydb = "0.1.2"
+kvdb-memorydb = "0.2.0"
@@ -628,8 +628,7 @@ impl BlockChain {
 		let best_block_number = bc.best_block.read().header.number();
 		// Fetch first and best ancient block details
 		let raw_first = bc.db.key_value().get(db::COL_EXTRA, b"first")
-			.expect("Low level database error when fetching 'first' block. Some issue with disk?")
-			.map(|v| v.into_vec());
+			.expect("Low level database error when fetching 'first' block. Some issue with disk?");
 		let mut best_ancient = bc.db.key_value().get(db::COL_EXTRA, b"ancient")
 			.expect("Low level database error when fetching 'best ancient' block. Some issue with disk?")
 			.map(|h| H256::from_slice(&h));
@@ -1665,7 +1664,7 @@ mod tests {
 			trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
 			_blooms_dir: blooms_dir,
 			_trace_blooms_dir: trace_blooms_dir,
-			key_value: Arc::new(kvdb_memorydb::create(ethcore_db::NUM_COLUMNS.unwrap()))
+			key_value: Arc::new(kvdb_memorydb::create(ethcore_db::NUM_COLUMNS))
 		};

 		Arc::new(db)
@@ -15,7 +15,7 @@ common-types = { path = "../types" }
 ethcore-db = { path = "../db" }
 ethcore-miner = { path = "../../miner" }
 ethereum-types = "0.8.0"
-kvdb = "0.1"
+kvdb = "0.2"
 registrar = { path = "../../util/registrar" }
 stats = { path = "../../util/stats" }
 trace = { path = "../trace" }
@@ -10,8 +10,8 @@ edition = "2018"
 [dependencies]
 common-types = { path = "../types" }
 ethereum-types = "0.8.0"
-kvdb = "0.1"
-parity-util-mem = "0.2.0"
+kvdb = "0.2"
+parity-util-mem = "0.3.0"
 parking_lot = "0.9"
 rlp = "0.4.0"
 rlp_derive = { path = "../../util/rlp-derive" }
@@ -24,27 +24,27 @@ use kvdb::{DBTransaction, KeyValueDB};

 use rlp;

-// database columns
+// Database column indexes.
 /// Column for State
-pub const COL_STATE: Option<u32> = Some(0);
+pub const COL_STATE: u32 = 0;
 /// Column for Block headers
-pub const COL_HEADERS: Option<u32> = Some(1);
+pub const COL_HEADERS: u32 = 1;
 /// Column for Block bodies
-pub const COL_BODIES: Option<u32> = Some(2);
+pub const COL_BODIES: u32 = 2;
 /// Column for Extras
-pub const COL_EXTRA: Option<u32> = Some(3);
+pub const COL_EXTRA: u32 = 3;
 /// Column for Traces
-pub const COL_TRACE: Option<u32> = Some(4);
+pub const COL_TRACE: u32 = 4;
 /// Column for the empty accounts bloom filter.
-pub const COL_ACCOUNT_BLOOM: Option<u32> = Some(5);
+pub const COL_ACCOUNT_BLOOM: u32 = 5;
 /// Column for general information from the local node which can persist.
-pub const COL_NODE_INFO: Option<u32> = Some(6);
+pub const COL_NODE_INFO: u32 = 6;
 /// Column for the light client chain.
-pub const COL_LIGHT_CHAIN: Option<u32> = Some(7);
+pub const COL_LIGHT_CHAIN: u32 = 7;
 /// Column for the private transactions state.
-pub const COL_PRIVATE_TRANSACTIONS_STATE: Option<u32> = Some(8);
+pub const COL_PRIVATE_TRANSACTIONS_STATE: u32 = 8;
 /// Number of columns in DB
-pub const NUM_COLUMNS: Option<u32> = Some(9);
+pub const NUM_COLUMNS: u32 = 9;

 /// Modes for updating caches.
 #[derive(Clone, Copy)]
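These constants feed directly into the new kvdb API, which takes the column index by value everywhere; the test helpers later in this diff accordingly call kvdb_memorydb::create(NUM_COLUMNS) instead of create(NUM_COLUMNS.unwrap()). A hedged usage sketch against kvdb 0.2 / kvdb-memorydb 0.2 (signatures as I understand those releases; COL_EXTRA and NUM_COLUMNS are the constants defined above):

use std::io;
use std::sync::Arc;
use kvdb::KeyValueDB;

fn demo() -> io::Result<Option<Vec<u8>>> {
	// kvdb-memorydb 0.2: create() takes the number of columns as a plain u32.
	let db: Arc<dyn KeyValueDB> = Arc::new(kvdb_memorydb::create(NUM_COLUMNS));

	// DBTransaction::put takes col: u32 directly; no Option wrapper.
	let mut tx = db.transaction();
	tx.put(COL_EXTRA, b"first", b"some-bytes");
	db.write(tx)?;

	// get() returns io::Result<Option<DBValue>>, and DBValue is now Vec<u8>.
	db.get(COL_EXTRA, b"first")
}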
@@ -93,16 +93,25 @@ pub trait Key<T> {
 /// Should be used to write value into database.
 pub trait Writable {
 	/// Writes the value into the database.
-	fn write<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]>;
+	fn write<T, R>(&mut self, col: u32, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]>;

-	/// Deletes key from the databse.
-	fn delete<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]>;
+	/// Deletes key from the database.
+	fn delete<T, R>(&mut self, col: u32, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]>;

 	/// Writes the value into the database and updates the cache.
-	fn write_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut dyn Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
-		K: Key<T, Target = R> + Hash + Eq,
-		T: rlp::Encodable,
-		R: AsRef<[u8]> {
+	fn write_with_cache<K, T, R>(
+		&mut self,
+		col: u32,
+		cache: &mut dyn Cache<K, T>,
+		key: K,
+		value: T,
+		policy: CacheUpdatePolicy
+	)
+	where
+		K: Key<T, Target = R> + Hash + Eq,
+		T: rlp::Encodable,
+		R: AsRef<[u8]>
+	{
 		self.write(col, &key, &value);
 		match policy {
 			CacheUpdatePolicy::Overwrite => {
@@ -115,10 +124,18 @@ pub trait Writable {
 	}

 	/// Writes the values into the database and updates the cache.
-	fn extend_with_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut dyn Cache<K, T>, values: HashMap<K, T>, policy: CacheUpdatePolicy) where
-		K: Key<T, Target = R> + Hash + Eq,
-		T: rlp::Encodable,
-		R: AsRef<[u8]> {
+	fn extend_with_cache<K, T, R>(
+		&mut self,
+		col: u32,
+		cache: &mut dyn Cache<K, T>,
+		values: HashMap<K, T>,
+		policy: CacheUpdatePolicy
+	)
+	where
+		K: Key<T, Target = R> + Hash + Eq,
+		T: rlp::Encodable,
+		R: AsRef<[u8]>
+	{
 		match policy {
 			CacheUpdatePolicy::Overwrite => {
 				for (key, value) in values {
@@ -136,10 +153,18 @@ pub trait Writable {
 	}

 	/// Writes and removes the values into the database and updates the cache.
-	fn extend_with_option_cache<K, T, R>(&mut self, col: Option<u32>, cache: &mut dyn Cache<K, Option<T>>, values: HashMap<K, Option<T>>, policy: CacheUpdatePolicy) where
-		K: Key<T, Target = R> + Hash + Eq,
-		T: rlp::Encodable,
-		R: AsRef<[u8]> {
+	fn extend_with_option_cache<K, T, R>(
+		&mut self,
+		col: u32,
+		cache: &mut dyn Cache<K, Option<T>>,
+		values: HashMap<K, Option<T>>,
+		policy: CacheUpdatePolicy
+	)
+	where
+		K: Key<T, Target = R> + Hash + Eq,
+		T: rlp::Encodable,
+		R: AsRef<[u8]>
+	{
 		match policy {
 			CacheUpdatePolicy::Overwrite => {
 				for (key, value) in values {
@@ -167,12 +192,12 @@ pub trait Writable {
 /// Should be used to read values from database.
 pub trait Readable {
 	/// Returns value for given key.
-	fn read<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> Option<T> where
+	fn read<T, R>(&self, col: u32, key: &dyn Key<T, Target = R>) -> Option<T> where
 		T: rlp::Decodable,
 		R: AsRef<[u8]>;

 	/// Returns value for given key either in cache or in database.
-	fn read_with_cache<K, T, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> Option<T> where
+	fn read_with_cache<K, T, C>(&self, col: u32, cache: &RwLock<C>, key: &K) -> Option<T> where
 		K: Key<T> + Eq + Hash + Clone,
 		T: Clone + rlp::Decodable,
 		C: Cache<K, T> {
@@ -191,10 +216,18 @@ pub trait Readable {
 	}

 	/// Returns value for given key either in two-layered cache or in database.
-	fn read_with_two_layer_cache<K, T, C>(&self, col: Option<u32>, l1_cache: &RwLock<C>, l2_cache: &RwLock<C>, key: &K) -> Option<T> where
-		K: Key<T> + Eq + Hash + Clone,
-		T: Clone + rlp::Decodable,
-		C: Cache<K, T> {
+	fn read_with_two_layer_cache<K, T, C>(
+		&self,
+		col: u32,
+		l1_cache: &RwLock<C>,
+		l2_cache: &RwLock<C>,
+		key: &K
+	) -> Option<T>
+	where
+		K: Key<T> + Eq + Hash + Clone,
+		T: Clone + rlp::Decodable,
+		C: Cache<K, T>
+	{
 		{
 			let read = l1_cache.read();
 			if let Some(v) = read.get(key) {
@@ -206,10 +239,10 @@ pub trait Readable {
 	}

 	/// Returns true if given value exists.
-	fn exists<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]>;
+	fn exists<T, R>(&self, col: u32, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]>;

 	/// Returns true if given value exists either in cache or in database.
-	fn exists_with_cache<K, T, R, C>(&self, col: Option<u32>, cache: &RwLock<C>, key: &K) -> bool where
+	fn exists_with_cache<K, T, R, C>(&self, col: u32, cache: &RwLock<C>, key: &K) -> bool where
 		K: Eq + Hash + Key<T, Target = R>,
 		R: AsRef<[u8]>,
 		C: Cache<K, T> {
@@ -225,17 +258,17 @@ pub trait Readable {
 }

 impl Writable for DBTransaction {
-	fn write<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]> {
+	fn write<T, R>(&mut self, col: u32, key: &dyn Key<T, Target = R>, value: &T) where T: rlp::Encodable, R: AsRef<[u8]> {
 		self.put(col, key.key().as_ref(), &rlp::encode(value));
 	}

-	fn delete<T, R>(&mut self, col: Option<u32>, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]> {
+	fn delete<T, R>(&mut self, col: u32, key: &dyn Key<T, Target = R>) where T: rlp::Encodable, R: AsRef<[u8]> {
 		self.delete(col, key.key().as_ref());
 	}
 }

 impl<KVDB: KeyValueDB + ?Sized> Readable for KVDB {
-	fn read<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> Option<T>
+	fn read<T, R>(&self, col: u32, key: &dyn Key<T, Target = R>) -> Option<T>
 		where T: rlp::Decodable, R: AsRef<[u8]> {
 		self.get(col, key.key().as_ref())
 			.expect(&format!("db get failed, key: {:?}", key.key().as_ref()))
@@ -243,7 +276,7 @@ impl<KVDB: KeyValueDB + ?Sized> Readable for KVDB {

 	}

-	fn exists<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]> {
+	fn exists<T, R>(&self, col: u32, key: &dyn Key<T, Target = R>) -> bool where R: AsRef<[u8]> {
 		let result = self.get(col, key.key().as_ref());

 		match result {
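Call sites of these helpers change in the same one-line way: they pass a column constant directly instead of wrapping it in Some(..). A hypothetical call-site sketch (the generic key/value types are stand-ins; the concrete Key<T> implementations live elsewhere in ethcore-db):

// Hypothetical call site; assumes the Writable trait above is in scope.
fn upsert<K, V, R>(db: &dyn kvdb::KeyValueDB, key: K, value: &V)
where
	K: Key<V, Target = R>,
	V: rlp::Encodable,
	R: AsRef<[u8]>,
{
	let mut batch = db.transaction();
	// The column is a plain u32 now; previously this argument was Some(COL_EXTRA).
	batch.write(COL_EXTRA, &key, value);
	db.write(batch).expect("low-level database error");
}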
@@ -17,13 +17,13 @@ ethereum-types = "0.8.0"
 ethjson = { path = "../../../json" }
 executive-state = { path = "../../executive-state" }
 keccak-hash = "0.4.0"
-kvdb = "0.1"
+kvdb = "0.2"
 lazy_static = "1.3.0"
 log = "0.4.8"
 machine = { path = "../../machine" }
 memory-cache = { path = "../../../util/memory-cache" }
 parity-bytes = "0.1.0"
-parity-util-mem = "0.2.0"
+parity-util-mem = "0.3.0"
 parking_lot = "0.9"
 rlp = "0.4.2"
 triehash = { package = "triehash-ethereum", version = "0.2", path = "../../../util/triehash-ethereum" }
@@ -154,11 +154,10 @@ fn check_first_proof(machine: &Machine, contract_address: Address, old_header: H

 fn decode_first_proof(rlp: &Rlp) -> Result<(Header, Vec<DBValue>), EthcoreError> {
 	let header = rlp.val_at(0)?;
-	let state_items = rlp.at(1)?.iter().map(|x| {
-		let mut val = DBValue::new();
-		val.append_slice(x.data()?);
-		Ok(val)
-	}).collect::<Result<_, EthcoreError>>()?;
+	let state_items = rlp.at(1)?
+		.iter()
+		.map(|x| Ok(x.data()?.to_vec()) )
+		.collect::<Result<_, EthcoreError>>()?;

 	Ok((header, state_items))
 }
@@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 bit-set = "0.4"
 parity-bytes = "0.1"
 ethereum-types = "0.8.0"
-parity-util-mem = "0.2.0"
+parity-util-mem = "0.3.0"
 lazy_static = "1.0"
 log = "0.4"
 vm = { path = "../vm" }
@@ -14,7 +14,7 @@ common-types = { path = "../types" }
 ethereum-types = "0.8.0"
 hash-db = "0.15.0"
 keccak-hasher = { path = "../../util/keccak-hasher" }
-kvdb = "0.1"
+kvdb = "0.2"
 log = "0.4.8"
 machine = { path = "../machine" }
 trace = { path = "../trace" }
@@ -30,5 +30,5 @@ keccak-hash = "0.4.0"
 pod = { path = "../pod" }
 rustc-hex = "1.0"
 spec = { path = "../spec" }
-trie-db = "0.16.0"
+trie-db = "0.18.0"
 ethtrie = { package = "patricia-trie-ethereum", path = "../../util/patricia-trie-ethereum" }
@@ -18,14 +18,14 @@ ethcore-blockchain = { path = "../blockchain" }
 ethereum-types = "0.8.0"
 executive-state = { path = "../executive-state" }
 machine = { path = "../machine" }
-memory-db = "0.15.0"
-trie-db = "0.16.0"
+memory-db = "0.18.0"
+trie-db = "0.18.0"
 patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
 ethcore-network = { path = "../../util/network" }
 ethcore-miner = { path = "../../miner" }
 ethcore-io = { path = "../../util/io" }
 hash-db = "0.15.0"
-parity-util-mem = "0.2.0"
+parity-util-mem = "0.3.0"
 vm = { path = "../vm" }
 fastmap = { path = "../../util/fastmap" }
 failsafe = { version = "0.3.0", default-features = false, features = ["parking_lot_mutex"] }
@@ -43,14 +43,14 @@ stats = { path = "../../util/stats" }
 keccak-hash = "0.4.0"
 keccak-hasher = { path = "../../util/keccak-hasher" }
 triehash-ethereum = { version = "0.2", path = "../../util/triehash-ethereum" }
-kvdb = "0.1"
+kvdb = "0.2"
 memory-cache = { path = "../../util/memory-cache" }
 journaldb = { path = "../../util/journaldb" }
 verification = { path = "../verification" }

 [dev-dependencies]
 ethcore = { path = "..", features = ["test-helpers"] }
-kvdb-memorydb = "0.1.2"
+kvdb-memorydb = "0.2.0"
 tempdir = "0.3"

 [features]
@@ -220,7 +220,7 @@ pub struct HeaderChain {
 	#[ignore_malloc_size_of = "ignored for performance reason"]
 	db: Arc<dyn KeyValueDB>,
 	#[ignore_malloc_size_of = "ignored for performance reason"]
-	col: Option<u32>,
+	col: u32,
 	#[ignore_malloc_size_of = "ignored for performance reason"]
 	cache: Arc<Mutex<Cache>>,
 }
@@ -229,7 +229,7 @@ impl HeaderChain {
 	/// Create a new header chain given this genesis block and database to read from.
 	pub fn new(
 		db: Arc<dyn KeyValueDB>,
-		col: Option<u32>,
+		col: u32,
 		spec: &Spec,
 		cache: Arc<Mutex<Cache>>,
 		allow_hs: HardcodedSync,
@@ -259,7 +259,7 @@ impl HeaderChain {
 				live_epoch_proofs.insert(c.hash, EpochTransition {
 					block_hash: c.hash,
 					block_number: cur_number,
-					proof: proof.into_vec(),
+					proof,
 				});
 			}
 		}
@@ -667,7 +667,8 @@ impl HeaderChain {
 			None => {
 				match self.db.get(self.col, hash.as_bytes()) {
 					Ok(db_value) => {
-						db_value.map(|x| x.into_vec()).map(encoded::Header::new)
+						db_value
+							.map(encoded::Header::new)
 							.and_then(|header| {
 								cache.insert_block_header(hash, header.clone());
 								Some(header)
@@ -886,7 +887,7 @@ mod tests {
 	use parking_lot::Mutex;

 	fn make_db() -> Arc<dyn KeyValueDB> {
-		Arc::new(kvdb_memorydb::create(0))
+		Arc::new(kvdb_memorydb::create(1))
 	}

 	#[test]
@@ -897,7 +898,7 @@ mod tests {

 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-		let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();
+		let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).unwrap();

 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -930,7 +931,7 @@ mod tests {
 		let db = make_db();
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-		let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();
+		let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).unwrap();

 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -1012,7 +1013,7 @@ mod tests {
 		let db = make_db();
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-		let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();
+		let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).unwrap();

 		assert!(chain.block_header(BlockId::Earliest).is_some());
 		assert!(chain.block_header(BlockId::Latest).is_some());
@@ -1026,7 +1027,7 @@ mod tests {
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 		{
-			let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+			let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
 				HardcodedSync::Allow).unwrap();
 			let mut parent_hash = genesis_header.hash();
 			let mut rolling_timestamp = genesis_header.timestamp();
@@ -1047,7 +1048,7 @@ mod tests {
 			}
 		}

-		let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+		let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
 			HardcodedSync::Allow).unwrap();
 		assert!(chain.block_header(BlockId::Number(10)).is_none());
 		assert!(chain.block_header(BlockId::Number(9000)).is_some());
@@ -1064,7 +1065,7 @@ mod tests {
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

 		{
-			let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+			let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
 				HardcodedSync::Allow).unwrap();
 			let mut parent_hash = genesis_header.hash();
 			let mut rolling_timestamp = genesis_header.timestamp();
@@ -1107,7 +1108,7 @@ mod tests {
 		}

 		// after restoration, non-canonical eras should still be loaded.
-		let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+		let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
 			HardcodedSync::Allow).unwrap();
 		assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10);
 		assert!(chain.candidates.read().get(&100).is_some())
@@ -1120,7 +1121,7 @@ mod tests {
 		let db = make_db();
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-		let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone(),
+		let chain = HeaderChain::new(db.clone(), 0, &spec, cache.clone(),
 			HardcodedSync::Allow).unwrap();

 		assert!(chain.block_header(BlockId::Earliest).is_some());
@@ -1135,7 +1136,7 @@ mod tests {
 		let db = make_db();
 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-		let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap();
+		let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).unwrap();

 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -1202,7 +1203,7 @@ mod tests {

 		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

-		let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).expect("failed to instantiate a new HeaderChain");
+		let chain = HeaderChain::new(db.clone(), 0, &spec, cache, HardcodedSync::Allow).expect("failed to instantiate a new HeaderChain");

 		let mut parent_hash = genesis_header.hash();
 		let mut rolling_timestamp = genesis_header.timestamp();
@@ -61,7 +61,7 @@ pub struct Config {
 	/// Verification queue config.
 	pub queue: queue::Config,
 	/// Chain column in database.
-	pub chain_column: Option<u32>,
+	pub chain_column: u32,
 	/// Should it do full verification of blocks?
 	pub verify_full: bool,
 	/// Should it check the seal of blocks?
@@ -74,7 +74,7 @@ impl Default for Config {
 	fn default() -> Config {
 		Config {
 			queue: Default::default(),
-			chain_column: None,
+			chain_column: 0,
 			verify_full: true,
 			check_seal: true,
 			no_hardcoded_sync: false,
@@ -182,7 +182,7 @@ impl<T: ChainDataFetcher> Client<T> {
 	pub fn new(
 		config: Config,
 		db: Arc<dyn KeyValueDB>,
-		chain_col: Option<u32>,
+		chain_col: u32,
 		spec: &Spec,
 		fetcher: T,
 		io_channel: IoChannel<ClientIoMessage<()>>,
|
@ -1507,9 +1507,7 @@ pub mod execution {
|
|||||||
fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
|
fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
|
||||||
let mut items = Vec::new();
|
let mut items = Vec::new();
|
||||||
for raw_item in rlp.iter() {
|
for raw_item in rlp.iter() {
|
||||||
let mut item = DBValue::new();
|
items.push(raw_item.data()?.to_vec());
|
||||||
item.append_slice(raw_item.data()?);
|
|
||||||
items.push(item);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Response { items })
|
Ok(Response { items })
|
||||||
@ -1839,8 +1837,6 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn execution_roundtrip() {
|
fn execution_roundtrip() {
|
||||||
use kvdb::DBValue;
|
|
||||||
|
|
||||||
let req = IncompleteExecutionRequest {
|
let req = IncompleteExecutionRequest {
|
||||||
block_hash: Field::Scalar(Default::default()),
|
block_hash: Field::Scalar(Default::default()),
|
||||||
from: Default::default(),
|
from: Default::default(),
|
||||||
@ -1852,13 +1848,7 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let full_req = Request::Execution(req.clone());
|
let full_req = Request::Execution(req.clone());
|
||||||
let res = ExecutionResponse {
|
let res = ExecutionResponse { items: vec![vec![], vec![1, 1, 1, 2, 3]] };
|
||||||
items: vec![DBValue::new(), {
|
|
||||||
let mut value = DBValue::new();
|
|
||||||
value.append_slice(&[1, 1, 1, 2, 3]);
|
|
||||||
value
|
|
||||||
}],
|
|
||||||
};
|
|
||||||
let full_res = Response::Execution(res.clone());
|
let full_res = Response::Execution(res.clone());
|
||||||
|
|
||||||
check_roundtrip(req);
|
check_roundtrip(req);
|
||||||
|
@@ -22,7 +22,7 @@ lru-cache = "0.1"

 [dev-dependencies]
 ethcore = { path = "..", features = ["test-helpers"] }
-kvdb-memorydb = "0.1.2"
+kvdb-memorydb = "0.2.0"
 ethcore-io = { path = "../../util/io" }
 spec = { path = "../spec" }
 tempdir = "0.3"
@@ -15,13 +15,13 @@ hash-db = "0.15.0"
 itertools = "0.8"
 keccak-hash = "0.4.0"
 keccak-hasher = { path = "../../util/keccak-hasher" }
-kvdb = "0.1"
+kvdb = "0.2"
 log = "0.4"
 parity-bytes = "0.1.0"
 rlp = "0.4"
 rustc-hex = "1"
 serde = { version = "1.0", features = ["derive"] }
-trie-db = "0.16.0"
+trie-db = "0.18.0"
 triehash = { package = "triehash-ethereum", version = "0.2", path = "../../util/triehash-ethereum" }

 [dev-dependencies]
@@ -22,18 +22,18 @@ ethereum-types = "0.8.0"
 ethjson = { path = "../../json" }
 fetch = { path = "../../util/fetch" }
 futures = "0.1"
-parity-util-mem = "0.2.0"
+parity-util-mem = "0.3.0"
 hash-db = "0.15.0"
 keccak-hash = "0.4.0"
 keccak-hasher = { path = "../../util/keccak-hasher" }
-kvdb = "0.1"
+kvdb = "0.2"
 log = "0.4"
 machine = { path = "../machine" }
 journaldb = { path = "../../util/journaldb" }
 parity-bytes = "0.1"
 parity-crypto = { version = "0.4.2", features = ["publickey"] }
 parking_lot = "0.9"
-trie-db = "0.16.0"
+trie-db = "0.18.0"
 patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
 registrar = { path = "../../util/registrar" }
 rlp = "0.4.0"
@@ -14,7 +14,7 @@ ethcore-io = { path = "../../util/io" }
 ethcore-private-tx = { path = "../private-tx" }
 ethcore-sync = { path = "../sync" }
 ethereum-types = "0.8.0"
-kvdb = "0.1"
+kvdb = "0.2"
 log = "0.4"
 snapshot = { path = "../snapshot" }
 spec = { path = "../spec" }
@@ -23,5 +23,5 @@ trace-time = "0.1"
 [dev-dependencies]
 ethcore = { path = "..", features = ["test-helpers"] }
 ethcore-db = { path = "../db" }
-kvdb-rocksdb = "0.2.0"
+kvdb-rocksdb = "0.3.0"
 tempdir = "0.3"
@@ -29,7 +29,7 @@ itertools = "0.5"
 journaldb = { path = "../../util/journaldb" }
 keccak-hash = "0.4.0"
 keccak-hasher = { path = "../../util/keccak-hasher" }
-kvdb = "0.1"
+kvdb = "0.2"
 log = "0.4.8"
 num_cpus = "1.10.1"
 rand = "0.7"
@@ -40,7 +40,7 @@ rlp_derive = { path = "../../util/rlp-derive" }
 scopeguard = "1.0.0"
 snappy = { package = "parity-snappy", version ="0.1.0" }
 state-db = { path = "../state-db" }
-trie-db = "0.16.0"
+trie-db = "0.18.0"
 triehash = { package = "triehash-ethereum", version = "0.2", path = "../../util/triehash-ethereum" }

 [dev-dependencies]
@@ -53,7 +53,7 @@ ethabi-contract = "9.0.0"
 ethabi-derive = "9.0.1"
 ethcore = { path = "..", features = ["test-helpers"] }
 ethkey = { path = "../../accounts/ethkey" }
-kvdb-rocksdb = "0.2.0"
+kvdb-rocksdb = "0.3.0"
 lazy_static = { version = "1.3" }
 spec = { path = "../spec" }
 tempdir = "0.3"
@@ -23,8 +23,8 @@ hash-db = "0.15.0"
 journaldb = { path = "../../../util/journaldb" }
 keccak-hash = "0.4.0"
 keccak-hasher = { path = "../../../util/keccak-hasher" }
-kvdb = "0.1"
-kvdb-rocksdb = "0.2.0"
+kvdb = "0.2"
+kvdb-rocksdb = "0.3.0"
 log = "0.4.8"
 parking_lot = "0.9"
 parity-crypto = { version = "0.4.2", features = ["publickey"] }
@@ -35,7 +35,7 @@ snappy = { package = "parity-snappy", version ="0.1.0" }
 snapshot = { path = "../../snapshot", features = ["test-helpers"] }
 spec = { path = "../../spec" }
 tempdir = "0.3"
-trie-db = "0.16.0"
+trie-db = "0.18.0"
 trie-standardmap = "0.15.0"
 ethabi = "9.0.1"
 ethabi-contract = "9.0.0"
@@ -27,7 +27,6 @@ use ethcore::test_helpers::get_temp_state_db;
 use ethereum_types::{H256, Address};
 use hash_db::{HashDB, EMPTY_PREFIX};
 use keccak_hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak};
-use kvdb::DBValue;
 use parking_lot::RwLock;
 use rlp::Rlp;
 use snapshot::test_helpers::{ACC_EMPTY, to_fat_rlps, from_fat_rlp};
@@ -151,7 +150,7 @@ fn encoding_code() {

 	{
 		let mut acct_db = AccountDBMut::from_hash(db.as_hash_db_mut(), keccak(addr2));
-		acct_db.emplace(code_hash.clone(), EMPTY_PREFIX, DBValue::from_slice(b"this is definitely code"));
+		acct_db.emplace(code_hash.clone(), EMPTY_PREFIX, b"this is definitely code".to_vec());
 	}

 	let account1 = BasicAccount {
@@ -91,7 +91,7 @@ impl StateProducer {
 			let mut account: BasicAccount = rlp::decode(&*account_data).expect("error decoding basic account");
 			let acct_db = AccountDBMut::from_hash(db, *address_hash);
 			fill_storage(acct_db, &mut account.storage_root, &mut self.storage_seed);
-			*account_data = DBValue::from_vec(rlp::encode(&account));
+			*account_data = rlp::encode(&account);
 		}

 		// sweep again to alter account trie.
@@ -421,7 +421,7 @@ impl StateRebuilder {
 		for (code_hash, code, first_with) in status.new_code {
 			for addr_hash in self.missing_code.remove(&code_hash).unwrap_or_else(Vec::new) {
 				let mut db = AccountDBMut::from_hash(self.db.as_hash_db_mut(), addr_hash);
-				db.emplace(code_hash, hash_db::EMPTY_PREFIX, DBValue::from_slice(&code));
+				db.emplace(code_hash, hash_db::EMPTY_PREFIX, code.to_vec());
 			}

 			self.known_code.insert(code_hash, first_with);
@@ -25,7 +25,7 @@ hash-db = "0.15.0"
 instant-seal = { path = "../engines/instant-seal" }
 journaldb = { path = "../../util/journaldb" }
 keccak-hash = "0.4.0"
-kvdb-memorydb = "0.1.2"
+kvdb-memorydb = "0.2.0"
 log = "0.4.8"
 machine = { path = "../machine" }
 null-engine = { path = "../engines/null-engine" }
@@ -510,9 +510,9 @@ impl Spec {

 		let factories = Default::default();
 		let mut db = journaldb::new(
-			Arc::new(kvdb_memorydb::create(0)),
+			Arc::new(kvdb_memorydb::create(1)),
 			journaldb::Algorithm::Archive,
-			None,
+			0,
 		);

 		self.ensure_db_good(BasicBackend(db.as_hash_db_mut()), &factories)
@@ -540,18 +540,14 @@ impl Spec {
 				data: d,
 			}.fake_sign(from);

-			let res = executive_state::prove_transaction_virtual(
+			executive_state::prove_transaction_virtual(
 				db.as_hash_db_mut(),
 				*genesis.state_root(),
 				&tx,
 				self.engine.machine(),
 				&env_info,
 				factories.clone(),
-			);
-
-			res.map(|(out, proof)| {
-				(out, proof.into_iter().map(|x| x.into_vec()).collect())
-			}).ok_or_else(|| "Failed to prove call: insufficient state".into())
+			).ok_or_else(|| "Failed to prove call: insufficient state".into())
 		};

 		self.engine.genesis_epoch_data(&genesis, &call)
@@ -647,15 +647,13 @@ impl Importer {
 			let res = Executive::new(&mut state, &env_info, &machine, &schedule)
 				.transact(&transaction, options);

-			let res = match res {
+			match res {
 				Err(e) => {
 					trace!(target: "client", "Proved call failed: {}", e);
 					Err(e.to_string())
 				}
 				Ok(res) => Ok((res.output, state.drop().1.extract_proof())),
-			};
-
-			res.map(|(output, proof)| (output, proof.into_iter().map(|x| x.into_vec()).collect()))
+			}
 		};

 		match with_state.generate_proof(&call) {
@@ -165,7 +165,7 @@ impl<'a> EvmTestClient<'a> {
 	}

 	fn state_from_spec(spec: &'a spec::Spec, factories: &Factories) -> Result<State<state_db::StateDB>, EvmTestError> {
-		let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
+		let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS));
 		let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
 		let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
 		state_db = spec.ensure_db_good(state_db, factories)?;
@@ -187,7 +187,7 @@ impl<'a> EvmTestClient<'a> {
 	}

 	fn state_from_pod(spec: &'a spec::Spec, factories: &Factories, pod_state: PodState) -> Result<State<state_db::StateDB>, EvmTestError> {
-		let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
+		let db = Arc::new(kvdb_memorydb::create(db::NUM_COLUMNS));
 		let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
 		let state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
 		let mut state = State::new(
@ -312,7 +312,7 @@ pub fn new_db() -> Arc<dyn BlockChainDB> {
|
|||||||
trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
|
trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
|
||||||
_blooms_dir: blooms_dir,
|
_blooms_dir: blooms_dir,
|
||||||
_trace_blooms_dir: trace_blooms_dir,
|
_trace_blooms_dir: trace_blooms_dir,
|
||||||
key_value: Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap()))
|
key_value: Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS))
|
||||||
};
|
};
|
||||||
|
|
||||||
Arc::new(db)
|
Arc::new(db)
|
||||||
|
@ -399,7 +399,7 @@ impl TestBlockChainClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_temp_state_db() -> StateDB {
|
pub fn get_temp_state_db() -> StateDB {
|
||||||
let db = kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0));
|
let db = kvdb_memorydb::create(NUM_COLUMNS);
|
||||||
let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE);
|
let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE);
|
||||||
StateDB::new(journal_db, 1024 * 1024)
|
StateDB::new(journal_db, 1024 * 1024)
|
||||||
}
|
}
|
||||||
|
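The test helpers above all move to the same calling convention. A self-contained sketch of that convention, assuming kvdb 0.2 / kvdb-memorydb 0.2 as pinned in the manifests below: create takes a column count, every access names a bare u32 column, and DBValue is a plain Vec<u8>.

use std::sync::Arc;
use kvdb::KeyValueDB;

fn main() -> std::io::Result<()> {
    // One column family, addressed as column 0; create(0) would leave no
    // addressable columns now that the implicit default column is gone.
    let db = Arc::new(kvdb_memorydb::create(1));

    let mut batch = db.transaction();
    batch.put(0, b"key", b"value"); // was: batch.put(Some(0), ...)
    db.write(batch)?;

    // Values come back as plain Vec<u8>, no conversion needed.
    assert_eq!(db.get(0, b"key")?, Some(b"value".to_vec()));
    Ok(())
}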
@ -16,7 +16,7 @@ hash-db = "0.15.0"
|
|||||||
keccak-hash = "0.4.0"
|
keccak-hash = "0.4.0"
|
||||||
keccak-hasher = { path = "../../util/keccak-hasher" }
|
keccak-hasher = { path = "../../util/keccak-hasher" }
|
||||||
journaldb = { path = "../../util/journaldb" }
|
journaldb = { path = "../../util/journaldb" }
|
||||||
kvdb = "0.1"
|
kvdb = "0.2"
|
||||||
log = "0.4.6"
|
log = "0.4.6"
|
||||||
lru-cache = "0.1.2"
|
lru-cache = "0.1.2"
|
||||||
memory-cache = { path = "../../util/memory-cache" }
|
memory-cache = { path = "../../util/memory-cache" }
|
||||||
|
@ -27,7 +27,7 @@ macros = { path = "../../util/macros" }
|
|||||||
network = { package = "ethcore-network", path = "../../util/network" }
|
network = { package = "ethcore-network", path = "../../util/network" }
|
||||||
parity-runtime = { path = "../../util/runtime" }
|
parity-runtime = { path = "../../util/runtime" }
|
||||||
parity-crypto = { version = "0.4.2", features = ["publickey"] }
|
parity-crypto = { version = "0.4.2", features = ["publickey"] }
|
||||||
parity-util-mem = "0.2.0"
|
parity-util-mem = "0.3.0"
|
||||||
rand = "0.7"
|
rand = "0.7"
|
||||||
parking_lot = "0.9"
|
parking_lot = "0.9"
|
||||||
rlp = "0.4.0"
|
rlp = "0.4.0"
|
||||||
@ -40,7 +40,7 @@ env_logger = "0.5"
|
|||||||
engine = { path = "../engine" }
|
engine = { path = "../engine" }
|
||||||
ethcore = { path = "..", features = ["test-helpers"] }
|
ethcore = { path = "..", features = ["test-helpers"] }
|
||||||
ethcore-io = { path = "../../util/io", features = ["mio"] }
|
ethcore-io = { path = "../../util/io", features = ["mio"] }
|
||||||
kvdb-memorydb = "0.1.2"
|
kvdb-memorydb = "0.2.0"
|
||||||
machine = { path = "../machine" }
|
machine = { path = "../machine" }
|
||||||
rand_xorshift = "0.2"
|
rand_xorshift = "0.2"
|
||||||
rustc-hex = "1.0"
|
rustc-hex = "1.0"
|
||||||
|
@ -235,11 +235,11 @@ impl TestNet<Peer> {
|
|||||||
// skip full verification because the blocks are bad.
|
// skip full verification because the blocks are bad.
|
||||||
config.verify_full = false;
|
config.verify_full = false;
|
||||||
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));
|
||||||
let db = kvdb_memorydb::create(0);
|
let db = kvdb_memorydb::create(1);
|
||||||
let client = LightClient::new(
|
let client = LightClient::new(
|
||||||
config,
|
config,
|
||||||
Arc::new(db),
|
Arc::new(db),
|
||||||
None,
|
0,
|
||||||
&spec::new_test(),
|
&spec::new_test(),
|
||||||
fetch::unavailable(), // TODO: allow fetch from full nodes.
|
fetch::unavailable(), // TODO: allow fetch from full nodes.
|
||||||
IoChannel::disconnected(),
|
IoChannel::disconnected(),
|
||||||
|
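The light-client test above follows the same rule: the database is created with one column (create(1), not create(0)) and LightClient::new receives 0 where it used to receive None. A tiny sketch of the invariant, under the same kvdb 0.2 assumption:

use std::sync::Arc;
use kvdb::KeyValueDB;

fn main() {
    let db = Arc::new(kvdb_memorydb::create(1));
    // Column 0 exists and starts empty; there is no `None` column to fall back to.
    assert_eq!(db.iter(0).count(), 0);
}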
@ -11,10 +11,10 @@ ethcore-blockchain = { path = "../blockchain" }
|
|||||||
ethcore-db = { path = "../db" }
|
ethcore-db = { path = "../db" }
|
||||||
ethereum-types = "0.8.0"
|
ethereum-types = "0.8.0"
|
||||||
evm = { path = "../evm" }
|
evm = { path = "../evm" }
|
||||||
kvdb = "0.1"
|
kvdb = "0.2"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
parity-bytes = "0.1.0"
|
parity-bytes = "0.1.0"
|
||||||
parity-util-mem = "0.2.0"
|
parity-util-mem = "0.3.0"
|
||||||
parking_lot = "0.9"
|
parking_lot = "0.9"
|
||||||
rlp = "0.4.0"
|
rlp = "0.4.0"
|
||||||
rlp_derive = { path = "../../util/rlp-derive" }
|
rlp_derive = { path = "../../util/rlp-derive" }
|
||||||
|
@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
trie-db = "0.16.0"
|
trie-db = "0.18.0"
|
||||||
ethtrie = { package = "patricia-trie-ethereum", path = "../../util/patricia-trie-ethereum" }
|
ethtrie = { package = "patricia-trie-ethereum", path = "../../util/patricia-trie-ethereum" }
|
||||||
account-db = { path = "../account-db" }
|
account-db = { path = "../account-db" }
|
||||||
evm = { path = "../evm" }
|
evm = { path = "../evm" }
|
||||||
|
@ -13,7 +13,7 @@ ethjson = { path = "../../json" }
|
|||||||
keccak-hash = "0.4.0"
|
keccak-hash = "0.4.0"
|
||||||
parity-bytes = "0.1"
|
parity-bytes = "0.1"
|
||||||
parity-crypto = { version = "0.4.2", features = ["publickey"] }
|
parity-crypto = { version = "0.4.2", features = ["publickey"] }
|
||||||
parity-util-mem = "0.2.0"
|
parity-util-mem = "0.3.0"
|
||||||
parity-snappy = "0.1"
|
parity-snappy = "0.1"
|
||||||
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
|
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
|
||||||
rlp = "0.4.0"
|
rlp = "0.4.0"
|
||||||
|
@ -24,7 +24,7 @@ len-caching-lock = { path = "../../util/len-caching-lock" }
|
|||||||
log = "0.4"
|
log = "0.4"
|
||||||
num_cpus = "1.2"
|
num_cpus = "1.2"
|
||||||
parity-bytes = "0.1.0"
|
parity-bytes = "0.1.0"
|
||||||
parity-util-mem = "0.2.0"
|
parity-util-mem = "0.3.0"
|
||||||
parking_lot = "0.9"
|
parking_lot = "0.9"
|
||||||
rlp = "0.4.2"
|
rlp = "0.4.2"
|
||||||
time-utils = { path = "../../util/time-utils" }
|
time-utils = { path = "../../util/time-utils" }
|
||||||
|
@ -22,7 +22,7 @@ ethabi-contract = "9.0.0"
|
|||||||
ethcore-call-contract = { path = "../ethcore/call-contract" }
|
ethcore-call-contract = { path = "../ethcore/call-contract" }
|
||||||
ethereum-types = "0.8.0"
|
ethereum-types = "0.8.0"
|
||||||
futures = "0.1"
|
futures = "0.1"
|
||||||
parity-util-mem = "0.2.0"
|
parity-util-mem = "0.3.0"
|
||||||
keccak-hash = "0.4.0"
|
keccak-hash = "0.4.0"
|
||||||
linked-hash-map = "0.5"
|
linked-hash-map = "0.5"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
|
@ -8,7 +8,7 @@ edition = "2018"
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
common-types = { path = "../../ethcore/types" }
|
common-types = { path = "../../ethcore/types" }
|
||||||
ethcore-io = { path = "../../util/io" }
|
ethcore-io = { path = "../../util/io" }
|
||||||
kvdb = "0.1"
|
kvdb = "0.2"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
rlp = "0.4.0"
|
rlp = "0.4.0"
|
||||||
serde = "1.0"
|
serde = "1.0"
|
||||||
@ -18,4 +18,4 @@ serde_json = "1.0"
|
|||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
ethkey = { path = "../../accounts/ethkey" }
|
ethkey = { path = "../../accounts/ethkey" }
|
||||||
parity-crypto = { version = "0.4.2", features = ["publickey"] }
|
parity-crypto = { version = "0.4.2", features = ["publickey"] }
|
||||||
kvdb-memorydb = "0.1.2"
|
kvdb-memorydb = "0.2.0"
|
||||||
|
@ -107,7 +107,7 @@ pub trait NodeInfo: Send + Sync {
|
|||||||
|
|
||||||
/// Create a new local data store, given a database, a column to write to, and a node.
|
/// Create a new local data store, given a database, a column to write to, and a node.
|
||||||
/// Attempts to read data out of the store, and move it into the node.
|
/// Attempts to read data out of the store, and move it into the node.
|
||||||
pub fn create<T: NodeInfo>(db: Arc<dyn KeyValueDB>, col: Option<u32>, node: T) -> LocalDataStore<T> {
|
pub fn create<T: NodeInfo>(db: Arc<dyn KeyValueDB>, col: u32, node: T) -> LocalDataStore<T> {
|
||||||
LocalDataStore {
|
LocalDataStore {
|
||||||
db,
|
db,
|
||||||
col,
|
col,
|
||||||
@ -121,7 +121,7 @@ pub fn create<T: NodeInfo>(db: Arc<dyn KeyValueDB>, col: Option<u32>, node: T) -
|
|||||||
/// and the node security level.
|
/// and the node security level.
|
||||||
pub struct LocalDataStore<T: NodeInfo> {
|
pub struct LocalDataStore<T: NodeInfo> {
|
||||||
db: Arc<dyn KeyValueDB>,
|
db: Arc<dyn KeyValueDB>,
|
||||||
col: Option<u32>,
|
col: u32,
|
||||||
node: T,
|
node: T,
|
||||||
}
|
}
|
||||||
|
|
||||||
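Putting the two hunks above together, the store's constructor now takes a bare column index. A hedged sketch of a call site, using the module's own create, NodeInfo and LocalDataStore (the tests below exercise the same call with a Dummy node):

use std::sync::Arc;
use kvdb::KeyValueDB;

fn open_store<T: NodeInfo>(db: Arc<dyn KeyValueDB>, node: T) -> LocalDataStore<T> {
    create(db, 0, node) // was: create(db, None, node)
}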
@ -214,15 +214,15 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn twice_empty() {
|
fn twice_empty() {
|
||||||
let db = Arc::new(::kvdb_memorydb::create(0));
|
let db = Arc::new(::kvdb_memorydb::create(1));
|
||||||
|
|
||||||
{
|
{
|
||||||
let store = super::create(db.clone(), None, Dummy(vec![]));
|
let store = super::create(db.clone(), 0, Dummy(vec![]));
|
||||||
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
let store = super::create(db.clone(), None, Dummy(vec![]));
|
let store = super::create(db.clone(), 0, Dummy(vec![]));
|
||||||
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -243,21 +243,21 @@ mod tests {
|
|||||||
PendingTransaction::new(signed, condition)
|
PendingTransaction::new(signed, condition)
|
||||||
}).collect();
|
}).collect();
|
||||||
|
|
||||||
let db = Arc::new(::kvdb_memorydb::create(0));
|
let db = Arc::new(::kvdb_memorydb::create(1));
|
||||||
|
|
||||||
{
|
{
|
||||||
// nothing written yet, will write pending.
|
// nothing written yet, will write pending.
|
||||||
let store = super::create(db.clone(), None, Dummy(transactions.clone()));
|
let store = super::create(db.clone(), 0, Dummy(transactions.clone()));
|
||||||
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
// pending written, will write nothing.
|
// pending written, will write nothing.
|
||||||
let store = super::create(db.clone(), None, Dummy(vec![]));
|
let store = super::create(db.clone(), 0, Dummy(vec![]));
|
||||||
assert_eq!(store.pending_transactions().unwrap(), transactions)
|
assert_eq!(store.pending_transactions().unwrap(), transactions)
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
// pending removed, will write nothing.
|
// pending removed, will write nothing.
|
||||||
let store = super::create(db.clone(), None, Dummy(vec![]));
|
let store = super::create(db.clone(), 0, Dummy(vec![]));
|
||||||
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -282,15 +282,15 @@ mod tests {
|
|||||||
PendingTransaction::new(signed, None)
|
PendingTransaction::new(signed, None)
|
||||||
});
|
});
|
||||||
|
|
||||||
let db = Arc::new(::kvdb_memorydb::create(0));
|
let db = Arc::new(::kvdb_memorydb::create(1));
|
||||||
{
|
{
|
||||||
// nothing written, will write bad.
|
// nothing written, will write bad.
|
||||||
let store = super::create(db.clone(), None, Dummy(transactions.clone()));
|
let store = super::create(db.clone(), 0, Dummy(transactions.clone()));
|
||||||
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
assert_eq!(store.pending_transactions().unwrap(), vec![])
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
// try to load transactions. The last transaction, which is invalid, will be skipped.
|
// try to load transactions. The last transaction, which is invalid, will be skipped.
|
||||||
let store = super::create(db.clone(), None, Dummy(vec![]));
|
let store = super::create(db.clone(), 0, Dummy(vec![]));
|
||||||
let loaded = store.pending_transactions().unwrap();
|
let loaded = store.pending_transactions().unwrap();
|
||||||
transactions.pop();
|
transactions.pop();
|
||||||
assert_eq!(loaded, transactions);
|
assert_eq!(loaded, transactions);
|
||||||
|
@ -37,7 +37,7 @@ pub fn migrate_blooms<P: AsRef<Path>>(path: P, config: &DatabaseConfig) -> Resul
|
|||||||
// 3u8 -> ExtrasIndex::BlocksBlooms
|
// 3u8 -> ExtrasIndex::BlocksBlooms
|
||||||
// 0u8 -> level 0
|
// 0u8 -> level 0
|
||||||
let blooms_iterator = db.key_value()
|
let blooms_iterator = db.key_value()
|
||||||
.iter_from_prefix(Some(3), &[3u8, 0u8])
|
.iter_from_prefix(3, &[3u8, 0u8])
|
||||||
.filter(|(key, _)| key.len() == 6)
|
.filter(|(key, _)| key.len() == 6)
|
||||||
.take_while(|(key, _)| {
|
.take_while(|(key, _)| {
|
||||||
key[0] == 3u8 && key[1] == 0u8
|
key[0] == 3u8 && key[1] == 0u8
|
||||||
@ -63,7 +63,7 @@ pub fn migrate_blooms<P: AsRef<Path>>(path: P, config: &DatabaseConfig) -> Resul
|
|||||||
// 1u8 -> TraceDBIndex::BloomGroups
|
// 1u8 -> TraceDBIndex::BloomGroups
|
||||||
// 0u8 -> level 0
|
// 0u8 -> level 0
|
||||||
let trace_blooms_iterator = db.key_value()
|
let trace_blooms_iterator = db.key_value()
|
||||||
.iter_from_prefix(Some(4), &[1u8, 0u8])
|
.iter_from_prefix(4, &[1u8, 0u8])
|
||||||
.filter(|(key, _)| key.len() == 6)
|
.filter(|(key, _)| key.len() == 6)
|
||||||
.take_while(|(key, _)| {
|
.take_while(|(key, _)| {
|
||||||
key[0] == 1u8 && key[1] == 0u8
|
key[0] == 1u8 && key[1] == 0u8
|
||||||
|
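Both bloom migrations above share one prefix-scan shape, now addressed by a bare u32 column id. A sketch of that shape, assuming the kvdb 0.2 iterator API the diff itself calls (items are (key, value) pairs of byte buffers):

use std::sync::Arc;
use kvdb::KeyValueDB;

fn scan_blooms(db: Arc<dyn KeyValueDB>) {
    // column 3, entries whose keys start with [extras index 3u8, level 0u8]
    for (key, _value) in db.iter_from_prefix(3, &[3u8, 0u8]) {
        // the migration keeps only 6-byte keys and re-checks the prefix bytes
        if key.len() == 6 && key[0] == 3u8 && key[1] == 0u8 {
            // ... convert the bloom and re-insert it into blooms-db ...
        }
    }
}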
@ -29,32 +29,36 @@ pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -
|
|||||||
|
|
||||||
/// Spreads the `total` (in MiB) memory budget across the db columns.
|
/// Spreads the `total` (in MiB) memory budget across the db columns.
|
||||||
/// If it's `None`, the default memory budget will be used for each column.
|
/// If it's `None`, the default memory budget will be used for each column.
|
||||||
pub fn memory_per_column(total: Option<usize>) -> HashMap<Option<u32>, usize> {
|
/// 90% of the memory budget is assigned to the first column, `col0`, which is where we store the
|
||||||
|
/// state.
|
||||||
|
pub fn memory_per_column(total: Option<usize>) -> HashMap<u32, usize> {
|
||||||
let mut memory_per_column = HashMap::new();
|
let mut memory_per_column = HashMap::new();
|
||||||
if let Some(budget) = total {
|
if let Some(budget) = total {
|
||||||
// spend 90% of the memory budget on the state column, but at least 256 MiB
|
// spend 90% of the memory budget on the state column, but at least 256 MiB
|
||||||
memory_per_column.insert(ethcore_db::COL_STATE, std::cmp::max(budget * 9 / 10, 256));
|
memory_per_column.insert(ethcore_db::COL_STATE, std::cmp::max(budget * 9 / 10, 256));
|
||||||
let num_columns = ethcore_db::NUM_COLUMNS.expect("NUM_COLUMNS is Some; qed");
|
|
||||||
// spread the remaining 10% evenly across columns
|
// spread the remaining 10% evenly across columns
|
||||||
let rest_budget = budget / 10 / (num_columns as usize - 1);
|
let rest_budget = budget / 10 / (ethcore_db::NUM_COLUMNS as usize - 1);
|
||||||
for i in 1..num_columns {
|
|
||||||
|
for i in 1..ethcore_db::NUM_COLUMNS {
|
||||||
// but at least 16 MiB for each column
|
// but at least 16 MiB for each column
|
||||||
memory_per_column.insert(Some(i), std::cmp::max(rest_budget, 16));
|
memory_per_column.insert(i, std::cmp::max(rest_budget, 16));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
memory_per_column
|
memory_per_column
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Spreads the `total` (in MiB) memory budget across the light db columns.
|
/// Spreads the `total` (in MiB) memory budget across the light db columns.
|
||||||
pub fn memory_per_column_light(total: usize) -> HashMap<Option<u32>, usize> {
|
pub fn memory_per_column_light(total: usize) -> HashMap<u32, usize> {
|
||||||
let mut memory_per_column = HashMap::new();
|
let mut memory_per_column = HashMap::new();
|
||||||
let num_columns = ethcore_db::NUM_COLUMNS.expect("NUM_COLUMNS is Some; qed");
|
|
||||||
// spread the memory budget evenly across columns
|
// spread the memory budget evenly across columns
|
||||||
// light client doesn't use the state column
|
// light client doesn't use the state column
|
||||||
let per_column = total / (num_columns as usize - 1);
|
let per_column = total / (ethcore_db::NUM_COLUMNS as usize - 1);
|
||||||
for i in 1..num_columns {
|
|
||||||
|
// Note: `col0` (State) is not used by the light client, so we set it to a low value.
|
||||||
|
memory_per_column.insert(0, 1);
|
||||||
|
for i in 1..ethcore_db::NUM_COLUMNS {
|
||||||
// but at least 4 MiB for each column
|
// but at least 4 MiB for each column
|
||||||
memory_per_column.insert(Some(i), std::cmp::max(per_column, 4));
|
memory_per_column.insert(i, std::cmp::max(per_column, 4));
|
||||||
}
|
}
|
||||||
memory_per_column
|
memory_per_column
|
||||||
}
|
}
|
||||||
|
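A worked example of the split implemented above, assuming NUM_COLUMNS = 9 (the post-TO_V14 layout from the migration constants in the next hunk): 90% of the budget goes to column 0, the state column, with a 256 MiB floor, and the remaining 10% is divided evenly over the other eight columns with a 16 MiB floor.

use std::collections::HashMap;

const NUM_COLUMNS: u32 = 9; // assumption for illustration (v14 layout)

fn memory_per_column(total: Option<usize>) -> HashMap<u32, usize> {
    let mut map = HashMap::new();
    if let Some(budget) = total {
        map.insert(0, std::cmp::max(budget * 9 / 10, 256)); // COL_STATE
        let rest_budget = budget / 10 / (NUM_COLUMNS as usize - 1);
        for i in 1..NUM_COLUMNS {
            map.insert(i, std::cmp::max(rest_budget, 16));
        }
    }
    map
}

fn main() {
    // A 4096 MiB budget: state gets 3686 MiB, each other column 51 MiB.
    let m = memory_per_column(Some(4096));
    assert_eq!(m[&0], 3686);
    assert_eq!(m[&1], 51);
}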
@ -29,24 +29,24 @@ use super::blooms::migrate_blooms;
|
|||||||
/// The migration from v10 to v11.
|
/// The migration from v10 to v11.
|
||||||
/// Adds a column for node info.
|
/// Adds a column for node info.
|
||||||
pub const TO_V11: ChangeColumns = ChangeColumns {
|
pub const TO_V11: ChangeColumns = ChangeColumns {
|
||||||
pre_columns: Some(6),
|
pre_columns: 6,
|
||||||
post_columns: Some(7),
|
post_columns: 7,
|
||||||
version: 11,
|
version: 11,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// The migration from v11 to v12.
|
/// The migration from v11 to v12.
|
||||||
/// Adds a column for light chain storage.
|
/// Adds a column for light chain storage.
|
||||||
pub const TO_V12: ChangeColumns = ChangeColumns {
|
pub const TO_V12: ChangeColumns = ChangeColumns {
|
||||||
pre_columns: Some(7),
|
pre_columns: 7,
|
||||||
post_columns: Some(8),
|
post_columns: 8,
|
||||||
version: 12,
|
version: 12,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// The migration from v12 to v14.
|
/// The migration from v12 to v14.
|
||||||
/// Adds a column for private transactions state storage.
|
/// Adds a column for private transactions state storage.
|
||||||
pub const TO_V14: ChangeColumns = ChangeColumns {
|
pub const TO_V14: ChangeColumns = ChangeColumns {
|
||||||
pre_columns: Some(8),
|
pre_columns: 8,
|
||||||
post_columns: Some(9),
|
post_columns: 9,
|
||||||
version: 14,
|
version: 14,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
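With no default column left to model, the ChangeColumns descriptors carry plain counts. A minimal sketch of the shape these constants assume; the real struct lives in migration-rocksdb and its field types are an assumption here:

pub struct ChangeColumns {
    pub pre_columns: u32,  // was Option<u32>
    pub post_columns: u32, // was Option<u32>
    pub version: u32,
}

pub const TO_V14: ChangeColumns = ChangeColumns {
    pre_columns: 8,
    post_columns: 9, // adds the private transactions state column
    version: 14,
};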
@ -114,7 +114,7 @@ fn take_spec_name_override() -> Option<String> {
|
|||||||
|
|
||||||
#[cfg(windows)]
|
#[cfg(windows)]
|
||||||
fn global_cleanup() {
|
fn global_cleanup() {
|
||||||
// We need to cleanup all sockets before spawning another Parity process. This makes sure everything is cleaned up.
|
// We need to clean up all sockets before spawning another Parity process. This makes sure everything is cleaned up.
|
||||||
// The loop is required because of the winsock dll's internal reference counter. We don't know how many of the
|
// The loop is required because of the winsock dll's internal reference counter. We don't know how many of the
|
||||||
// crates we use initialize it; there are at least 2 now.
|
// crates we use initialize it; there are at least 2 now.
|
||||||
for _ in 0.. 10 {
|
for _ in 0.. 10 {
|
||||||
|
@ -21,8 +21,8 @@ ethkey = { path = "../accounts/ethkey", optional = true }
|
|||||||
futures = "0.1"
|
futures = "0.1"
|
||||||
hyper = { version = "0.12", default-features = false }
|
hyper = { version = "0.12", default-features = false }
|
||||||
keccak-hash = "0.4.0"
|
keccak-hash = "0.4.0"
|
||||||
kvdb = "0.1"
|
kvdb = "0.2"
|
||||||
kvdb-rocksdb = "0.2.0"
|
kvdb-rocksdb = "0.3.0"
|
||||||
lazy_static = "1.0"
|
lazy_static = "1.0"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
parity-bytes = "0.1"
|
parity-bytes = "0.1"
|
||||||
@ -47,7 +47,7 @@ env_logger = "0.5"
|
|||||||
ethkey = { path = "../accounts/ethkey" }
|
ethkey = { path = "../accounts/ethkey" }
|
||||||
ethcore = { path = "../ethcore", features = ["test-helpers"] }
|
ethcore = { path = "../ethcore", features = ["test-helpers"] }
|
||||||
tempdir = "0.3"
|
tempdir = "0.3"
|
||||||
kvdb-rocksdb = "0.2.0"
|
kvdb-rocksdb = "0.3.0"
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
accounts = ["ethcore-accounts", "ethkey"]
|
accounts = ["ethcore-accounts", "ethkey"]
|
||||||
|
@ -34,7 +34,7 @@ const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checke
|
|||||||
|
|
||||||
/// ACL storage of Secret Store
|
/// ACL storage of Secret Store
|
||||||
pub trait AclStorage: Send + Sync {
|
pub trait AclStorage: Send + Sync {
|
||||||
/// Check if requestor can access document with hash `document`
|
/// Check if requester can access document with hash `document`
|
||||||
fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error>;
|
fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error>;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -132,7 +132,7 @@ impl CachedContract {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DummyAclStorage {
|
impl DummyAclStorage {
|
||||||
/// Prohibit given requestor access to given documents
|
/// Prohibit given requester access to given documents
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub fn prohibit(&self, requester: Address, document: ServerKeyId) {
|
pub fn prohibit(&self, requester: Address, document: ServerKeyId) {
|
||||||
self.prohibited.write()
|
self.prohibited.write()
|
||||||
|
@ -120,7 +120,7 @@ impl KeyStorage for PersistentKeyStorage {
|
|||||||
let key: SerializableDocumentKeyShareV3 = key.into();
|
let key: SerializableDocumentKeyShareV3 = key.into();
|
||||||
let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?;
|
let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?;
|
||||||
let mut batch = self.db.transaction();
|
let mut batch = self.db.transaction();
|
||||||
batch.put(Some(0), document.as_bytes(), &key);
|
batch.put(0, document.as_bytes(), &key);
|
||||||
self.db.write(batch).map_err(Into::into)
|
self.db.write(batch).map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -129,7 +129,7 @@ impl KeyStorage for PersistentKeyStorage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error> {
|
fn get(&self, document: &ServerKeyId) -> Result<Option<DocumentKeyShare>, Error> {
|
||||||
self.db.get(Some(0), document.as_bytes())
|
self.db.get(0, document.as_bytes())
|
||||||
.map_err(|e| Error::Database(e.to_string()))
|
.map_err(|e| Error::Database(e.to_string()))
|
||||||
.and_then(|key| match key {
|
.and_then(|key| match key {
|
||||||
None => Ok(None),
|
None => Ok(None),
|
||||||
@ -142,28 +142,28 @@ impl KeyStorage for PersistentKeyStorage {
|
|||||||
|
|
||||||
fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
|
fn remove(&self, document: &ServerKeyId) -> Result<(), Error> {
|
||||||
let mut batch = self.db.transaction();
|
let mut batch = self.db.transaction();
|
||||||
batch.delete(Some(0), document.as_bytes());
|
batch.delete(0, document.as_bytes());
|
||||||
self.db.write(batch).map_err(Into::into)
|
self.db.write(batch).map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn clear(&self) -> Result<(), Error> {
|
fn clear(&self) -> Result<(), Error> {
|
||||||
let mut batch = self.db.transaction();
|
let mut batch = self.db.transaction();
|
||||||
for (key, _) in self.iter() {
|
for (key, _) in self.iter() {
|
||||||
batch.delete(Some(0), key.as_bytes());
|
batch.delete(0, key.as_bytes());
|
||||||
}
|
}
|
||||||
self.db.write(batch)
|
self.db.write(batch)
|
||||||
.map_err(|e| Error::Database(e.to_string()))
|
.map_err(|e| Error::Database(e.to_string()))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn contains(&self, document: &ServerKeyId) -> bool {
|
fn contains(&self, document: &ServerKeyId) -> bool {
|
||||||
self.db.get(Some(0), document.as_bytes())
|
self.db.get(0, document.as_bytes())
|
||||||
.map(|k| k.is_some())
|
.map(|k| k.is_some())
|
||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
|
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item=(ServerKeyId, DocumentKeyShare)> + 'a> {
|
||||||
Box::new(PersistentKeyStorageIterator {
|
Box::new(PersistentKeyStorageIterator {
|
||||||
iter: self.db.iter(Some(0)),
|
iter: self.db.iter(0),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
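Every secret-store access above follows one pattern: the single column is addressed as 0 wherever Some(0) used to appear, and reads hand back Vec<u8> values. A self-contained sketch of the round trip under kvdb-memorydb 0.2:

use std::sync::Arc;
use kvdb::KeyValueDB;

fn roundtrip() -> std::io::Result<()> {
    let db: Arc<dyn KeyValueDB> = Arc::new(kvdb_memorydb::create(1));

    let mut batch = db.transaction();
    batch.put(0, b"document-id", b"serialized-key-share");
    db.write(batch)?;

    assert!(db.get(0, b"document-id")?.is_some());

    let mut batch = db.transaction();
    batch.delete(0, b"document-id");
    db.write(batch)
}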
@ -350,7 +350,7 @@ pub mod tests {
|
|||||||
};
|
};
|
||||||
let key3 = ServerKeyId::from_low_u64_be(3);
|
let key3 = ServerKeyId::from_low_u64_be(3);
|
||||||
|
|
||||||
let db_config = DatabaseConfig::with_columns(Some(1));
|
let db_config = DatabaseConfig::with_columns(1);
|
||||||
let db = Database::open(&db_config, &tempdir.path().display().to_string()).unwrap();
|
let db = Database::open(&db_config, &tempdir.path().display().to_string()).unwrap();
|
||||||
|
|
||||||
let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap();
|
let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap();
|
||||||
|
@ -104,7 +104,7 @@ pub fn open_secretstore_db(data_path: &str) -> Result<Arc<dyn KeyValueDB>, Strin
|
|||||||
db_path.push("db");
|
db_path.push("db");
|
||||||
let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?;
|
let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?;
|
||||||
|
|
||||||
let config = DatabaseConfig::with_columns(Some(1));
|
let config = DatabaseConfig::with_columns(1);
|
||||||
Ok(Arc::new(Database::open(&config, &db_path).map_err(|e| format!("Error opening database: {:?}", e))?))
|
Ok(Arc::new(Database::open(&config, &db_path).map_err(|e| format!("Error opening database: {:?}", e))?))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
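The same one-column layout applies when the secret store opens its on-disk database; under kvdb-rocksdb 0.3 the config takes a column count directly. A hedged sketch mirroring open_secretstore_db above:

use std::sync::Arc;
use kvdb_rocksdb::{Database, DatabaseConfig};

fn open_secretstore(db_path: &str) -> Result<Arc<Database>, String> {
    let config = DatabaseConfig::with_columns(1); // was: with_columns(Some(1))
    Database::open(&config, db_path)
        .map(Arc::new)
        .map_err(|e| format!("Error opening database: {:?}", e))
}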
@ -19,14 +19,9 @@
|
|||||||
|
|
||||||
use std::fmt::{Display, Error as FmtError, Formatter};
|
use std::fmt::{Display, Error as FmtError, Formatter};
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::io::{Error as IoError, ErrorKind as IoErrorKind, Read as _, Write as _};
|
use std::io::{Error as IoError, ErrorKind as IoErrorKind, Read as _};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
use kvdb::DBTransaction;
|
|
||||||
use kvdb_rocksdb::{Database, DatabaseConfig};
|
|
||||||
|
|
||||||
/// We used to store the version in the database (until version 4).
|
|
||||||
const LEGACY_DB_META_KEY_VERSION: &[u8; 7] = b"version";
|
|
||||||
/// Current db version.
|
/// Current db version.
|
||||||
const CURRENT_VERSION: u8 = 4;
|
const CURRENT_VERSION: u8 = 4;
|
||||||
/// Database is assumed to be at the default version, when no version file is found.
|
/// Database is assumed to be at the default version, when no version file is found.
|
||||||
@ -34,14 +29,16 @@ const DEFAULT_VERSION: u8 = 3;
|
|||||||
/// Version file name.
|
/// Version file name.
|
||||||
const VERSION_FILE_NAME: &str = "db_version";
|
const VERSION_FILE_NAME: &str = "db_version";
|
||||||
|
|
||||||
/// Migration related erorrs.
|
/// Migration related errors.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
/// Returned when current version cannot be read or guessed.
|
/// Returned when current version cannot be read or guessed.
|
||||||
UnknownDatabaseVersion,
|
UnknownDatabaseVersion,
|
||||||
/// Existing DB is newer than the known one.
|
/// Existing DB is newer than the known one.
|
||||||
FutureDBVersion,
|
FutureDBVersion,
|
||||||
/// Migration was completed succesfully,
|
/// Migration using parity-ethereum 2.6.7 is required.
|
||||||
|
MigrationWithLegacyVersionRequired,
|
||||||
|
/// Migration was completed successfully,
|
||||||
/// but there was a problem with io.
|
/// but there was a problem with io.
|
||||||
Io(IoError),
|
Io(IoError),
|
||||||
}
|
}
|
||||||
@ -54,6 +51,9 @@ impl Display for Error {
|
|||||||
Error::FutureDBVersion =>
|
Error::FutureDBVersion =>
|
||||||
"Secret Store database was created with newer client version.\
|
"Secret Store database was created with newer client version.\
|
||||||
Upgrade your client or delete DB and resync.".into(),
|
Upgrade your client or delete DB and resync.".into(),
|
||||||
|
Error::MigrationWithLegacyVersionRequired =>
|
||||||
|
"Secret Store database was created with an older client version.\
|
||||||
|
To migrate, use parity-ethereum v2.6.7, then retry using the latest.".into(),
|
||||||
Error::Io(ref err) =>
|
Error::Io(ref err) =>
|
||||||
format!("Unexpected io error on Secret Store database migration: {}.", err),
|
format!("Unexpected io error on Secret Store database migration: {}.", err),
|
||||||
};
|
};
|
||||||
@ -67,75 +67,17 @@ impl From<IoError> for Error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Moves "default" column to column 0 in preparation for a kvdb-rocksdb 0.3 migration.
|
|
||||||
fn migrate_to_v4(parent_dir: &str) -> Result<(), Error> {
|
|
||||||
// Naïve implementation until
|
|
||||||
// https://github.com/facebook/rocksdb/issues/6130 is resolved
|
|
||||||
let old_db_config = DatabaseConfig::with_columns(Some(1));
|
|
||||||
let new_db_config = DatabaseConfig::with_columns(Some(1));
|
|
||||||
const BATCH_SIZE: usize = 1024;
|
|
||||||
|
|
||||||
let old_dir = db_dir(parent_dir);
|
|
||||||
let new_dir = migration_dir(parent_dir);
|
|
||||||
let old_db = Database::open(&old_db_config, &old_dir)?;
|
|
||||||
let new_db = Database::open(&new_db_config, &new_dir)?;
|
|
||||||
|
|
||||||
const OLD_COLUMN: Option<u32> = None;
|
|
||||||
const NEW_COLUMN: Option<u32> = Some(0);
|
|
||||||
|
|
||||||
// remove legacy version key
|
|
||||||
{
|
|
||||||
let mut batch = DBTransaction::with_capacity(1);
|
|
||||||
batch.delete(OLD_COLUMN, LEGACY_DB_META_KEY_VERSION);
|
|
||||||
if let Err(err) = old_db.write(batch) {
|
|
||||||
error!(target: "migration", "Failed to delete db version {}", &err);
|
|
||||||
return Err(err.into());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut batch = DBTransaction::with_capacity(BATCH_SIZE);
|
|
||||||
for (i, (key, value)) in old_db.iter(OLD_COLUMN).enumerate() {
|
|
||||||
batch.put(NEW_COLUMN, &key, &value);
|
|
||||||
if i % BATCH_SIZE == 0 {
|
|
||||||
new_db.write(batch)?;
|
|
||||||
batch = DBTransaction::with_capacity(BATCH_SIZE);
|
|
||||||
info!(target: "migration", "Migrating Secret Store DB: {} keys written", i);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
new_db.write(batch)?;
|
|
||||||
drop(new_db);
|
|
||||||
old_db.restore(&new_dir)?;
|
|
||||||
|
|
||||||
info!(target: "migration", "Secret Store migration finished");
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Apply all migrations if possible.
|
/// Apply all migrations if possible.
|
||||||
pub fn upgrade_db(db_path: &str) -> Result<(), Error> {
|
pub fn upgrade_db(db_path: &str) -> Result<(), Error> {
|
||||||
match current_version(db_path)? {
|
match current_version(db_path)? {
|
||||||
old_version if old_version < CURRENT_VERSION => {
|
old_version if old_version < CURRENT_VERSION => {
|
||||||
migrate_to_v4(db_path)?;
|
Err(Error::MigrationWithLegacyVersionRequired)
|
||||||
update_version(db_path)?;
|
|
||||||
Ok(())
|
|
||||||
},
|
},
|
||||||
CURRENT_VERSION => Ok(()),
|
CURRENT_VERSION => Ok(()),
|
||||||
_ => Err(Error::FutureDBVersion),
|
_ => Err(Error::FutureDBVersion),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn db_dir(path: &str) -> String {
|
|
||||||
let mut dir = PathBuf::from(path);
|
|
||||||
dir.push("db");
|
|
||||||
dir.to_string_lossy().to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn migration_dir(path: &str) -> String {
|
|
||||||
let mut dir = PathBuf::from(path);
|
|
||||||
dir.push("migration");
|
|
||||||
dir.to_string_lossy().to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the version file path.
|
/// Returns the version file path.
|
||||||
fn version_file_path(path: &str) -> PathBuf {
|
fn version_file_path(path: &str) -> PathBuf {
|
||||||
let mut file_path = PathBuf::from(path);
|
let mut file_path = PathBuf::from(path);
|
||||||
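The net effect of the hunk above (together with the deletions in the next hunk) is a policy change rather than a refactor: the in-place copy into a new column family is gone, and an old database is reported instead of migrated. A sketch of what a caller sees, using the module's own upgrade_db and Error as shown above:

fn ensure_db_current(db_path: &str) -> Result<(), String> {
    match upgrade_db(db_path) {
        Ok(()) => Ok(()),
        Err(Error::MigrationWithLegacyVersionRequired) =>
            Err("run parity-ethereum v2.6.7 once to migrate, then retry".into()),
        Err(e) => Err(e.to_string()),
    }
}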
@ -157,42 +99,3 @@ fn current_version(path: &str) -> Result<u8, Error> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Writes current database version to the file.
|
|
||||||
/// Creates a new file if the version file does not exist yet.
|
|
||||||
fn update_version(path: &str) -> Result<(), Error> {
|
|
||||||
let mut file = fs::File::create(version_file_path(path))?;
|
|
||||||
file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
use tempdir::TempDir;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn migration_works() -> Result<(), Error> {
|
|
||||||
let parent = TempDir::new("secret_store_migration")?.into_path();
|
|
||||||
|
|
||||||
let mut db_path = parent.clone();
|
|
||||||
db_path.push("db");
|
|
||||||
let db_path = db_path.to_str().unwrap();
|
|
||||||
let parent_path = parent.to_str().unwrap();
|
|
||||||
|
|
||||||
let old_db = Database::open(&DatabaseConfig::with_columns(None), db_path)?;
|
|
||||||
|
|
||||||
let mut batch = old_db.transaction();
|
|
||||||
batch.put(None, b"key1", b"value1");
|
|
||||||
batch.put(None, b"key2", b"value2");
|
|
||||||
old_db.write(batch)?;
|
|
||||||
drop(old_db);
|
|
||||||
|
|
||||||
upgrade_db(parent_path)?;
|
|
||||||
let migrated = Database::open(&DatabaseConfig::with_columns(Some(1)), db_path)?;
|
|
||||||
|
|
||||||
assert_eq!(migrated.get(Some(0), b"key1")?.expect("key1"), b"value1".to_vec());
|
|
||||||
assert_eq!(migrated.get(Some(0), b"key2")?.expect("key2"), b"value2".to_vec());
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
@ -10,11 +10,11 @@ edition = "2018"
|
|||||||
parity-bytes = "0.1"
|
parity-bytes = "0.1"
|
||||||
ethereum-types = "0.8.0"
|
ethereum-types = "0.8.0"
|
||||||
hash-db = "0.15.0"
|
hash-db = "0.15.0"
|
||||||
malloc_size_of = { version = "0.2", package = "parity-util-mem" }
|
malloc_size_of = { version = "0.3.0", package = "parity-util-mem" }
|
||||||
keccak-hasher = { path = "../keccak-hasher" }
|
keccak-hasher = { path = "../keccak-hasher" }
|
||||||
kvdb = "0.1"
|
kvdb = "0.2"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
memory-db = "0.15.0"
|
memory-db = "0.18.0"
|
||||||
parking_lot = "0.9"
|
parking_lot = "0.9"
|
||||||
fastmap = { path = "../../util/fastmap" }
|
fastmap = { path = "../../util/fastmap" }
|
||||||
rlp = "0.4.0"
|
rlp = "0.4.0"
|
||||||
@ -22,4 +22,4 @@ rlp = "0.4.0"
|
|||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
env_logger = "0.5"
|
env_logger = "0.5"
|
||||||
keccak-hash = "0.4.0"
|
keccak-hash = "0.4.0"
|
||||||
kvdb-memorydb = "0.1.2"
|
kvdb-memorydb = "0.2.0"
|
||||||
|
@ -46,12 +46,12 @@ pub struct ArchiveDB {
|
|||||||
overlay: super::MemoryDB,
|
overlay: super::MemoryDB,
|
||||||
backing: Arc<dyn KeyValueDB>,
|
backing: Arc<dyn KeyValueDB>,
|
||||||
latest_era: Option<u64>,
|
latest_era: Option<u64>,
|
||||||
column: Option<u32>,
|
column: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ArchiveDB {
|
impl ArchiveDB {
|
||||||
/// Create a new instance from a key-value db.
|
/// Create a new instance from a key-value db.
|
||||||
pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> ArchiveDB {
|
pub fn new(backing: Arc<dyn KeyValueDB>, column: u32) -> ArchiveDB {
|
||||||
let latest_era = backing.get(column, &LATEST_ERA_KEY)
|
let latest_era = backing.get(column, &LATEST_ERA_KEY)
|
||||||
.expect("Low-level database error.")
|
.expect("Low-level database error.")
|
||||||
.map(|val| decode::<u64>(&val).expect("decoding db value failed"));
|
.map(|val| decode::<u64>(&val).expect("decoding db value failed"));
|
||||||
@ -108,7 +108,7 @@ impl JournalDB for ArchiveDB {
|
|||||||
|
|
||||||
fn mem_used(&self) -> usize {
|
fn mem_used(&self) -> usize {
|
||||||
self.overlay.malloc_size_of()
|
self.overlay.malloc_size_of()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn is_empty(&self) -> bool {
|
fn is_empty(&self) -> bool {
|
||||||
self.latest_era.is_none()
|
self.latest_era.is_none()
|
||||||
@ -214,7 +214,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn insert_same_in_fork() {
|
fn insert_same_in_fork() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
|
||||||
|
|
||||||
let x = jdb.insert(EMPTY_PREFIX, b"X");
|
let x = jdb.insert(EMPTY_PREFIX, b"X");
|
||||||
commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap();
|
commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap();
|
||||||
@ -236,7 +236,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn long_history() {
|
fn long_history() {
|
||||||
// history is 3
|
// history is 3
|
||||||
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
|
||||||
let h = jdb.insert(EMPTY_PREFIX, b"foo");
|
let h = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.contains(&h, EMPTY_PREFIX));
|
assert!(jdb.contains(&h, EMPTY_PREFIX));
|
||||||
@ -254,7 +254,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
#[should_panic]
|
#[should_panic]
|
||||||
fn multiple_owed_removal_not_allowed() {
|
fn multiple_owed_removal_not_allowed() {
|
||||||
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
|
||||||
let h = jdb.insert(EMPTY_PREFIX, b"foo");
|
let h = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.contains(&h, EMPTY_PREFIX));
|
assert!(jdb.contains(&h, EMPTY_PREFIX));
|
||||||
@ -268,7 +268,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn complex() {
|
fn complex() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
|
||||||
|
|
||||||
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
|
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
|
||||||
@ -300,7 +300,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn fork() {
|
fn fork() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
|
||||||
|
|
||||||
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
|
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
|
||||||
@ -326,7 +326,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn overwrite() {
|
fn overwrite() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
|
||||||
|
|
||||||
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
||||||
@ -345,7 +345,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn fork_same_key() {
|
fn fork_same_key() {
|
||||||
// history is 1
|
// history is 1
|
||||||
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
|
||||||
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
||||||
|
|
||||||
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
@ -361,26 +361,26 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen() {
|
fn reopen() {
|
||||||
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
let shared_db = Arc::new(kvdb_memorydb::create(1));
|
||||||
let bar = H256::random();
|
let bar = H256::random();
|
||||||
|
|
||||||
let foo = {
|
let foo = {
|
||||||
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
jdb.emplace(bar.clone(), EMPTY_PREFIX, DBValue::from_slice(b"bar"));
|
jdb.emplace(bar.clone(), EMPTY_PREFIX, b"bar".to_vec());
|
||||||
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
||||||
foo
|
foo
|
||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
|
||||||
jdb.remove(&foo, EMPTY_PREFIX);
|
jdb.remove(&foo, EMPTY_PREFIX);
|
||||||
commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = ArchiveDB::new(shared_db, None);
|
let mut jdb = ArchiveDB::new(shared_db, 0);
|
||||||
assert!(jdb.contains(&foo, EMPTY_PREFIX));
|
assert!(jdb.contains(&foo, EMPTY_PREFIX));
|
||||||
assert!(jdb.contains(&bar, EMPTY_PREFIX));
|
assert!(jdb.contains(&bar, EMPTY_PREFIX));
|
||||||
commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
|
commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
|
||||||
@ -389,10 +389,10 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen_remove() {
|
fn reopen_remove() {
|
||||||
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
let shared_db = Arc::new(kvdb_memorydb::create(1));
|
||||||
|
|
||||||
let foo = {
|
let foo = {
|
||||||
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
||||||
@ -406,7 +406,7 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = ArchiveDB::new(shared_db, None);
|
let mut jdb = ArchiveDB::new(shared_db, 0);
|
||||||
jdb.remove(&foo, EMPTY_PREFIX);
|
jdb.remove(&foo, EMPTY_PREFIX);
|
||||||
commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap();
|
commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap();
|
||||||
assert!(jdb.contains(&foo, EMPTY_PREFIX));
|
assert!(jdb.contains(&foo, EMPTY_PREFIX));
|
||||||
@ -418,9 +418,9 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen_fork() {
|
fn reopen_fork() {
|
||||||
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
let shared_db = Arc::new(kvdb_memorydb::create(1));
|
||||||
let (foo, _, _) = {
|
let (foo, _, _) = {
|
||||||
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
|
let bar = jdb.insert(EMPTY_PREFIX, b"bar");
|
||||||
@ -435,7 +435,7 @@ mod tests {
|
|||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = ArchiveDB::new(shared_db, None);
|
let mut jdb = ArchiveDB::new(shared_db, 0);
|
||||||
commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
|
commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
|
||||||
assert!(jdb.contains(&foo, EMPTY_PREFIX));
|
assert!(jdb.contains(&foo, EMPTY_PREFIX));
|
||||||
}
|
}
|
||||||
@ -443,17 +443,17 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn returns_state() {
|
fn returns_state() {
|
||||||
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
let shared_db = Arc::new(kvdb_memorydb::create(1));
|
||||||
|
|
||||||
let key = {
|
let key = {
|
||||||
let mut jdb = ArchiveDB::new(shared_db.clone(), None);
|
let mut jdb = ArchiveDB::new(shared_db.clone(), 0);
|
||||||
let key = jdb.insert(EMPTY_PREFIX, b"foo");
|
let key = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
||||||
key
|
key
|
||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let jdb = ArchiveDB::new(shared_db, None);
|
let jdb = ArchiveDB::new(shared_db, 0);
|
||||||
let state = jdb.state(&key);
|
let state = jdb.state(&key);
|
||||||
assert!(state.is_some());
|
assert!(state.is_some());
|
||||||
}
|
}
|
||||||
@ -461,11 +461,11 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn inject() {
|
fn inject() {
|
||||||
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
|
let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
|
||||||
let key = jdb.insert(EMPTY_PREFIX, b"dog");
|
let key = jdb.insert(EMPTY_PREFIX, b"dog");
|
||||||
inject_batch(&mut jdb).unwrap();
|
inject_batch(&mut jdb).unwrap();
|
||||||
|
|
||||||
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog"));
|
assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
|
||||||
jdb.remove(&key, EMPTY_PREFIX);
|
jdb.remove(&key, EMPTY_PREFIX);
|
||||||
inject_batch(&mut jdb).unwrap();
|
inject_batch(&mut jdb).unwrap();
|
||||||
|
|
||||||
|
@ -111,20 +111,20 @@ pub struct EarlyMergeDB {
|
|||||||
backing: Arc<dyn KeyValueDB>,
|
backing: Arc<dyn KeyValueDB>,
|
||||||
refs: Option<Arc<RwLock<HashMap<H256, RefInfo>>>>,
|
refs: Option<Arc<RwLock<HashMap<H256, RefInfo>>>>,
|
||||||
latest_era: Option<u64>,
|
latest_era: Option<u64>,
|
||||||
column: Option<u32>,
|
column: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EarlyMergeDB {
|
impl EarlyMergeDB {
|
||||||
/// Create a new instance from a key-value db.
|
/// Create a new instance from a key-value db.
|
||||||
pub fn new(backing: Arc<dyn KeyValueDB>, col: Option<u32>) -> EarlyMergeDB {
|
pub fn new(backing: Arc<dyn KeyValueDB>, column: u32) -> EarlyMergeDB {
|
||||||
let (latest_era, refs) = EarlyMergeDB::read_refs(&*backing, col);
|
let (latest_era, refs) = EarlyMergeDB::read_refs(&*backing, column);
|
||||||
let refs = Some(Arc::new(RwLock::new(refs)));
|
let refs = Some(Arc::new(RwLock::new(refs)));
|
||||||
EarlyMergeDB {
|
EarlyMergeDB {
|
||||||
overlay: new_memory_db(),
|
overlay: new_memory_db(),
|
||||||
backing: backing,
|
backing,
|
||||||
refs: refs,
|
refs,
|
||||||
latest_era: latest_era,
|
latest_era,
|
||||||
column: col,
|
column,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -135,13 +135,13 @@ impl EarlyMergeDB {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// The next three are valid only as long as there is an insert operation of `key` in the journal.
|
// The next three are valid only as long as there is an insert operation of `key` in the journal.
|
||||||
fn set_already_in(batch: &mut DBTransaction, col: Option<u32>, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]); }
|
fn set_already_in(batch: &mut DBTransaction, col: u32, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]); }
|
||||||
fn reset_already_in(batch: &mut DBTransaction, col: Option<u32>, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)); }
|
fn reset_already_in(batch: &mut DBTransaction, col: u32, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)); }
|
||||||
fn is_already_in(backing: &dyn KeyValueDB, col: Option<u32>, key: &H256) -> bool {
|
fn is_already_in(backing: &dyn KeyValueDB, col: u32, key: &H256) -> bool {
|
||||||
backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
|
backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn insert_keys(inserts: &[(H256, DBValue)], backing: &dyn KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction) {
|
fn insert_keys(inserts: &[(H256, DBValue)], backing: &dyn KeyValueDB, col: u32, refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction) {
|
||||||
for &(ref h, ref d) in inserts {
|
for &(ref h, ref d) in inserts {
|
||||||
match refs.entry(*h) {
|
match refs.entry(*h) {
|
||||||
Entry::Occupied(mut entry) => {
|
Entry::Occupied(mut entry) => {
|
||||||
@ -174,7 +174,7 @@ impl EarlyMergeDB {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn replay_keys(inserts: &[H256], backing: &dyn KeyValueDB, col: Option<u32>, refs: &mut HashMap<H256, RefInfo>) {
|
fn replay_keys(inserts: &[H256], backing: &dyn KeyValueDB, col: u32, refs: &mut HashMap<H256, RefInfo>) {
|
||||||
trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs);
|
trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs);
|
||||||
for h in inserts {
|
for h in inserts {
|
||||||
match refs.entry(*h) {
|
match refs.entry(*h) {
|
||||||
@ -195,7 +195,7 @@ impl EarlyMergeDB {
|
|||||||
trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs);
|
trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction, col: Option<u32>, from: RemoveFrom) {
|
fn remove_keys(deletes: &[H256], refs: &mut HashMap<H256, RefInfo>, batch: &mut DBTransaction, col: u32, from: RemoveFrom) {
|
||||||
// with a remove on {queue_refs: 1, in_archive: true}, we have two options:
|
// with a remove on {queue_refs: 1, in_archive: true}, we have two options:
|
||||||
// - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive)
|
// - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive)
|
||||||
// - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue)
|
// - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue)
|
||||||
@ -264,7 +264,7 @@ impl EarlyMergeDB {
|
|||||||
.expect("Low-level database error. Some issue with your hard disk?")
|
.expect("Low-level database error. Some issue with your hard disk?")
|
||||||
}
|
}
|
||||||
|
|
||||||
fn read_refs(db: &dyn KeyValueDB, col: Option<u32>) -> (Option<u64>, HashMap<H256, RefInfo>) {
|
fn read_refs(db: &dyn KeyValueDB, col: u32) -> (Option<u64>, HashMap<H256, RefInfo>) {
|
||||||
let mut refs = HashMap::new();
|
let mut refs = HashMap::new();
|
||||||
let mut latest_era = None;
|
let mut latest_era = None;
|
||||||
if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") {
|
if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") {
|
||||||
@ -788,34 +788,34 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn new_db() -> EarlyMergeDB {
|
fn new_db() -> EarlyMergeDB {
|
||||||
let backing = Arc::new(kvdb_memorydb::create(0));
|
let backing = Arc::new(kvdb_memorydb::create(1));
|
||||||
EarlyMergeDB::new(backing, None)
|
EarlyMergeDB::new(backing, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn reopen() {
|
fn reopen() {
|
||||||
let shared_db = Arc::new(kvdb_memorydb::create(0));
|
let shared_db = Arc::new(kvdb_memorydb::create(1));
|
||||||
let bar = H256::random();
|
let bar = H256::random();
|
||||||
|
|
||||||
let foo = {
|
let foo = {
|
||||||
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
|
let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
|
||||||
// history is 1
|
// history is 1
|
||||||
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
let foo = jdb.insert(EMPTY_PREFIX, b"foo");
|
||||||
jdb.emplace(bar.clone(), EMPTY_PREFIX, DBValue::from_slice(b"bar"));
|
jdb.emplace(bar.clone(), EMPTY_PREFIX, b"bar".to_vec());
|
||||||
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
|
||||||
assert!(jdb.can_reconstruct_refs());
|
assert!(jdb.can_reconstruct_refs());
|
||||||
foo
|
foo
|
||||||
};
|
};
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
|
let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
|
||||||
jdb.remove(&foo, EMPTY_PREFIX);
|
jdb.remove(&foo, EMPTY_PREFIX);
|
||||||
 			commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 		}

 		{
-			let mut jdb = EarlyMergeDB::new(shared_db, None);
+			let mut jdb = EarlyMergeDB::new(shared_db, 0);
 			assert!(jdb.contains(&foo, EMPTY_PREFIX));
 			assert!(jdb.contains(&bar, EMPTY_PREFIX));
 			commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@@ -964,11 +964,11 @@ mod tests {
 	fn reopen_remove_three() {
 		let _ = ::env_logger::try_init();

-		let shared_db = Arc::new(kvdb_memorydb::create(0));
+		let shared_db = Arc::new(kvdb_memorydb::create(1));
 		let foo = keccak(b"foo");

 		{
-			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
 			// history is 1
 			jdb.insert(EMPTY_PREFIX, b"foo");
 			commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
@@ -990,7 +990,7 @@ mod tests {

 		// incantation to reopen the db
 		}; {
-			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);

 			jdb.remove(&foo, EMPTY_PREFIX);
 			commit_batch(&mut jdb, 4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
@@ -999,7 +999,7 @@ mod tests {

 		// incantation to reopen the db
 		}; {
-			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);

 			commit_batch(&mut jdb, 5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
@@ -1007,7 +1007,7 @@ mod tests {

 		// incantation to reopen the db
 		}; {
-			let mut jdb = EarlyMergeDB::new(shared_db, None);
+			let mut jdb = EarlyMergeDB::new(shared_db, 0);

 			commit_batch(&mut jdb, 6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
@@ -1017,10 +1017,10 @@ mod tests {

 	#[test]
 	fn reopen_fork() {
-		let shared_db = Arc::new(kvdb_memorydb::create(0));
+		let shared_db = Arc::new(kvdb_memorydb::create(1));

 		let (foo, bar, baz) = {
-			let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);
+			let mut jdb = EarlyMergeDB::new(shared_db.clone(), 0);
 			// history is 1
 			let foo = jdb.insert(EMPTY_PREFIX, b"foo");
 			let bar = jdb.insert(EMPTY_PREFIX, b"bar");
@@ -1038,7 +1038,7 @@ mod tests {
 		};

 		{
-			let mut jdb = EarlyMergeDB::new(shared_db, None);
+			let mut jdb = EarlyMergeDB::new(shared_db, 0);
 			commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 			assert!(jdb.contains(&foo, EMPTY_PREFIX));
@@ -1053,7 +1053,7 @@ mod tests {
 		let key = jdb.insert(EMPTY_PREFIX, b"dog");
 		inject_batch(&mut jdb).unwrap();

-		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog"));
+		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
 		jdb.remove(&key, EMPTY_PREFIX);
 		inject_batch(&mut jdb).unwrap();
@@ -185,7 +185,7 @@ impl fmt::Display for Algorithm {
 }

 /// Create a new `JournalDB` trait object over a generic key-value database.
-pub fn new(backing: Arc<dyn (::kvdb::KeyValueDB)>, algorithm: Algorithm, col: Option<u32>) -> Box<dyn JournalDB> {
+pub fn new(backing: Arc<dyn (::kvdb::KeyValueDB)>, algorithm: Algorithm, col: u32) -> Box<dyn JournalDB> {
 	match algorithm {
 		Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(backing, col)),
 		Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(backing, col)),
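With the `Option<u32>` column parameter gone, every caller of this factory now has to name a concrete column family. A minimal caller-side sketch (not part of this commit; the `NUM_COLUMNS`/`COL_STATE` constants are hypothetical):

```rust
use std::sync::Arc;
use journaldb::{new, Algorithm, JournalDB};
use kvdb::KeyValueDB;

// Hypothetical column layout for the caller's database.
const NUM_COLUMNS: u32 = 1;
const COL_STATE: u32 = 0;

fn open_state_db() -> Box<dyn JournalDB> {
	// Any KeyValueDB works; an in-memory one keeps the sketch self-contained.
	let backing: Arc<dyn KeyValueDB> = Arc::new(kvdb_memorydb::create(NUM_COLUMNS));
	// Before this change the last argument was `Option<u32>` (`None` meant the
	// rocksdb "default" column); now a concrete column index is required.
	new(backing, Algorithm::Archive, COL_STATE)
}
```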
@@ -43,7 +43,7 @@ use crate::{error_negatively_reference_hash, new_memory_db};
 pub struct OverlayDB {
 	overlay: super::MemoryDB,
 	backing: Arc<dyn KeyValueDB>,
-	column: Option<u32>,
+	column: u32,
 }

 struct Payload {
@@ -72,7 +72,7 @@ impl Decodable for Payload {
 	fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
 		let payload = Payload {
 			count: rlp.val_at(0)?,
-			value: DBValue::from_slice(rlp.at(1)?.data()?),
+			value: rlp.at(1)?.data()?.to_vec(),
 		};

 		Ok(payload)
@@ -81,7 +81,7 @@ impl Decodable for Payload {

 impl OverlayDB {
 	/// Create a new instance of OverlayDB given a `backing` database.
-	pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> OverlayDB {
+	pub fn new(backing: Arc<dyn KeyValueDB>, column: u32) -> OverlayDB {
 		OverlayDB {
 			overlay: new_memory_db(),
 			backing,
@@ -92,8 +92,8 @@ impl OverlayDB {
 	/// Create a new instance of OverlayDB with an anonymous temporary database.
 	#[cfg(test)]
 	pub fn new_temp() -> OverlayDB {
-		let backing = Arc::new(::kvdb_memorydb::create(0));
-		Self::new(backing, None)
+		let backing = Arc::new(::kvdb_memorydb::create(1));
+		Self::new(backing, 0)
 	}

 	/// Commit all operations in a single batch.
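Note the pattern that repeats throughout these test helpers: `kvdb_memorydb::create(0)` used to mean "default column only", whereas under the new API the argument is the total number of column families, so a usable database needs at least one, addressed as column `0`. A hedged sketch of the new `kvdb-memorydb` usage (not taken from this commit):

```rust
use std::sync::Arc;

fn main() {
	// One explicit column family, addressed as column 0.
	let db = Arc::new(kvdb_memorydb::create(1));

	let mut tx = db.transaction();
	tx.put(0, b"key", b"value"); // the column index is now a plain u32
	db.write(tx).expect("in-memory writes do not fail");

	// DBValue is now Vec<u8>, so reads compare against a Vec.
	assert_eq!(db.get(0, b"key").unwrap(), Some(b"value".to_vec()));
}
```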
@@ -251,7 +251,7 @@ mod tests {
 	fn overlaydb_overlay_insert_and_remove() {
 		let mut trie = OverlayDB::new_temp();
 		let h = trie.insert(EMPTY_PREFIX, b"hello world");
-		assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world"));
+		assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), b"hello world".to_vec());
 		trie.remove(&h, EMPTY_PREFIX);
 		assert_eq!(trie.get(&h, EMPTY_PREFIX), None);
 	}
@@ -260,9 +260,9 @@ mod tests {
 	fn overlaydb_backing_insert_revert() {
 		let mut trie = OverlayDB::new_temp();
 		let h = trie.insert(EMPTY_PREFIX, b"hello world");
-		assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world"));
+		assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), b"hello world".to_vec());
 		trie.commit().unwrap();
-		assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world"));
+		assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), b"hello world".to_vec());
 	}

 	#[test]
@@ -300,29 +300,29 @@ mod tests {
 	fn overlaydb_complex() {
 		let mut trie = OverlayDB::new_temp();
 		let hfoo = trie.insert(EMPTY_PREFIX, b"foo");
-		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
+		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
 		let hbar = trie.insert(EMPTY_PREFIX, b"bar");
-		assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar"));
+		assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), b"bar".to_vec());
 		trie.commit().unwrap();
-		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
+		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
-		assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar"));
+		assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), b"bar".to_vec());
 		trie.insert(EMPTY_PREFIX, b"foo"); // two refs
-		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
+		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
 		trie.commit().unwrap();
-		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
+		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
-		assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar"));
+		assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), b"bar".to_vec());
 		trie.remove(&hbar, EMPTY_PREFIX); // zero refs - delete
 		assert_eq!(trie.get(&hbar, EMPTY_PREFIX), None);
 		trie.remove(&hfoo, EMPTY_PREFIX); // one ref - keep
-		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
+		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
 		trie.commit().unwrap();
-		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
+		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
 		trie.remove(&hfoo, EMPTY_PREFIX); // zero ref - would delete, but...
 		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None);
 		trie.insert(EMPTY_PREFIX, b"foo"); // one ref - keep after all.
-		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
+		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
 		trie.commit().unwrap();
-		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo"));
+		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), b"foo".to_vec());
 		trie.remove(&hfoo, EMPTY_PREFIX); // zero ref - delete
 		assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None);
 		trie.commit().unwrap(); //
@@ -74,7 +74,7 @@ pub struct OverlayRecentDB {
 	transaction_overlay: super::MemoryDB,
 	backing: Arc<dyn KeyValueDB>,
 	journal_overlay: Arc<RwLock<JournalOverlay>>,
-	column: Option<u32>,
+	column: u32,
 }

 struct DatabaseValue {
@@ -88,7 +88,7 @@ impl Decodable for DatabaseValue {
 		let id = rlp.val_at(0)?;
 		let inserts = rlp.at(1)?.iter().map(|r| {
 			let k = r.val_at(0)?;
-			let v = DBValue::from_slice(r.at(1)?.data()?);
+			let v = r.at(1)?.data()?.to_vec();
 			Ok((k, v))
 		}).collect::<Result<Vec<_>, _>>()?;
 		let deletes = rlp.list_at(2)?;
@@ -153,12 +153,12 @@ impl Clone for OverlayRecentDB {

 impl OverlayRecentDB {
 	/// Create a new instance.
-	pub fn new(backing: Arc<dyn KeyValueDB>, col: Option<u32>) -> OverlayRecentDB {
+	pub fn new(backing: Arc<dyn KeyValueDB>, col: u32) -> OverlayRecentDB {
 		let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&*backing, col)));
 		OverlayRecentDB {
 			transaction_overlay: new_memory_db(),
-			backing: backing,
+			backing,
-			journal_overlay: journal_overlay,
+			journal_overlay,
 			column: col,
 		}
 	}
@@ -180,7 +180,7 @@ impl OverlayRecentDB {
 			.expect("Low-level database error. Some issue with your hard disk?")
 	}

-	fn read_overlay(db: &dyn KeyValueDB, col: Option<u32>) -> JournalOverlay {
+	fn read_overlay(db: &dyn KeyValueDB, col: u32) -> JournalOverlay {
 		let mut journal = HashMap::new();
 		let mut overlay = new_memory_db();
 		let mut count = 0;
@@ -281,9 +281,9 @@ impl JournalDB for OverlayRecentDB {
 	fn state(&self, key: &H256) -> Option<Bytes> {
 		let journal_overlay = self.journal_overlay.read();
 		let key = to_short_key(key);
-		journal_overlay.backing_overlay.get(&key, EMPTY_PREFIX).map(|v| v.into_vec())
+		journal_overlay.backing_overlay.get(&key, EMPTY_PREFIX)
-			.or_else(|| journal_overlay.pending_overlay.get(&key).map(|d| d.clone().into_vec()))
+			.or_else(|| journal_overlay.pending_overlay.get(&key).map(|d| d.clone()))
-			.or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.into_vec()))
+			.or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.to_vec()))
 	}

 	fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> {
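This `state` lookup shows the `DBValue` change at work: with `DBValue` now a plain `Vec<u8>`, the overlay hits no longer need an `into_vec()` conversion, and only the prefix lookup against the backing store (which still yields a boxed slice here) is copied out with `to_vec()`. A reduced sketch of the same three-tier fallback, with hypothetical stand-in types for the overlays and the backing store:

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the three lookup tiers in `state`:
// the journal's backing overlay, the pending overlay, and the backing DB.
fn state_lookup(
	backing_overlay: &HashMap<Vec<u8>, Vec<u8>>,
	pending_overlay: &HashMap<Vec<u8>, Vec<u8>>,
	backing_by_prefix: &HashMap<Vec<u8>, Box<[u8]>>,
	short_key: &[u8],
) -> Option<Vec<u8>> {
	backing_overlay.get(short_key).cloned()                  // DBValue = Vec<u8>: no conversion
		.or_else(|| pending_overlay.get(short_key).cloned()) // clone of a Vec<u8>
		.or_else(|| backing_by_prefix.get(short_key).map(|b| b.to_vec())) // boxed slice -> Vec
}
```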
@@ -500,8 +500,8 @@ mod tests {
 	use crate::{JournalDB, inject_batch, commit_batch};

 	fn new_db() -> OverlayRecentDB {
-		let backing = Arc::new(kvdb_memorydb::create(0));
-		OverlayRecentDB::new(backing, None)
+		let backing = Arc::new(kvdb_memorydb::create(1));
+		OverlayRecentDB::new(backing, 0)
 	}

 	#[test]
@@ -742,28 +742,28 @@ mod tests {

 	#[test]
 	fn reopen() {
-		let shared_db = Arc::new(kvdb_memorydb::create(0));
+		let shared_db = Arc::new(kvdb_memorydb::create(1));
 		let bar = H256::random();

 		let foo = {
-			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
 			// history is 1
 			let foo = jdb.insert(EMPTY_PREFIX, b"foo");
-			jdb.emplace(bar.clone(), EMPTY_PREFIX, DBValue::from_slice(b"bar"));
+			jdb.emplace(bar.clone(), EMPTY_PREFIX, b"bar".to_vec());
 			commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 			foo
 		};

 		{
-			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
 			jdb.remove(&foo, EMPTY_PREFIX);
 			commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 		}

 		{
-			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
 			assert!(jdb.contains(&foo, EMPTY_PREFIX));
 			assert!(jdb.contains(&bar, EMPTY_PREFIX));
 			commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap();
@@ -909,11 +909,11 @@ mod tests {
 	fn reopen_remove_three() {
 		let _ = ::env_logger::try_init();

-		let shared_db = Arc::new(kvdb_memorydb::create(0));
+		let shared_db = Arc::new(kvdb_memorydb::create(1));
 		let foo = keccak(b"foo");

 		{
-			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
 			// history is 1
 			jdb.insert(EMPTY_PREFIX, b"foo");
 			commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap();
@@ -935,7 +935,7 @@ mod tests {

 		// incantation to reopen the db
 		}; {
-			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);

 			jdb.remove(&foo, EMPTY_PREFIX);
 			commit_batch(&mut jdb, 4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap();
@@ -944,7 +944,7 @@ mod tests {

 		// incantation to reopen the db
 		}; {
-			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);

 			commit_batch(&mut jdb, 5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
@@ -952,7 +952,7 @@ mod tests {

 		// incantation to reopen the db
 		}; {
-			let mut jdb = OverlayRecentDB::new(shared_db, None);
+			let mut jdb = OverlayRecentDB::new(shared_db, 0);

 			commit_batch(&mut jdb, 6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
@@ -962,10 +962,10 @@ mod tests {

 	#[test]
 	fn reopen_fork() {
-		let shared_db = Arc::new(kvdb_memorydb::create(0));
+		let shared_db = Arc::new(kvdb_memorydb::create(1));

 		let (foo, bar, baz) = {
-			let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
+			let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
 			// history is 1
 			let foo = jdb.insert(EMPTY_PREFIX, b"foo");
 			let bar = jdb.insert(EMPTY_PREFIX, b"bar");
@@ -983,7 +983,7 @@ mod tests {
 		};

 		{
-			let mut jdb = OverlayRecentDB::new(shared_db, None);
+			let mut jdb = OverlayRecentDB::new(shared_db, 0);
 			commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap();
 			assert!(jdb.can_reconstruct_refs());
 			assert!(jdb.contains(&foo, EMPTY_PREFIX));
@@ -1018,7 +1018,7 @@ mod tests {
 		let key = jdb.insert(EMPTY_PREFIX, b"dog");
 		inject_batch(&mut jdb).unwrap();

-		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog"));
+		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
 		jdb.remove(&key, EMPTY_PREFIX);
 		inject_batch(&mut jdb).unwrap();

@@ -1027,10 +1027,10 @@ mod tests {

 	#[test]
 	fn earliest_era() {
-		let shared_db = Arc::new(kvdb_memorydb::create(0));
+		let shared_db = Arc::new(kvdb_memorydb::create(1));

 		// empty DB
-		let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
+		let mut jdb = OverlayRecentDB::new(shared_db.clone(), 0);
 		assert!(jdb.earliest_era().is_none());

 		// single journalled era.
@@ -1064,7 +1064,7 @@ mod tests {

 		// reconstructed: no journal entries.
 		drop(jdb);
-		let jdb = OverlayRecentDB::new(shared_db, None);
+		let jdb = OverlayRecentDB::new(shared_db, 0);
 		assert_eq!(jdb.earliest_era(), None);
 	}
 }
@@ -63,12 +63,12 @@ pub struct RefCountedDB {
 	latest_era: Option<u64>,
 	inserts: Vec<H256>,
 	removes: Vec<H256>,
-	column: Option<u32>,
+	column: u32,
 }

 impl RefCountedDB {
 	/// Create a new instance given a `backing` database.
-	pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB {
+	pub fn new(backing: Arc<dyn KeyValueDB>, column: u32) -> RefCountedDB {
 		let latest_era = backing.get(column, &LATEST_ERA_KEY)
 			.expect("Low-level database error.")
 			.map(|v| decode::<u64>(&v).expect("decoding db value failed"));
@@ -107,7 +107,7 @@ impl JournalDB for RefCountedDB {
 	fn mem_used(&self) -> usize {
 		let mut ops = new_malloc_size_ops();
 		self.inserts.size_of(&mut ops) + self.removes.size_of(&mut ops)
 	}

 	fn is_empty(&self) -> bool {
 		self.latest_era.is_none()
@@ -229,8 +229,8 @@ mod tests {
 	use crate::{JournalDB, inject_batch, commit_batch};

 	fn new_db() -> RefCountedDB {
-		let backing = Arc::new(kvdb_memorydb::create(0));
-		RefCountedDB::new(backing, None)
+		let backing = Arc::new(kvdb_memorydb::create(1));
+		RefCountedDB::new(backing, 0)
 	}

 	#[test]
@@ -342,7 +342,7 @@ mod tests {
 		let key = jdb.insert(EMPTY_PREFIX, b"dog");
 		inject_batch(&mut jdb).unwrap();

-		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog"));
+		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
 		jdb.remove(&key, EMPTY_PREFIX);
 		inject_batch(&mut jdb).unwrap();
@@ -6,5 +6,5 @@ description = "An LRU-cache which operates on memory used"
 license = "GPL3"

 [dependencies]
-parity-util-mem = "0.2.0"
+parity-util-mem = "0.3.0"
 lru-cache = "0.1"
@@ -6,8 +6,8 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 log = "0.4"
 macros = { path = "../macros" }
-kvdb = "0.1"
-kvdb-rocksdb = "0.2.0"
+kvdb = "0.2"
+kvdb-rocksdb = "0.3.0"

 [dev-dependencies]
 tempdir = "0.3"
@@ -58,16 +58,16 @@ impl Default for Config {
 pub struct Batch {
 	inner: BTreeMap<Vec<u8>, Vec<u8>>,
 	batch_size: usize,
-	column: Option<u32>,
+	column: u32,
 }

 impl Batch {
 	/// Make a new batch with the given config.
-	pub fn new(config: &Config, col: Option<u32>) -> Self {
+	pub fn new(config: &Config, column: u32) -> Self {
 		Batch {
 			inner: BTreeMap::new(),
 			batch_size: config.batch_size,
-			column: col,
+			column,
 		}
 	}
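A `Batch` buffers key-value writes for exactly one column family and flushes them to the destination database in chunks of `batch_size`. A hedged sketch of how a migration body typically drives it under the new `u32` API (mirroring the `AddsColumn` test later in this diff; the final `commit` call is an assumption about the crate's API, the `copy_column` helper is hypothetical):

```rust
use std::{io, sync::Arc};
use kvdb_rocksdb::Database;
use migration::{Batch, Config};

// Hypothetical helper: copy one column family through a Batch.
fn copy_column(source: Arc<Database>, config: &Config, dest: &mut Database, col: u32) -> io::Result<()> {
	let mut batch = Batch::new(config, col); // the column index is now a plain u32
	for (key, value) in source.iter(col) {
		// insert() flushes to `dest` whenever batch_size entries have accumulated
		batch.insert(key.into_vec(), value.into_vec(), dest)?;
	}
	// assumption: a final commit drains whatever is still buffered
	batch.commit(dest)
}
```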
@@ -98,39 +98,39 @@ impl Batch {
 /// A generalized migration from the given db to a destination db.
 pub trait Migration: 'static {
 	/// Number of columns in the database before the migration.
-	fn pre_columns(&self) -> Option<u32> { self.columns() }
+	fn pre_columns(&self) -> u32 { self.columns() }
 	/// Number of columns in database after the migration.
-	fn columns(&self) -> Option<u32>;
+	fn columns(&self) -> u32;
 	/// Whether this migration alters any existing columns.
 	/// if not, then column families will simply be added and `migrate` will never be called.
 	fn alters_existing(&self) -> bool { true }
 	/// Version of the database after the migration.
 	fn version(&self) -> u32;
 	/// Migrate a source to a destination.
-	fn migrate(&mut self, source: Arc<Database>, config: &Config, destination: &mut Database, col: Option<u32>) -> io::Result<()>;
+	fn migrate(&mut self, source: Arc<Database>, config: &Config, destination: &mut Database, col: u32) -> io::Result<()>;
 }

 /// A simple migration over key-value pairs of a single column.
 pub trait SimpleMigration: 'static {
 	/// Number of columns in database after the migration.
-	fn columns(&self) -> Option<u32>;
+	fn columns(&self) -> u32;
 	/// Version of database after the migration.
 	fn version(&self) -> u32;
 	/// Index of column which should be migrated.
-	fn migrated_column_index(&self) -> Option<u32>;
+	fn migrated_column_index(&self) -> u32;
 	/// Should migrate existing object to new database.
 	/// Returns `None` if the object does not exist in new version of database.
 	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)>;
 }
 impl<T: SimpleMigration> Migration for T {
-	fn columns(&self) -> Option<u32> { SimpleMigration::columns(self) }
+	fn columns(&self) -> u32 { SimpleMigration::columns(self) }

-	fn version(&self) -> u32 { SimpleMigration::version(self) }

 	fn alters_existing(&self) -> bool { true }

-	fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> io::Result<()> {
+	fn version(&self) -> u32 { SimpleMigration::version(self) }
+
+	fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: u32) -> io::Result<()> {
 		let migration_needed = col == SimpleMigration::migrated_column_index(self);
 		let mut batch = Batch::new(config, col);
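Thanks to this blanket impl, a value-transforming migration only has to say which single column it touches and how each entry is rewritten. A hedged sketch of a minimal implementor under the new all-`u32` API (the `AppendVersionByte` type and its behaviour are hypothetical, not part of this commit):

```rust
use migration::SimpleMigration;

/// Hypothetical migration: appends a marker byte to every value in column 0.
struct AppendVersionByte;

impl SimpleMigration for AppendVersionByte {
	// The schema keeps a single column family after this migration.
	fn columns(&self) -> u32 { 1 }
	// Database version once the migration has run.
	fn version(&self) -> u32 { 42 }
	// Only column 0 is rewritten; other columns are copied through untouched.
	fn migrated_column_index(&self) -> u32 { 0 }

	fn simple_migrate(&mut self, key: Vec<u8>, mut value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
		value.push(0x01);
		Some((key, value)) // returning None would drop the entry
	}
}
```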
@@ -151,19 +151,19 @@ impl<T: SimpleMigration> Migration for T {
 /// An even simpler migration which just changes the number of columns.
 pub struct ChangeColumns {
 	/// The amount of columns before this migration.
-	pub pre_columns: Option<u32>,
+	pub pre_columns: u32,
 	/// The amount of columns after this migration.
-	pub post_columns: Option<u32>,
+	pub post_columns: u32,
 	/// The version after this migration.
 	pub version: u32,
 }

 impl Migration for ChangeColumns {
-	fn pre_columns(&self) -> Option<u32> { self.pre_columns }
+	fn pre_columns(&self) -> u32 { self.pre_columns }
-	fn columns(&self) -> Option<u32> { self.post_columns }
+	fn columns(&self) -> u32 { self.post_columns }
-	fn version(&self) -> u32 { self.version }
 	fn alters_existing(&self) -> bool { false }
-	fn migrate(&mut self, _: Arc<Database>, _: &Config, _: &mut Database, _: Option<u32>) -> io::Result<()> {
+	fn version(&self) -> u32 { self.version }
+	fn migrate(&mut self, _: Arc<Database>, _: &Config, _: &mut Database, _: u32) -> io::Result<()> {
 		Ok(())
 	}
 }
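Because `alters_existing` is `false`, the manager handles a `ChangeColumns` entirely in place, adding or dropping column families until the count matches `post_columns`. A hedged usage sketch under the new all-`u32` fields (mirroring the `change_columns` test further down this diff):

```rust
use migration::{ChangeColumns, Config, Manager};

let mut manager = Manager::new(Config::default());

// Grow a 1-column database to 4 columns, bumping the schema version to 1.
manager.add_migration(ChangeColumns {
	pre_columns: 1,   // was Option<u32>: None meant "default column only"
	post_columns: 4,  // was Some(4)
	version: 1,
}).expect("migration versions must be monotonically increasing");
```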
@@ -211,7 +211,7 @@ impl Manager {
 	/// Creates new migration manager with given configuration.
 	pub fn new(config: Config) -> Self {
 		Manager {
-			config: config,
+			config,
 			migrations: vec![],
 		}
 	}
@@ -239,9 +239,8 @@ impl Manager {
 			return Err(other_io_err("Migration impossible"));
 		};

-		let columns = migrations.get(0).and_then(|m| m.pre_columns());
-
-		trace!(target: "migration", "Expecting database to contain {:?} columns", columns);
+		let columns = migrations.first().expect("checked empty above; qed").pre_columns();
+		trace!(target: "migration", "Expecting database to contain {} columns", columns);
 		let mut db_config = DatabaseConfig {
 			max_open_files: 64,
 			compaction: config.compaction_profile,
@@ -271,16 +270,10 @@ impl Manager {
 			let temp_path_str = temp_path.to_str().ok_or_else(|| other_io_err("Migration impossible."))?;
 			let mut new_db = Database::open(&db_config, temp_path_str)?;

-			match current_columns {
-				// migrate only default column
-				None => migration.migrate(cur_db.clone(), &config, &mut new_db, None)?,
-				Some(v) => {
-					// Migrate all columns in previous DB
-					for col in 0..v {
-						migration.migrate(cur_db.clone(), &config, &mut new_db, Some(col))?
-					}
-				}
+			for col in 0..current_columns {
+				migration.migrate(cur_db.clone(), &config, &mut new_db, col)?
 			}

 			// next iteration, we will migrate from this db into the other temp.
 			cur_db = Arc::new(new_db);
 			temp_idx.swap();
@@ -290,13 +283,13 @@ impl Manager {
 		} else {
 			// migrations which simply add or remove column families.
 			// we can do this in-place.
-			let goal_columns = migration.columns().unwrap_or(0);
+			let goal_columns = migration.columns();
 			while cur_db.num_columns() < goal_columns {
 				cur_db.add_column().map_err(other_io_err)?;
 			}

 			while cur_db.num_columns() > goal_columns {
-				cur_db.drop_column().map_err(other_io_err)?;
+				cur_db.remove_last_column().map_err(other_io_err)?;
 			}
 		}
 	}
@@ -29,7 +29,7 @@ use std::io;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use tempdir::TempDir;
-use kvdb_rocksdb::Database;
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use migration::{Batch, Config, SimpleMigration, Migration, Manager, ChangeColumns};

 #[inline]
@@ -39,11 +39,11 @@ fn db_path(path: &Path) -> PathBuf {

 // initialize a database at the given directory with the given values.
 fn make_db(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
-	let db = Database::open_default(path.to_str().unwrap()).expect("failed to open temp database");
+	let db = Database::open(&DatabaseConfig::default(), path.to_str().unwrap()).expect("failed to open temp database");
 	{
 		let mut transaction = db.transaction();
 		for (k, v) in pairs {
-			transaction.put(None, &k, &v);
+			transaction.put(0, &k, &v);
 		}

 		db.write(transaction).expect("failed to write db transaction");
@@ -52,10 +52,12 @@ fn make_db(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {

 // helper for verifying a migrated database.
 fn verify_migration(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
-	let db = Database::open_default(path.to_str().unwrap()).unwrap();
+	let db = Database::open(&DatabaseConfig::default(), path.to_str().expect("valid path")).expect("database should be there");

 	for (k, v) in pairs {
-		let x = db.get(None, &k).unwrap().unwrap();
+		let x = db.get(0, &k)
+			.expect("database IO should work")
+			.expect(&format!("key={:?} should be in column 0 in the db", &k));

 		assert_eq!(&x[..], &v[..]);
 	}
@@ -64,18 +66,9 @@ fn verify_migration(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
 struct Migration0;

 impl SimpleMigration for Migration0 {
-	fn columns(&self) -> Option<u32> {
-		None
-	}
-
-	fn version(&self) -> u32 {
-		1
-	}
-
-	fn migrated_column_index(&self) -> Option<u32> {
-		None
-	}
+	fn columns(&self) -> u32 { 1 }
+
+	fn version(&self) -> u32 { 1 }
+
+	fn migrated_column_index(&self) -> u32 { 0 }

 	fn simple_migrate(&mut self, mut key: Vec<u8>, mut value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
 		key.push(0x11);
 		value.push(0x22);
@@ -87,18 +80,9 @@ impl SimpleMigration for Migration0 {
 struct Migration1;

 impl SimpleMigration for Migration1 {
-	fn columns(&self) -> Option<u32> {
-		None
-	}
-
-	fn version(&self) -> u32 {
-		2
-	}
-
-	fn migrated_column_index(&self) -> Option<u32> {
-		None
-	}
+	fn columns(&self) -> u32 { 1 }
+
+	fn version(&self) -> u32 { 2 }
+
+	fn migrated_column_index(&self) -> u32 { 0 }

 	fn simple_migrate(&mut self, key: Vec<u8>, _value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
 		Some((key, vec![]))
 	}
@@ -107,20 +91,17 @@ impl SimpleMigration for Migration1 {
 struct AddsColumn;

 impl Migration for AddsColumn {
-	fn pre_columns(&self) -> Option<u32> { None }
+	fn pre_columns(&self) -> u32 { 1 }

-	fn columns(&self) -> Option<u32> { Some(1) }
+	fn columns(&self) -> u32 { 1 }

 	fn version(&self) -> u32 { 1 }

-	fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> io::Result<()> {
+	fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: u32) -> io::Result<()> {
 		let mut batch = Batch::new(config, col);

 		for (key, value) in source.iter(col) {
 			batch.insert(key.into_vec(), value.into_vec(), dest)?;
 		}

-		if col == Some(1) {
+		if col == 1 {
 			batch.insert(vec![1, 2, 3], vec![4, 5, 6], dest)?;
 		}
@@ -204,8 +185,8 @@ fn first_and_noop_migration() {
 	make_db(&db_path, map![vec![] => vec![], vec![1] => vec![1]]);
 	let expected = map![vec![0x11] => vec![0x22], vec![1, 0x11] => vec![1, 0x22]];

-	manager.add_migration(Migration0).unwrap();
-	let end_path = manager.execute(&db_path, 0).unwrap();
+	manager.add_migration(Migration0).expect("Migration0 can be added");
+	let end_path = manager.execute(&db_path, 0).expect("Migration0 runs clean");

 	verify_migration(&end_path, expected);
 }
@@ -254,8 +235,8 @@ fn change_columns() {

 	let mut manager = Manager::new(Config::default());
 	manager.add_migration(ChangeColumns {
-		pre_columns: None,
-		post_columns: Some(4),
+		pre_columns: 1,
+		post_columns: 4,
 		version: 1,
 	}).unwrap();

@@ -266,7 +247,7 @@ fn change_columns() {

 	assert_eq!(db_path, new_path, "Changing columns is an in-place migration.");

-	let config = DatabaseConfig::with_columns(Some(4));
+	let config = DatabaseConfig::with_columns(4);
 	let db = Database::open(&config, new_path.to_str().unwrap()).unwrap();
 	assert_eq!(db.num_columns(), 4);
 }
@@ -6,7 +6,7 @@ description = "Merkle-Patricia Trie (Ethereum Style)"
 license = "GPL-3.0"

 [dependencies]
-trie-db = "0.16.0"
+trie-db = "0.18.0"
 keccak-hasher = { version = "0.1.1", path = "../keccak-hasher" }
 hash-db = "0.15.0"
 rlp = "0.4.4"
@@ -15,7 +15,7 @@ ethereum-types = "0.8.0"
 elastic-array = "0.10"

 [dev-dependencies]
-memory-db = "0.15.0"
+memory-db = "0.18.0"
 keccak-hash = "0.4.0"
 journaldb = { path = "../journaldb" }
 criterion = "0.3"
@@ -78,7 +78,7 @@ impl trie_db::TrieLayout for Layout {
 /// TrieDBMut::new(&mut memdb, &mut root).insert(b"foo", b"bar").unwrap();
 /// let t = TrieDB::new(&memdb, &root).unwrap();
 /// assert!(t.contains(b"foo").unwrap());
-/// assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar"));
+/// assert_eq!(t.get(b"foo").unwrap().unwrap(), b"bar".to_vec());
 /// }
 /// ```
 pub type TrieDB<'db> = trie::TrieDB<'db, Layout>;
@@ -125,7 +125,7 @@ pub type FatDB<'db> = trie::FatDB<'db, Layout>;
 /// assert_eq!(*t.root(), KECCAK_NULL_RLP);
 /// t.insert(b"foo", b"bar").unwrap();
 /// assert!(t.contains(b"foo").unwrap());
-/// assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar"));
+/// assert_eq!(t.get(b"foo").unwrap().unwrap(), b"bar".to_vec());
 /// t.remove(b"foo").unwrap();
 /// assert!(!t.contains(b"foo").unwrap());
 /// }
@@ -6,6 +6,6 @@ description = "Trie-root helpers, ethereum style"
 license = "GPL-3.0"

 [dependencies]
-triehash = "0.8.0"
+triehash = "0.8.2"
 ethereum-types = "0.8.0"
 keccak-hasher = { path = "../keccak-hasher" }