= Some(5);
/// Append a path element to the given path and return the string.
pub fn append_path(path: P, item: &str) -> String where P: AsRef {
@@ -160,36 +173,25 @@ impl Client {
) -> Result, ClientError> {
let path = path.to_path_buf();
let gb = spec.genesis_block();
- let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path));
- let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone())));
+ let mut db_config = DatabaseConfig::with_columns(DB_NO_OF_COLUMNS);
+ db_config.cache_size = config.db_cache_size;
+ db_config.compaction = config.db_compaction.compaction_profile();
- let mut state_db_config = match config.db_cache_size {
- None => DatabaseConfig::default(),
- Some(cache_size) => DatabaseConfig::with_cache(cache_size),
- };
-
- state_db_config = state_db_config.compaction(config.db_compaction.compaction_profile());
-
- let mut state_db = journaldb::new(
- &append_path(&path, "state"),
- config.pruning,
- state_db_config
- );
+ let db = Arc::new(Database::open(&db_config, &path.to_str().unwrap()).expect("Error opening database"));
+ let chain = Arc::new(BlockChain::new(config.blockchain, &gb, db.clone()));
+ let tracedb = Arc::new(try!(TraceDB::new(config.tracing, db.clone(), chain.clone())));
+ let mut state_db = journaldb::new(db.clone(), config.pruning, DB_COL_STATE);
if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) {
- state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
+ let batch = DBTransaction::new(&db);
+ state_db.commit(&batch, 0, &spec.genesis_header().hash(), None).expect("Error committing genesis state to state DB");
+ db.write(batch).expect("Error writing genesis state to state DB");
}
if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) {
warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex());
}
- /* TODO: enable this once the best block issue is resolved
- while !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) {
- warn!("State root not found for block #{} ({}), recovering...", chain.best_block_number(), chain.best_block_hash().hex());
- chain.rewind();
- }*/
-
let engine = Arc::new(spec.engine);
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone());
@@ -204,6 +206,7 @@ impl Client {
chain: chain,
tracedb: tracedb,
engine: engine,
+ db: db,
state_db: Mutex::new(state_db),
block_queue: block_queue,
report: RwLock::new(Default::default()),
@@ -432,21 +435,23 @@ impl Client {
//let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new));
+ let batch = DBTransaction::new(&self.db);
// CHECK! I *think* this is fine, even if the state_root is equal to another
// already-imported block of the same number.
// TODO: Prove it with a test.
- block.drain().commit(number, hash, ancient).expect("State DB commit failed.");
+ block.drain().commit(&batch, number, hash, ancient).expect("State DB commit failed.");
- // And update the chain after commit to prevent race conditions
- // (when something is in chain but you are not able to fetch details)
- let route = self.chain.insert_block(block_data, receipts);
- self.tracedb.import(TraceImportRequest {
+ let route = self.chain.insert_block(&batch, block_data, receipts);
+ self.tracedb.import(&batch, TraceImportRequest {
traces: traces.into(),
block_hash: hash.clone(),
block_number: number,
enacted: route.enacted.clone(),
retracted: route.retracted.len()
});
+ // Final commit to the DB
+ self.db.write(batch).expect("State DB write failed.");
+
self.update_last_hashes(&parent, hash);
route
}
@@ -674,17 +679,17 @@ impl BlockChainClient for Client {
fn replay(&self, id: TransactionID, analytics: CallAnalytics) -> Result {
let address = try!(self.transaction_address(id).ok_or(ReplayError::TransactionNotFound));
- let block_data = try!(self.block(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
+ let header_data = try!(self.block_header(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
+ let body_data = try!(self.block_body(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
let mut state = try!(self.state_at_beginning(BlockID::Hash(address.block_hash)).ok_or(ReplayError::StatePruned));
- let block = BlockView::new(&block_data);
- let txs = block.transactions();
+ let txs = BodyView::new(&body_data).transactions();
if address.index >= txs.len() {
return Err(ReplayError::TransactionNotFound);
}
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
- let view = block.header_view();
+ let view = HeaderView::new(&header_data);
let last_hashes = self.build_last_hashes(view.hash());
let mut env_info = EnvInfo {
number: view.number(),
@@ -719,20 +724,16 @@ impl BlockChainClient for Client {
}
}
+ fn best_block_header(&self) -> Bytes {
+ self.chain.best_block_header()
+ }
+
fn block_header(&self, id: BlockID) -> Option {
- Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
+ Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_header_data(&hash))
}
fn block_body(&self, id: BlockID) -> Option {
- Self::block_hash(&self.chain, id).and_then(|hash| {
- self.chain.block(&hash).map(|bytes| {
- let rlp = Rlp::new(&bytes);
- let mut body = RlpStream::new_list(2);
- body.append_raw(rlp.at(1).as_raw(), 1);
- body.append_raw(rlp.at(2).as_raw(), 1);
- body.out()
- })
- })
+ Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_body(&hash))
}
fn block(&self, id: BlockID) -> Option {
@@ -789,13 +790,13 @@ impl BlockChainClient for Client {
fn uncle(&self, id: UncleID) -> Option {
let index = id.position;
- self.block(id.block).and_then(|block| BlockView::new(&block).uncle_rlp_at(index))
+ self.block_body(id.block).and_then(|body| BodyView::new(&body).uncle_rlp_at(index))
}
fn transaction_receipt(&self, id: TransactionID) -> Option {
- self.transaction_address(id).and_then(|address| {
- let t = self.chain.block(&address.block_hash)
- .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index));
+ self.transaction_address(id).and_then(|address| self.chain.block_number(&address.block_hash).and_then(|block_number| {
+ let t = self.chain.block_body(&address.block_hash)
+ .and_then(|block| BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index));
match (t, self.chain.transaction_receipt(&address)) {
(Some(tx), Some(receipt)) => {
@@ -834,7 +835,7 @@ impl BlockChainClient for Client {
},
_ => None
}
- })
+ }))
}
fn tree_route(&self, from: &H256, to: &H256) -> Option {
@@ -910,7 +911,7 @@ impl BlockChainClient for Client {
blocks.into_iter()
.filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash)))
.filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
- .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
+ .filter_map(|(number, hash, receipts)| self.chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes())))
.flat_map(|(number, hash, receipts, hashes)| {
let mut log_index = 0;
receipts.into_iter()
diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs
index 9b13d7be3..cdffe4302 100644
--- a/ethcore/src/client/test_client.rs
+++ b/ethcore/src/client/test_client.rs
@@ -248,7 +248,8 @@ impl TestBlockChainClient {
pub fn get_temp_journal_db() -> GuardedTempResult> {
let temp = RandomTempPath::new();
- let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default());
+ let db = Database::open_default(temp.as_str()).unwrap();
+ let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, None);
GuardedTempResult {
_temp: temp,
result: Some(journal_db)
@@ -363,6 +364,10 @@ impl BlockChainClient for TestBlockChainClient {
unimplemented!();
}
+ fn best_block_header(&self) -> Bytes {
+ self.block_header(BlockID::Hash(self.chain_info().best_block_hash)).expect("Best block always has a header.")
+ }
+
fn block_header(&self, id: BlockID) -> Option {
self.block_hash(id).and_then(|hash| self.blocks.read().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec()))
}
diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs
index 7d13dd23d..348b90c90 100644
--- a/ethcore/src/client/traits.rs
+++ b/ethcore/src/client/traits.rs
@@ -145,10 +145,7 @@ pub trait BlockChainClient : Sync + Send {
fn chain_info(&self) -> BlockChainInfo;
/// Get the best block header.
- fn best_block_header(&self) -> Bytes {
- // TODO: lock blockchain only once
- self.block_header(BlockID::Hash(self.chain_info().best_block_hash)).unwrap()
- }
+ fn best_block_header(&self) -> Bytes;
/// Returns numbers of blocks containing given bloom.
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option>;
diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs
index 57b4cfdc6..eab0e2eb5 100644
--- a/ethcore/src/db.rs
+++ b/ethcore/src/db.rs
@@ -62,14 +62,14 @@ pub trait Key {
/// Should be used to write value into database.
pub trait Writable {
/// Writes the value into the database.
- fn write(&self, key: &Key, value: &T) where T: Encodable, R: Deref;
+ fn write(&self, col: Option, key: &Key, value: &T) where T: Encodable, R: Deref;
/// Writes the value into the database and updates the cache.
- fn write_with_cache(&self, cache: &mut Cache, key: K, value: T, policy: CacheUpdatePolicy) where
+ fn write_with_cache(&self, col: Option, cache: &mut Cache, key: K, value: T, policy: CacheUpdatePolicy) where
K: Key + Hash + Eq,
T: Encodable,
R: Deref {
- self.write(&key, &value);
+ self.write(col, &key, &value);
match policy {
CacheUpdatePolicy::Overwrite => {
cache.insert(key, value);
@@ -81,20 +81,20 @@ pub trait Writable {
}
/// Writes the values into the database and updates the cache.
- fn extend_with_cache(&self, cache: &mut Cache, values: HashMap, policy: CacheUpdatePolicy) where
+ fn extend_with_cache(&self, col: Option, cache: &mut Cache, values: HashMap, policy: CacheUpdatePolicy) where
K: Key + Hash + Eq,
T: Encodable,
R: Deref {
match policy {
CacheUpdatePolicy::Overwrite => {
for (key, value) in values.into_iter() {
- self.write(&key, &value);
+ self.write(col, &key, &value);
cache.insert(key, value);
}
},
CacheUpdatePolicy::Remove => {
for (key, value) in &values {
- self.write(key, value);
+ self.write(col, key, value);
cache.remove(key);
}
},
@@ -105,12 +105,12 @@ pub trait Writable {
/// Should be used to read values from database.
pub trait Readable {
/// Returns value for given key.
- fn read(&self, key: &Key) -> Option where
+ fn read(&self, col: Option, key: &Key) -> Option where
T: Decodable,
R: Deref;
/// Returns value for given key either in cache or in database.
- fn read_with_cache(&self, cache: &RwLock, key: &K) -> Option where
+ fn read_with_cache(&self, col: Option, cache: &RwLock, key: &K) -> Option where
K: Key + Eq + Hash + Clone,
T: Clone + Decodable,
C: Cache {
@@ -121,7 +121,7 @@ pub trait Readable {
}
}
- self.read(key).map(|value: T|{
+ self.read(col, key).map(|value: T|{
let mut write = cache.write();
write.insert(key.clone(), value.clone());
value
@@ -129,10 +129,10 @@ pub trait Readable {
}
/// Returns true if given value exists.
- fn exists(&self, key: &Key) -> bool where R: Deref;
+ fn exists(&self, col: Option, key: &Key) -> bool where R: Deref;
/// Returns true if given value exists either in cache or in database.
- fn exists_with_cache(&self, cache: &RwLock, key: &K) -> bool where
+ fn exists_with_cache(&self, col: Option, cache: &RwLock, key: &K) -> bool where
K: Eq + Hash + Key,
R: Deref,
C: Cache {
@@ -143,13 +143,13 @@ pub trait Readable {
}
}
- self.exists::(key)
+ self.exists::(col, key)
}
}
impl Writable for DBTransaction {
- fn write(&self, key: &Key, value: &T) where T: Encodable, R: Deref {
- let result = self.put(&key.key(), &encode(value));
+ fn write(&self, col: Option, key: &Key, value: &T) where T: Encodable, R: Deref {
+ let result = self.put(col, &key.key(), &encode(value));
if let Err(err) = result {
panic!("db put failed, key: {:?}, err: {:?}", &key.key() as &[u8], err);
}
@@ -157,8 +157,8 @@ impl Writable for DBTransaction {
}
impl Readable for Database {
- fn read(&self, key: &Key) -> Option where T: Decodable, R: Deref {
- let result = self.get(&key.key());
+ fn read(&self, col: Option, key: &Key) -> Option where T: Decodable, R: Deref {
+ let result = self.get(col, &key.key());
match result {
Ok(option) => option.map(|v| decode(&v)),
@@ -168,8 +168,8 @@ impl Readable for Database {
}
}
- fn exists(&self, key: &Key) -> bool where R: Deref {
- let result = self.get(&key.key());
+ fn exists(&self, col: Option, key: &Key) -> bool where R: Deref {
+ let result = self.get(col, &key.key());
match result {
Ok(v) => v.is_some(),
diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs
index f1be3e4e0..adba16703 100644
--- a/ethcore/src/json_tests/executive.rs
+++ b/ethcore/src/json_tests/executive.rs
@@ -38,7 +38,7 @@ struct CallCreate {
impl From for CallCreate {
fn from(c: ethjson::vm::Call) -> Self {
- let dst: Option<_> = c.destination.into();
+ let dst: Option = c.destination.into();
CallCreate {
data: c.data.into(),
destination: dst.map(Into::into),
diff --git a/ethcore/src/json_tests/transaction.rs b/ethcore/src/json_tests/transaction.rs
index 673ff8650..7c9a3327e 100644
--- a/ethcore/src/json_tests/transaction.rs
+++ b/ethcore/src/json_tests/transaction.rs
@@ -49,7 +49,7 @@ fn do_json_test(json_data: &[u8]) -> Vec {
fail_unless(t.gas_price == tx.gas_price.into());
fail_unless(t.nonce == tx.nonce.into());
fail_unless(t.value == tx.value.into());
- let to: Option<_> = tx.to.into();
+ let to: Option = tx.to.into();
let to: Option = to.map(Into::into);
match t.action {
Action::Call(dest) => fail_unless(Some(dest) == to),
diff --git a/ethcore/src/migrations/blocks/v8.rs b/ethcore/src/migrations/blocks/v8.rs
index 041ceaac8..798be0790 100644
--- a/ethcore/src/migrations/blocks/v8.rs
+++ b/ethcore/src/migrations/blocks/v8.rs
@@ -16,19 +16,22 @@
//! This migration compresses the state db.
-use util::migration::SimpleMigration;
+use util::migration::{SimpleMigration, Progress};
use util::rlp::{Compressible, UntrustedRlp, View, RlpType};
/// Compressing migration.
#[derive(Default)]
-pub struct V8;
+pub struct V8(Progress);
impl SimpleMigration for V8 {
fn version(&self) -> u32 {
8
}
+ fn columns(&self) -> Option { None }
+
fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> {
+ self.0.tick();
Some((key,UntrustedRlp::new(&value).compress(RlpType::Blocks).to_vec()))
}
}
diff --git a/ethcore/src/migrations/extras/v6.rs b/ethcore/src/migrations/extras/v6.rs
index af2d0389b..9b746b9d2 100644
--- a/ethcore/src/migrations/extras/v6.rs
+++ b/ethcore/src/migrations/extras/v6.rs
@@ -34,9 +34,10 @@ impl ToV6 {
}
impl SimpleMigration for ToV6 {
- fn version(&self) -> u32 {
- 6
- }
+
+ fn columns(&self) -> Option { None }
+
+ fn version(&self) -> u32 { 6 }
fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> {
diff --git a/ethcore/src/migrations/mod.rs b/ethcore/src/migrations/mod.rs
index e3e1f4031..5c0c6f420 100644
--- a/ethcore/src/migrations/mod.rs
+++ b/ethcore/src/migrations/mod.rs
@@ -19,3 +19,7 @@
pub mod state;
pub mod blocks;
pub mod extras;
+
+mod v9;
+pub use self::v9::ToV9;
+pub use self::v9::Extract;
diff --git a/ethcore/src/migrations/state/v7.rs b/ethcore/src/migrations/state/v7.rs
index faa289bd7..036ff707c 100644
--- a/ethcore/src/migrations/state/v7.rs
+++ b/ethcore/src/migrations/state/v7.rs
@@ -22,7 +22,7 @@ use std::collections::HashMap;
use util::Bytes;
use util::hash::{Address, FixedHash, H256};
use util::kvdb::Database;
-use util::migration::{Batch, Config, Error, Migration, SimpleMigration};
+use util::migration::{Batch, Config, Error, Migration, SimpleMigration, Progress};
use util::rlp::{decode, Rlp, RlpStream, Stream, View};
use util::sha3::Hashable;
@@ -63,19 +63,16 @@ fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option {
/// Version for `ArchiveDB`.
#[derive(Default)]
-pub struct ArchiveV7(usize);
+pub struct ArchiveV7(Progress);
impl SimpleMigration for ArchiveV7 {
- fn version(&self) -> u32 {
- 7
- }
+
+ fn columns(&self) -> Option { None }
+
+ fn version(&self) -> u32 { 7 }
fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> {
- self.0 += 1;
- if self.0 == 100_000 {
- self.0 = 0;
- flush!(".");
- }
+ self.0.tick();
if key.len() != 32 {
// metadata key, ignore.
@@ -109,7 +106,7 @@ impl OverlayRecentV7 {
// walk all journal entries in the database backwards.
// find migrations for any possible inserted keys.
fn walk_journal(&mut self, source: &Database) -> Result<(), Error> {
- if let Some(val) = try!(source.get(V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
+ if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
let mut era = decode::(&val);
loop {
let mut index: usize = 0;
@@ -120,7 +117,7 @@ impl OverlayRecentV7 {
r.out()
};
- if let Some(journal_raw) = try!(source.get(&entry_key).map_err(Error::Custom)) {
+ if let Some(journal_raw) = try!(source.get(None, &entry_key).map_err(Error::Custom)) {
let rlp = Rlp::new(&journal_raw);
// migrate all inserted keys.
@@ -153,7 +150,7 @@ impl OverlayRecentV7 {
// replace all possible inserted/deleted keys with their migrated counterparts
// and commit the altered entries.
fn migrate_journal(&self, source: &Database, mut batch: Batch, dest: &mut Database) -> Result<(), Error> {
- if let Some(val) = try!(source.get(V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
+ if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
try!(batch.insert(V7_LATEST_ERA_KEY.into(), val.to_owned(), dest));
let mut era = decode::(&val);
@@ -166,7 +163,7 @@ impl OverlayRecentV7 {
r.out()
};
- if let Some(journal_raw) = try!(source.get(&entry_key).map_err(Error::Custom)) {
+ if let Some(journal_raw) = try!(source.get(None, &entry_key).map_err(Error::Custom)) {
let rlp = Rlp::new(&journal_raw);
let id: H256 = rlp.val_at(0);
let mut inserted_keys: Vec<(H256, Bytes)> = Vec::new();
@@ -221,22 +218,25 @@ impl OverlayRecentV7 {
}
impl Migration for OverlayRecentV7 {
+
+ fn columns(&self) -> Option { None }
+
fn version(&self) -> u32 { 7 }
// walk all records in the database, attempting to migrate any possible and
// keeping records of those that we do. then migrate the journal using
// this information.
- fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database) -> Result<(), Error> {
- let mut batch = Batch::new(config);
+ fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> {
+ let mut batch = Batch::new(config, col);
// check version metadata.
- match try!(source.get(V7_VERSION_KEY).map_err(Error::Custom)) {
+ match try!(source.get(None, V7_VERSION_KEY).map_err(Error::Custom)) {
Some(ref version) if decode::(&*version) == DB_VERSION => {}
_ => return Err(Error::MigrationImpossible), // missing or wrong version
}
let mut count = 0;
- for (key, value) in source.iter() {
+ for (key, value) in source.iter(None) {
count += 1;
if count == 100_000 {
count = 0;
diff --git a/ethcore/src/migrations/v9.rs b/ethcore/src/migrations/v9.rs
new file mode 100644
index 000000000..0c8e77588
--- /dev/null
+++ b/ethcore/src/migrations/v9.rs
@@ -0,0 +1,82 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see .
+
+
+//! This migration consolidates all databases into single one using Column Families.
+
+use util::{Rlp, RlpStream, View, Stream};
+use util::kvdb::Database;
+use util::migration::{Batch, Config, Error, Migration, Progress};
+
+/// Which part of the block to preserve.
+pub enum Extract {
+ /// Extract block header RLP.
+ Header,
+ /// Extract block body RLP.
+ Body,
+ /// Don't change the value.
+ All,
+}
+
+/// Consolidation of extras/block/state databases into single one.
+pub struct ToV9 {
+ progress: Progress,
+ column: Option,
+ extract: Extract,
+}
+
+impl ToV9 {
+ /// Creates a new V9 migration and assigns all `(key, value)` pairs from the `source` DB to the given column family.
+ pub fn new(column: Option, extract: Extract) -> Self {
+ ToV9 {
+ progress: Progress::default(),
+ column: column,
+ extract: extract,
+ }
+ }
+}
+
+impl Migration for ToV9 {
+
+ fn columns(&self) -> Option { Some(5) }
+
+ fn version(&self) -> u32 { 9 }
+
+ fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> {
+ let mut batch = Batch::new(config, self.column);
+
+ for (key, value) in source.iter(col) {
+ self.progress.tick();
+ match self.extract {
+ Extract::Header => {
+ try!(batch.insert(key.to_vec(), Rlp::new(&value).at(0).as_raw().to_vec(), dest))
+ },
+ Extract::Body => {
+ let mut body = RlpStream::new_list(2);
+ let block_rlp = Rlp::new(&value);
+ body.append_raw(block_rlp.at(1).as_raw(), 1);
+ body.append_raw(block_rlp.at(2).as_raw(), 1);
+ try!(batch.insert(key.to_vec(), body.out(), dest))
+ },
+ Extract::All => {
+ try!(batch.insert(key.to_vec(), value.to_vec(), dest))
+ }
+ }
+ }
+
+ batch.commit(dest)
+ }
+}
diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs
index 08ba2ca24..47d95722a 100644
--- a/ethcore/src/snapshot/mod.rs
+++ b/ethcore/src/snapshot/mod.rs
@@ -27,7 +27,8 @@ use error::Error;
use ids::BlockID;
use views::{BlockView, HeaderView};
-use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut};
+use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut, DBTransaction};
+use util::error::UtilError;
use util::hash::{FixedHash, H256};
use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType};
@@ -359,7 +360,9 @@ impl StateRebuilder {
try!(rebuild_account_trie(db.as_hashdb_mut(), account_chunk, out_pairs_chunk));
// commit the db changes we made in this thread.
- try!(db.commit(0, &H256::zero(), None));
+ let batch = DBTransaction::new(&db.backing());
+ try!(db.commit(&batch, 0, &H256::zero(), None));
+ try!(db.backing().write(batch).map_err(UtilError::SimpleString));
Ok(())
});
@@ -388,7 +391,9 @@ impl StateRebuilder {
}
}
- try!(self.db.commit(0, &H256::zero(), None));
+ let batch = DBTransaction::new(&self.db.backing());
+ try!(self.db.commit(&batch, 0, &H256::zero(), None));
+ try!(self.db.backing().write(batch).map_err(|e| Error::Util(e.into())));
Ok(())
}
diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs
index fe717aa09..b88c773c2 100644
--- a/ethcore/src/tests/helpers.rs
+++ b/ethcore/src/tests/helpers.rs
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see .
-use client::{BlockChainClient, Client, ClientConfig};
+use client::{self, BlockChainClient, Client, ClientConfig};
use common::*;
use spec::*;
use block::{OpenBlock, Drain};
@@ -246,12 +246,23 @@ pub fn get_test_client_with_blocks(blocks: Vec) -> GuardedTempResult Arc {
+ Arc::new(
+ Database::open(&DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS), path)
+ .expect("Opening database for tests should always work.")
+ )
+}
+
pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult {
let temp = RandomTempPath::new();
- let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path());
+ let db = new_db(temp.as_str());
+ let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
+
+ let batch = db.transaction();
for block_order in 1..block_number {
- bc.insert_block(&create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
+ bc.insert_block(&batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
}
+ db.write(batch).unwrap();
GuardedTempResult:: {
_temp: temp,
@@ -261,10 +272,15 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult GuardedTempResult {
let temp = RandomTempPath::new();
- let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path());
+ let db = new_db(temp.as_str());
+ let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
+
+
+ let batch = db.transaction();
for block_order in 1..block_number {
- bc.insert_block(&create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
+ bc.insert_block(&batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
}
+ db.write(batch).unwrap();
GuardedTempResult:: {
_temp: temp,
@@ -274,7 +290,8 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempRes
pub fn generate_dummy_empty_blockchain() -> GuardedTempResult {
let temp = RandomTempPath::new();
- let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path());
+ let db = new_db(temp.as_str());
+ let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
GuardedTempResult:: {
_temp: temp,
@@ -284,7 +301,8 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult {
pub fn get_temp_journal_db() -> GuardedTempResult> {
let temp = RandomTempPath::new();
- let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default());
+ let journal_db = get_temp_journal_db_in(temp.as_path());
+
GuardedTempResult {
_temp: temp,
result: Some(journal_db)
@@ -294,6 +312,7 @@ pub fn get_temp_journal_db() -> GuardedTempResult> {
pub fn get_temp_state() -> GuardedTempResult {
let temp = RandomTempPath::new();
let journal_db = get_temp_journal_db_in(temp.as_path());
+
GuardedTempResult {
_temp: temp,
result: Some(State::new(journal_db, U256::from(0), Default::default())),
@@ -301,7 +320,8 @@ pub fn get_temp_state() -> GuardedTempResult {
}
pub fn get_temp_journal_db_in(path: &Path) -> Box {
- journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default())
+ let db = new_db(path.to_str().expect("Only valid utf8 paths for tests."));
+ journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, None)
}
pub fn get_temp_state_in(path: &Path) -> State {
diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs
index 8fb0f531f..c0dab5d17 100644
--- a/ethcore/src/trace/db.rs
+++ b/ethcore/src/trace/db.rs
@@ -18,15 +18,15 @@
use std::ops::{Deref, DerefMut};
use std::collections::HashMap;
use std::sync::Arc;
-use std::path::Path;
use bloomchain::{Number, Config as BloomConfig};
use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
-use util::{H256, H264, Database, DatabaseConfig, DBTransaction, RwLock};
+use util::{H256, H264, Database, DBTransaction, RwLock};
use header::BlockNumber;
use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error};
use db::{Key, Writable, Readable, CacheUpdatePolicy};
use blooms;
use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
+use client::DB_COL_TRACE;
const TRACE_DB_VER: &'static [u8] = b"1.0";
@@ -94,7 +94,7 @@ pub struct TraceDB where T: DatabaseExtras {
traces: RwLock>,
blooms: RwLock>,
// db
- tracesdb: Database,
+ tracesdb: Arc,
// config,
bloom_config: BloomConfig,
// tracing enabled
@@ -106,24 +106,15 @@ pub struct TraceDB where T: DatabaseExtras {
impl BloomGroupDatabase for TraceDB where T: DatabaseExtras {
fn blooms_at(&self, position: &GroupPosition) -> Option {
let position = TraceGroupPosition::from(position.clone());
- self.tracesdb.read_with_cache(&self.blooms, &position).map(Into::into)
+ self.tracesdb.read_with_cache(DB_COL_TRACE, &self.blooms, &position).map(Into::into)
}
}
impl TraceDB where T: DatabaseExtras {
/// Creates new instance of `TraceDB`.
- pub fn new(config: Config, path: &Path, extras: Arc) -> Result {
- let mut tracedb_path = path.to_path_buf();
- tracedb_path.push("tracedb");
- let tracesdb = match config.db_cache_size {
- None => Database::open_default(tracedb_path.to_str().unwrap()).unwrap(),
- Some(db_cache) => Database::open(
- &DatabaseConfig::with_cache(db_cache),
- tracedb_path.to_str().unwrap()).unwrap(),
- };
-
+ pub fn new(config: Config, tracesdb: Arc, extras: Arc) -> Result {
// check if in previously tracing was enabled
- let old_tracing = match tracesdb.get(b"enabled").unwrap() {
+ let old_tracing = match tracesdb.get(DB_COL_TRACE, b"enabled").unwrap() {
Some(ref value) if value as &[u8] == &[0x1] => Switch::On,
Some(ref value) if value as &[u8] == &[0x0] => Switch::Off,
Some(_) => { panic!("tracesdb is corrupted") },
@@ -137,8 +128,10 @@ impl TraceDB where T: DatabaseExtras {
false => [0x0]
};
- tracesdb.put(b"enabled", &encoded_tracing).unwrap();
- tracesdb.put(b"version", TRACE_DB_VER).unwrap();
+ let batch = DBTransaction::new(&tracesdb);
+ batch.put(DB_COL_TRACE, b"enabled", &encoded_tracing).unwrap();
+ batch.put(DB_COL_TRACE, b"version", TRACE_DB_VER).unwrap();
+ tracesdb.write(batch).unwrap();
let db = TraceDB {
traces: RwLock::new(HashMap::new()),
@@ -154,7 +147,7 @@ impl TraceDB where T: DatabaseExtras {
/// Returns traces for block with hash.
fn traces(&self, block_hash: &H256) -> Option {
- self.tracesdb.read_with_cache(&self.traces, block_hash)
+ self.tracesdb.read_with_cache(DB_COL_TRACE, &self.traces, block_hash)
}
/// Returns vector of transaction traces for given block.
@@ -217,20 +210,18 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras {
/// Traces of import request's enacted blocks are expected to be already in database
/// or to be the currently inserted trace.
- fn import(&self, request: ImportRequest) {
+ fn import(&self, batch: &DBTransaction, request: ImportRequest) {
// fast return if tracing is disabled
if !self.tracing_enabled() {
return;
}
- let batch = DBTransaction::new();
-
// at first, let's insert new block traces
{
let mut traces = self.traces.write();
// it's important to use overwrite here,
// cause this value might be queried by hash later
- batch.write_with_cache(traces.deref_mut(), request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
+ batch.write_with_cache(DB_COL_TRACE, traces.deref_mut(), request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
}
// now let's rebuild the blooms
@@ -256,10 +247,8 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras {
.collect::>();
let mut blooms = self.blooms.write();
- batch.extend_with_cache(blooms.deref_mut(), blooms_to_insert, CacheUpdatePolicy::Remove);
+ batch.extend_with_cache(DB_COL_TRACE, blooms.deref_mut(), blooms_to_insert, CacheUpdatePolicy::Remove);
}
-
- self.tracesdb.write(batch).unwrap();
}
fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec) -> Option {
@@ -362,13 +351,14 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras {
mod tests {
use std::collections::HashMap;
use std::sync::Arc;
- use util::{Address, U256, H256};
+ use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction};
use devtools::RandomTempPath;
use header::BlockNumber;
- use trace::{Config, Switch, TraceDB, Database, DatabaseExtras, ImportRequest};
+ use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
use trace::{Filter, LocalizedTrace, AddressesFilter};
use trace::trace::{Call, Action, Res};
use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
+ use client::DB_NO_OF_COLUMNS;
use types::executed::CallType;
struct NoopExtras;
@@ -408,28 +398,33 @@ mod tests {
}
}
+	fn new_db(path: &str) -> Arc<Database> {
+ Arc::new(Database::open(&DatabaseConfig::with_columns(DB_NO_OF_COLUMNS), path).unwrap())
+ }
+
#[test]
fn test_reopening_db_with_tracing_off() {
let temp = RandomTempPath::new();
+ let db = new_db(temp.as_str());
let mut config = Config::default();
// set autotracing
config.enabled = Switch::Auto;
{
- let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
+ let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
{
- let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
+ let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
config.enabled = Switch::Off;
{
- let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
+ let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
}
@@ -437,32 +432,33 @@ mod tests {
#[test]
fn test_reopening_db_with_tracing_on() {
let temp = RandomTempPath::new();
+ let db = new_db(temp.as_str());
let mut config = Config::default();
// set tracing on
config.enabled = Switch::On;
{
- let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
+ let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
{
- let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
+ let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
config.enabled = Switch::Auto;
{
- let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
+ let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
config.enabled = Switch::Off;
{
- let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
+ let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
}
@@ -471,18 +467,19 @@ mod tests {
#[should_panic]
fn test_invalid_reopening_db() {
let temp = RandomTempPath::new();
+ let db = new_db(temp.as_str());
let mut config = Config::default();
// set tracing on
config.enabled = Switch::Off;
{
- let tracedb = TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap();
+ let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
config.enabled = Switch::On;
- TraceDB::new(config.clone(), temp.as_path(), Arc::new(NoopExtras)).unwrap(); // should panic!
+ TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic!
}
fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest {
@@ -531,6 +528,7 @@ mod tests {
#[test]
fn test_import() {
let temp = RandomTempPath::new();
+ let db = Arc::new(Database::open(&DatabaseConfig::with_columns(DB_NO_OF_COLUMNS), temp.as_str()).unwrap());
let mut config = Config::default();
config.enabled = Switch::On;
let block_0 = H256::from(0xa1);
@@ -544,11 +542,13 @@ mod tests {
extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
extras.transaction_hashes.insert(1, vec![tx_1.clone()]);
- let tracedb = TraceDB::new(config, temp.as_path(), Arc::new(extras)).unwrap();
+ let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap();
// import block 0
let request = create_simple_import_request(0, block_0.clone());
- tracedb.import(request);
+ let batch = DBTransaction::new(&db);
+ tracedb.import(&batch, request);
+ db.write(batch).unwrap();
let filter = Filter {
range: (0..0),
@@ -562,7 +562,9 @@ mod tests {
// import block 1
let request = create_simple_import_request(1, block_1.clone());
- tracedb.import(request);
+ let batch = DBTransaction::new(&db);
+ tracedb.import(&batch, request);
+ db.write(batch).unwrap();
let filter = Filter {
range: (0..1),
diff --git a/ethcore/src/trace/mod.rs b/ethcore/src/trace/mod.rs
index 3a3e24192..277227729 100644
--- a/ethcore/src/trace/mod.rs
+++ b/ethcore/src/trace/mod.rs
@@ -35,7 +35,7 @@ pub use self::executive_tracer::{ExecutiveTracer, ExecutiveVMTracer};
pub use types::trace_types::filter::{Filter, AddressesFilter};
pub use self::import::ImportRequest;
pub use self::localized::LocalizedTrace;
-use util::{Bytes, Address, U256, H256};
+use util::{Bytes, Address, U256, H256, DBTransaction};
use self::trace::{Call, Create};
use action_params::ActionParams;
use header::BlockNumber;
@@ -121,7 +121,7 @@ pub trait Database {
fn tracing_enabled(&self) -> bool;
/// Imports new block traces.
- fn import(&self, request: ImportRequest);
+ fn import(&self, batch: &DBTransaction, request: ImportRequest);
/// Returns localized trace at given position.
fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec) -> Option;
diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs
index f0f67c4c2..ed094a1d2 100644
--- a/ethcore/src/verification/verification.rs
+++ b/ethcore/src/verification/verification.rs
@@ -287,6 +287,14 @@ mod tests {
self.blocks.get(hash).cloned()
}
+	fn block_header_data(&self, hash: &H256) -> Option<Bytes> {
+ self.block(hash).map(|b| BlockView::new(&b).header_rlp().as_raw().to_vec())
+ }
+
+	fn block_body(&self, hash: &H256) -> Option<Bytes> {
+ self.block(hash).map(|b| BlockChain::block_to_body(&b))
+ }
+
/// Get the familial details concerning a block.
fn block_details(&self, hash: &H256) -> Option {
self.blocks.get(hash).map(|bytes| {
diff --git a/ethcore/src/views/block.rs b/ethcore/src/views/block.rs
index 42fd52a20..fdcae383b 100644
--- a/ethcore/src/views/block.rs
+++ b/ethcore/src/views/block.rs
@@ -56,6 +56,11 @@ impl<'a> BlockView<'a> {
self.rlp.val_at(0)
}
+ /// Return header rlp.
+ pub fn header_rlp(&self) -> Rlp {
+ self.rlp.at(0)
+ }
+
/// Create new header view obto block head rlp.
pub fn header_view(&self) -> HeaderView<'a> {
HeaderView::new_from_rlp(self.rlp.at(0))
diff --git a/ethcore/src/views/body.rs b/ethcore/src/views/body.rs
new file mode 100644
index 000000000..8f1295f31
--- /dev/null
+++ b/ethcore/src/views/body.rs
@@ -0,0 +1,144 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity.  If not, see <http://www.gnu.org/licenses/>.
+
+//! View onto block body rlp.
+
+use util::*;
+use header::*;
+use transaction::*;
+use super::{TransactionView, HeaderView};
+
+/// View onto block body rlp.
+pub struct BodyView<'a> {
+ rlp: Rlp<'a>
+}
+
+impl<'a> BodyView<'a> {
+ /// Creates new view onto block from raw bytes.
+ pub fn new(bytes: &'a [u8]) -> BodyView<'a> {
+ BodyView {
+ rlp: Rlp::new(bytes)
+ }
+ }
+
+ /// Creates new view onto block from rlp.
+ pub fn new_from_rlp(rlp: Rlp<'a>) -> BodyView<'a> {
+ BodyView {
+ rlp: rlp
+ }
+ }
+
+	/// Return reference to underlying rlp.
+ pub fn rlp(&self) -> &Rlp<'a> {
+ &self.rlp
+ }
+
+ /// Return List of transactions in given block.
+	pub fn transactions(&self) -> Vec<SignedTransaction> {
+ self.rlp.val_at(0)
+ }
+
+ /// Return List of transactions with additional localization info.
+	pub fn localized_transactions(&self, block_hash: &H256, block_number: BlockNumber) -> Vec<LocalizedTransaction> {
+ self.transactions()
+ .into_iter()
+ .enumerate()
+ .map(|(i, t)| LocalizedTransaction {
+ signed: t,
+ block_hash: block_hash.clone(),
+ block_number: block_number,
+ transaction_index: i
+ }).collect()
+ }
+
+ /// Return number of transactions in given block, without deserializing them.
+ pub fn transactions_count(&self) -> usize {
+ self.rlp.at(0).item_count()
+ }
+
+ /// Return List of transactions in given block.
+	pub fn transaction_views(&self) -> Vec<TransactionView> {
+ self.rlp.at(0).iter().map(TransactionView::new_from_rlp).collect()
+ }
+
+ /// Return transaction hashes.
+	pub fn transaction_hashes(&self) -> Vec<H256> {
+ self.rlp.at(0).iter().map(|rlp| rlp.as_raw().sha3()).collect()
+ }
+
+ /// Returns transaction at given index without deserializing unnecessary data.
+	pub fn transaction_at(&self, index: usize) -> Option<SignedTransaction> {
+ self.rlp.at(0).iter().nth(index).map(|rlp| rlp.as_val())
+ }
+
+ /// Returns localized transaction at given index.
+	pub fn localized_transaction_at(&self, block_hash: &H256, block_number: BlockNumber, index: usize) -> Option<LocalizedTransaction> {
+ self.transaction_at(index).map(|t| LocalizedTransaction {
+ signed: t,
+ block_hash: block_hash.clone(),
+ block_number: block_number,
+ transaction_index: index
+ })
+ }
+
+ /// Return list of uncles of given block.
+	pub fn uncles(&self) -> Vec<Header> {
+ self.rlp.val_at(1)
+ }
+
+ /// Return number of uncles in given block, without deserializing them.
+ pub fn uncles_count(&self) -> usize {
+ self.rlp.at(1).item_count()
+ }
+
+	/// Return list of uncle views of given block.
+	pub fn uncle_views(&self) -> Vec<HeaderView> {
+ self.rlp.at(1).iter().map(HeaderView::new_from_rlp).collect()
+ }
+
+ /// Return list of uncle hashes of given block.
+	pub fn uncle_hashes(&self) -> Vec<H256> {
+ self.rlp.at(1).iter().map(|rlp| rlp.as_raw().sha3()).collect()
+ }
+
+ /// Return nth uncle.
+	pub fn uncle_at(&self, index: usize) -> Option<Header> {
+ self.rlp.at(1).iter().nth(index).map(|rlp| rlp.as_val())
+ }
+
+ /// Return nth uncle rlp.
+	pub fn uncle_rlp_at(&self, index: usize) -> Option<Bytes> {
+ self.rlp.at(1).iter().nth(index).map(|rlp| rlp.as_raw().to_vec())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use util::*;
+ use super::BodyView;
+ use blockchain::BlockChain;
+
+ #[test]
+ fn test_block_view() {
+ // that's rlp of block created with ethash engine.
+ let rlp = "f90261f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23f862f86002018304cb2f94ec0e71ad0a90ffe1909d27dac207f7680abba42d01801ba03a347e72953c860f32b1eb2c78a680d8734b2ea08085d949d729479796f218d5a047ea6239d9e31ccac8af3366f5ca37184d26e7646e3191a3aeb81c4cf74de500c0".from_hex().unwrap();
+ let body = BlockChain::block_to_body(&rlp);
+ let view = BodyView::new(&body);
+ assert_eq!(view.transactions_count(), 1);
+ assert_eq!(view.uncles_count(), 0);
+ }
+}
+
diff --git a/ethcore/src/views/mod.rs b/ethcore/src/views/mod.rs
index c0102be3d..e8267e15a 100644
--- a/ethcore/src/views/mod.rs
+++ b/ethcore/src/views/mod.rs
@@ -19,7 +19,9 @@
mod block;
mod header;
mod transaction;
+mod body;
pub use self::block::BlockView;
pub use self::header::HeaderView;
+pub use self::body::BodyView;
pub use self::transaction::TransactionView;
diff --git a/evmbin/Cargo.lock b/evmbin/Cargo.lock
index 14c6d9bcb..f135b3b0b 100644
--- a/evmbin/Cargo.lock
+++ b/evmbin/Cargo.lock
@@ -159,6 +159,7 @@ name = "ethash"
version = "1.3.0"
dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"sha3 0.1.0",
]
@@ -250,8 +251,9 @@ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)",
"nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)",
+ "rocksdb 0.4.5",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -301,6 +303,7 @@ dependencies = [
"serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -622,6 +625,17 @@ name = "odds"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
+[[package]]
+name = "parking_lot"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "primal"
version = "0.2.3"
@@ -724,16 +738,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rocksdb"
version = "0.4.5"
-source = "git+https://github.com/ethcore/rust-rocksdb#9be41e05923616dfa28741c58b22776d479751e6"
dependencies = [
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)",
+ "rocksdb-sys 0.3.0",
]
[[package]]
name = "rocksdb-sys"
version = "0.3.0"
-source = "git+https://github.com/ethcore/rust-rocksdb#9be41e05923616dfa28741c58b22776d479751e6"
dependencies = [
"gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -831,6 +843,11 @@ name = "slab"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+[[package]]
+name = "smallvec"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
[[package]]
name = "solicit"
version = "0.4.4"
diff --git a/parity/dir.rs b/parity/dir.rs
index e1400e8e8..bb92e1277 100644
--- a/parity/dir.rs
+++ b/parity/dir.rs
@@ -52,13 +52,20 @@ impl Directories {
Ok(())
}
- /// Get the path for the databases given the root path and information on the databases.
- pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
+ /// Get the root path for database
+ pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
let mut dir = Path::new(&self.db).to_path_buf();
dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
dir
}
+
+ /// Get the path for the databases given the genesis_hash and information on the databases.
+ pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
+ let mut dir = self.db_version_path(genesis_hash, fork_name, pruning);
+ dir.push("db");
+ dir
+ }
}
#[cfg(test)]
diff --git a/parity/helpers.rs b/parity/helpers.rs
index 76d8250e5..881dd9c8f 100644
--- a/parity/helpers.rs
+++ b/parity/helpers.rs
@@ -238,7 +238,7 @@ pub fn execute_upgrades(
_ => {},
}
- let client_path = dirs.client_path(genesis_hash, fork_name, pruning);
+ let client_path = dirs.db_version_path(genesis_hash, fork_name, pruning);
migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
}
diff --git a/parity/migration.rs b/parity/migration.rs
index 33a000c56..cbfa78ccc 100644
--- a/parity/migration.rs
+++ b/parity/migration.rs
@@ -20,14 +20,18 @@ use std::io::{Read, Write, Error as IoError, ErrorKind};
use std::path::{Path, PathBuf};
use std::fmt::{Display, Formatter, Error as FmtError};
use util::journaldb::Algorithm;
-use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError};
-use util::kvdb::CompactionProfile;
+use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration};
+use util::kvdb::{CompactionProfile, Database, DatabaseConfig};
use ethcore::migrations;
+use ethcore::client;
+use ethcore::migrations::Extract;
/// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
-const CURRENT_VERSION: u32 = 8;
+const CURRENT_VERSION: u32 = 9;
+/// First version of the consolidated database.
+const CONSOLIDATION_VERSION: u32 = 9;
/// Defines how many items are migrated to the new version of database at once.
const BATCH_SIZE: usize = 1024;
/// Version file name.
@@ -111,27 +115,13 @@ fn update_version(path: &Path) -> Result<(), Error> {
Ok(())
}
-/// State database path.
-fn state_database_path(path: &Path) -> PathBuf {
+/// Consolidated database path
+fn consolidated_database_path(path: &Path) -> PathBuf {
let mut state_path = path.to_owned();
- state_path.push("state");
+ state_path.push("db");
state_path
}
-/// Blocks database path.
-fn blocks_database_path(path: &Path) -> PathBuf {
- let mut blocks_path = path.to_owned();
- blocks_path.push("blocks");
- blocks_path
-}
-
-/// Extras database path.
-fn extras_database_path(path: &Path) -> PathBuf {
- let mut extras_path = path.to_owned();
- extras_path.push("extras");
- extras_path
-}
-
/// Database backup
fn backup_database_path(path: &Path) -> PathBuf {
let mut backup_path = path.to_owned();
@@ -141,40 +131,55 @@ fn backup_database_path(path: &Path) -> PathBuf {
}
/// Default migration settings.
-fn default_migration_settings(compaction_profile: CompactionProfile) -> MigrationConfig {
+pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> MigrationConfig {
MigrationConfig {
batch_size: BATCH_SIZE,
- compaction_profile: compaction_profile,
+ compaction_profile: *compaction_profile,
}
}
-/// Migrations on the blocks database.
-fn blocks_database_migrations(compaction_profile: CompactionProfile) -> Result {
- let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
- try!(manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible));
+/// Migrations on the consolidated database.
+fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
+ let manager = MigrationManager::new(default_migration_settings(compaction_profile));
Ok(manager)
}
-/// Migrations on the extras database.
-fn extras_database_migrations(compaction_profile: CompactionProfile) -> Result {
- let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
- try!(manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible));
- Ok(manager)
-}
+/// Consolidates legacy databases into single one.
+fn consolidate_database(
+ old_db_path: PathBuf,
+ new_db_path: PathBuf,
+	column: Option<u32>,
+ extract: Extract,
+ compaction_profile: &CompactionProfile) -> Result<(), Error> {
+ fn db_error(e: String) -> Error {
+ warn!("Cannot open Database for consolidation: {:?}", e);
+ Error::MigrationFailed
+ }
-/// Migrations on the state database.
-fn state_database_migrations(pruning: Algorithm, compaction_profile: CompactionProfile) -> Result {
- let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
- let res = match pruning {
- Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
- Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
- _ => return Err(Error::UnsuportedPruningMethod),
+ let mut migration = migrations::ToV9::new(column, extract);
+ let config = default_migration_settings(compaction_profile);
+ let mut db_config = DatabaseConfig {
+ max_open_files: 64,
+ cache_size: None,
+ compaction: config.compaction_profile.clone(),
+ columns: None,
};
- try!(res.map_err(|_| Error::MigrationImpossible));
- Ok(manager)
+ let old_path_str = try!(old_db_path.to_str().ok_or(Error::MigrationImpossible));
+ let new_path_str = try!(new_db_path.to_str().ok_or(Error::MigrationImpossible));
+
+ let cur_db = try!(Database::open(&db_config, old_path_str).map_err(db_error));
+ // open new DB with proper number of columns
+ db_config.columns = migration.columns();
+ let mut new_db = try!(Database::open(&db_config, new_path_str).map_err(db_error));
+
+ // Migrate to new database (default column only)
+ try!(migration.migrate(&cur_db, &config, &mut new_db, None));
+
+ Ok(())
}
+
/// Migrates database at given position with given migration rules.
fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> {
// check if migration is needed
@@ -216,17 +221,108 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionPr
// migrate the databases.
// main db directory may already exists, so let's check if we have blocks dir
- if version < CURRENT_VERSION && exists(&blocks_database_path(path)) {
- println!("Migrating database from version {} to {}", version, CURRENT_VERSION);
- try!(migrate_database(version, blocks_database_path(path), try!(blocks_database_migrations(compaction_profile.clone()))));
- try!(migrate_database(version, extras_database_path(path), try!(extras_database_migrations(compaction_profile.clone()))));
- try!(migrate_database(version, state_database_path(path), try!(state_database_migrations(pruning, compaction_profile))));
- println!("Migration finished");
- } else if version > CURRENT_VERSION {
+ if version > CURRENT_VERSION {
return Err(Error::FutureDBVersion);
}
+ // We are in the latest version, yay!
+ if version == CURRENT_VERSION {
+ return Ok(())
+ }
+
+ // Perform pre-consolidation migrations
+ if version < CONSOLIDATION_VERSION && exists(&legacy::blocks_database_path(path)) {
+ println!("Migrating database from version {} to {}", version, CONSOLIDATION_VERSION);
+ try!(migrate_database(version, legacy::blocks_database_path(path), try!(legacy::blocks_database_migrations(&compaction_profile))));
+ try!(migrate_database(version, legacy::extras_database_path(path), try!(legacy::extras_database_migrations(&compaction_profile))));
+ try!(migrate_database(version, legacy::state_database_path(path), try!(legacy::state_database_migrations(pruning, &compaction_profile))));
+ let db_path = consolidated_database_path(path);
+ // Remove the database dir (it shouldn't exist anyway, but it might when migration was interrupted)
+ let _ = fs::remove_dir_all(db_path.clone());
+ try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_HEADERS, Extract::Header, &compaction_profile));
+	try!(consolidate_database(legacy::blocks_database_path(path), db_path.clone(), client::DB_COL_BODIES, Extract::Body, &compaction_profile));
+ try!(consolidate_database(legacy::extras_database_path(path), db_path.clone(), client::DB_COL_EXTRA, Extract::All, &compaction_profile));
+ try!(consolidate_database(legacy::state_database_path(path), db_path.clone(), client::DB_COL_STATE, Extract::All, &compaction_profile));
+ try!(consolidate_database(legacy::trace_database_path(path), db_path.clone(), client::DB_COL_TRACE, Extract::All, &compaction_profile));
+ let _ = fs::remove_dir_all(legacy::blocks_database_path(path));
+ let _ = fs::remove_dir_all(legacy::extras_database_path(path));
+ let _ = fs::remove_dir_all(legacy::state_database_path(path));
+ let _ = fs::remove_dir_all(legacy::trace_database_path(path));
+ println!("Migration finished");
+ }
+
+ // Further migrations
+ if version >= CONSOLIDATION_VERSION && version < CURRENT_VERSION && exists(&consolidated_database_path(path)) {
+ println!("Migrating database from version {} to {}", ::std::cmp::max(CONSOLIDATION_VERSION, version), CURRENT_VERSION);
+ try!(migrate_database(version, consolidated_database_path(path), try!(consolidated_database_migrations(&compaction_profile))));
+ println!("Migration finished");
+ }
+
// update version file.
update_version(path)
}
+/// Old migrations utilities
+mod legacy {
+ use super::*;
+ use std::path::{Path, PathBuf};
+ use util::journaldb::Algorithm;
+ use util::migration::{Manager as MigrationManager};
+ use util::kvdb::CompactionProfile;
+ use ethcore::migrations;
+
+ /// Blocks database path.
+ pub fn blocks_database_path(path: &Path) -> PathBuf {
+ let mut blocks_path = path.to_owned();
+ blocks_path.push("blocks");
+ blocks_path
+ }
+
+ /// Extras database path.
+ pub fn extras_database_path(path: &Path) -> PathBuf {
+ let mut extras_path = path.to_owned();
+ extras_path.push("extras");
+ extras_path
+ }
+
+ /// State database path.
+ pub fn state_database_path(path: &Path) -> PathBuf {
+ let mut state_path = path.to_owned();
+ state_path.push("state");
+ state_path
+ }
+
+ /// Trace database path.
+ pub fn trace_database_path(path: &Path) -> PathBuf {
+ let mut blocks_path = path.to_owned();
+ blocks_path.push("tracedb");
+ blocks_path
+ }
+
+ /// Migrations on the blocks database.
+	pub fn blocks_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
+ let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
+ try!(manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible));
+ Ok(manager)
+ }
+
+ /// Migrations on the extras database.
+	pub fn extras_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
+ let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
+ try!(manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible));
+ Ok(manager)
+ }
+
+ /// Migrations on the state database.
+	pub fn state_database_migrations(pruning: Algorithm, compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
+ let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
+ let res = match pruning {
+ Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
+ Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
+ _ => return Err(Error::UnsuportedPruningMethod),
+ };
+
+ try!(res.map_err(|_| Error::MigrationImpossible));
+ Ok(manager)
+ }
+}
diff --git a/parity/params.rs b/parity/params.rs
index c48afa37d..6e105f524 100644
--- a/parity/params.rs
+++ b/parity/params.rs
@@ -15,9 +15,11 @@
// along with Parity. If not, see .
use std::str::FromStr;
+use std::sync::Arc;
use std::time::Duration;
-use util::{contents, DatabaseConfig, journaldb, H256, Address, U256, version_data};
+use util::{contents, Database, DatabaseConfig, journaldb, H256, Address, U256, version_data};
use util::journaldb::Algorithm;
+use ethcore::client;
use ethcore::spec::Spec;
use ethcore::ethereum;
use ethcore::miner::{GasPricer, GasPriceCalibratorOptions};
@@ -103,11 +105,15 @@ impl Pruning {
algo_types.push(Algorithm::default());
algo_types.into_iter().max_by_key(|i| {
- let mut client_path = dirs.client_path(genesis_hash, fork_name, *i);
- client_path.push("state");
- let db = journaldb::new(client_path.to_str().unwrap(), *i, DatabaseConfig::default());
+ let client_path = dirs.client_path(genesis_hash, fork_name, *i);
+ let config = DatabaseConfig::with_columns(client::DB_NO_OF_COLUMNS);
+ let db = match Database::open(&config, client_path.to_str().unwrap()) {
+ Ok(db) => db,
+ Err(_) => return 0,
+ };
+ let db = journaldb::new(Arc::new(db), *i, client::DB_COL_STATE);
trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era());
- db.latest_era()
+ db.latest_era().unwrap_or(0)
}).unwrap()
}
}
diff --git a/test.sh b/test.sh
index af51fd3fd..18a7bb6b6 100755
--- a/test.sh
+++ b/test.sh
@@ -14,5 +14,5 @@ case $1 in
esac
. ./scripts/targets.sh
-cargo test --release --verbose $FEATURES $TARGETS $1 \
+cargo test --release $FEATURES $TARGETS $1 \
diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs
index 4bf377cf7..b8c5d1664 100644
--- a/util/src/journaldb/archivedb.rs
+++ b/util/src/journaldb/archivedb.rs
@@ -20,9 +20,9 @@ use common::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
-use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, VERSION_KEY};
+use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
use super::traits::JournalDB;
-use kvdb::{Database, DBTransaction, DatabaseConfig};
+use kvdb::{Database, DBTransaction};
#[cfg(test)]
use std::env;
@@ -30,9 +30,6 @@ use std::env;
/// Would be nich to use rocksdb columns for this eventually.
const AUX_FLAG: u8 = 255;
-/// Database version.
-const DB_VERSION : u32 = 0x103;
-
/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
@@ -44,28 +41,18 @@ pub struct ArchiveDB {
overlay: MemoryDB,
backing: Arc,
latest_era: Option,
+	column: Option<u32>,
}
impl ArchiveDB {
/// Create a new instance from file
- pub fn new(path: &str, config: DatabaseConfig) -> ArchiveDB {
- let backing = Database::open(&config, path).unwrap_or_else(|e| {
- panic!("Error opening state db: {}", e);
- });
- if !backing.is_empty() {
- match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) {
- Ok(Some(DB_VERSION)) => {},
- v => panic!("Incompatible DB version, expected {}, got {:?}; to resolve, remove {} and restart.", DB_VERSION, v, path)
- }
- } else {
- backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
- }
-
- let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val));
+	pub fn new(backing: Arc<Database>, col: Option<u32>) -> ArchiveDB {
+ let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val));
ArchiveDB {
overlay: MemoryDB::new(),
- backing: Arc::new(backing),
+ backing: backing,
latest_era: latest_era,
+ column: col,
}
}
@@ -74,18 +61,19 @@ impl ArchiveDB {
fn new_temp() -> ArchiveDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
- Self::new(dir.to_str().unwrap(), DatabaseConfig::default())
+ let backing = Arc::new(Database::open_default(dir.to_str().unwrap()).unwrap());
+ Self::new(backing, None)
}
fn payload(&self, key: &H256) -> Option