Extract the hard dependency on rocksdb from the light client (#8034)

* Extract the hard dependency on rocksdb from the light client

* Remove TODO
This commit is contained in:
Pierre Krieger 2018-03-01 19:53:15 +01:00 committed by Rando
parent ca0d1f5eb7
commit d4205da484
11 changed files with 59 additions and 78 deletions

1
Cargo.lock generated
View File

@@ -553,7 +553,6 @@ dependencies = [
"keccak-hash 0.1.0", "keccak-hash 0.1.0",
"kvdb 0.1.0", "kvdb 0.1.0",
"kvdb-memorydb 0.1.0", "kvdb-memorydb 0.1.0",
"kvdb-rocksdb 0.1.0",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"memory-cache 0.1.0", "memory-cache 0.1.0",
"memorydb 0.1.1", "memorydb 0.1.1",

View File

@@ -69,9 +69,9 @@ keccak-hash = { path = "../util/hash" }
triehash = { path = "../util/triehash" } triehash = { path = "../util/triehash" }
unexpected = { path = "../util/unexpected" } unexpected = { path = "../util/unexpected" }
journaldb = { path = "../util/journaldb" } journaldb = { path = "../util/journaldb" }
tempdir = "0.3"
[dev-dependencies] [dev-dependencies]
tempdir = "0.3"
trie-standardmap = { path = "../util/trie-standardmap" } trie-standardmap = { path = "../util/trie-standardmap" }
[features] [features]

View File

@@ -35,11 +35,10 @@ stats = { path = "../../util/stats" }
keccak-hash = { path = "../../util/hash" } keccak-hash = { path = "../../util/hash" }
triehash = { path = "../../util/triehash" } triehash = { path = "../../util/triehash" }
kvdb = { path = "../../util/kvdb" } kvdb = { path = "../../util/kvdb" }
kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
memory-cache = { path = "../../util/memory_cache" } memory-cache = { path = "../../util/memory_cache" }
[dev-dependencies] [dev-dependencies]
kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
tempdir = "0.3" tempdir = "0.3"
[features] [features]

View File

@@ -36,7 +36,6 @@ use ethereum_types::{H256, U256};
use futures::{IntoFuture, Future}; use futures::{IntoFuture, Future};
use kvdb::{self, KeyValueDB}; use kvdb::{self, KeyValueDB};
use kvdb_rocksdb::CompactionProfile;
use self::fetch::ChainDataFetcher; use self::fetch::ChainDataFetcher;
use self::header_chain::{AncestryIter, HeaderChain}; use self::header_chain::{AncestryIter, HeaderChain};
@@ -57,12 +56,6 @@ pub struct Config {
pub queue: queue::Config, pub queue: queue::Config,
/// Chain column in database. /// Chain column in database.
pub chain_column: Option<u32>, pub chain_column: Option<u32>,
/// Database cache size. `None` => rocksdb default.
pub db_cache_size: Option<usize>,
/// State db compaction profile
pub db_compaction: CompactionProfile,
/// Should db have WAL enabled?
pub db_wal: bool,
/// Should it do full verification of blocks? /// Should it do full verification of blocks?
pub verify_full: bool, pub verify_full: bool,
/// Should it check the seal of blocks? /// Should it check the seal of blocks?
@@ -74,9 +67,6 @@ impl Default for Config {
Config { Config {
queue: Default::default(), queue: Default::default(),
chain_column: None, chain_column: None,
db_cache_size: None,
db_compaction: CompactionProfile::default(),
db_wal: true,
verify_full: true, verify_full: true,
check_seal: true, check_seal: true,
} }
@@ -205,28 +195,6 @@ impl<T: ChainDataFetcher> Client<T> {
self.listeners.write().push(listener); self.listeners.write().push(listener);
} }
/// Create a new `Client` backed purely in-memory.
/// This will ignore all database options in the configuration.
pub fn in_memory(
config: Config,
spec: &Spec,
fetcher: T,
io_channel: IoChannel<ClientIoMessage>,
cache: Arc<Mutex<Cache>>
) -> Self {
let db = ::kvdb_memorydb::create(0);
Client::new(
config,
Arc::new(db),
None,
spec,
fetcher,
io_channel,
cache
).expect("New DB creation infallible; qed")
}
/// Import a header to the queue for additional verification. /// Import a header to the queue for additional verification.
pub fn import_header(&self, header: Header) -> Result<H256, BlockImportError> { pub fn import_header(&self, header: Header) -> Result<H256, BlockImportError> {
self.queue.import(header).map_err(Into::into) self.queue.import(header).map_err(Into::into)

View File

@@ -18,15 +18,13 @@
//! Just handles block import messages and passes them to the client. //! Just handles block import messages and passes them to the client.
use std::fmt; use std::fmt;
use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use ethcore::db; use ethcore::db;
use ethcore::service::ClientIoMessage; use ethcore::service::ClientIoMessage;
use ethcore::spec::Spec; use ethcore::spec::Spec;
use io::{IoContext, IoError, IoHandler, IoService}; use io::{IoContext, IoError, IoHandler, IoService};
use kvdb; use kvdb::{self, KeyValueDB};
use kvdb_rocksdb::{Database, DatabaseConfig};
use cache::Cache; use cache::Cache;
use parking_lot::Mutex; use parking_lot::Mutex;
@@ -59,19 +57,7 @@ pub struct Service<T> {
impl<T: ChainDataFetcher> Service<T> { impl<T: ChainDataFetcher> Service<T> {
/// Start the service: initialize I/O workers and client itself. /// Start the service: initialize I/O workers and client itself.
pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, path: &Path, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> { pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, db: Arc<KeyValueDB>, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {
// initialize database.
let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS);
db_config.memory_budget = config.db_cache_size;
db_config.compaction = config.db_compaction;
db_config.wal = config.db_wal;
let db = Arc::new(Database::open(
&db_config,
&path.to_str().expect("DB path could not be converted to string.")
).map_err(Error::Database)?);
let io_service = IoService::<ClientIoMessage>::start().map_err(Error::Io)?; let io_service = IoService::<ClientIoMessage>::start().map_err(Error::Io)?;
let client = Arc::new(Client::new(config, let client = Arc::new(Client::new(config,
@@ -123,14 +109,15 @@ mod tests {
use client::fetch; use client::fetch;
use time::Duration; use time::Duration;
use parking_lot::Mutex; use parking_lot::Mutex;
use tempdir::TempDir; use kvdb_memorydb;
use ethcore::db::NUM_COLUMNS;
#[test] #[test]
fn it_works() { fn it_works() {
let tempdir = TempDir::new("").unwrap(); let db = Arc::new(kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0)));
let spec = Spec::new_test(); let spec = Spec::new_test();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
Service::start(Default::default(), &spec, fetch::unavailable(), tempdir.path(), cache).unwrap(); Service::start(Default::default(), &spec, fetch::unavailable(), db, cache).unwrap();
} }
} }

View File

@@ -80,9 +80,9 @@ extern crate vm;
extern crate keccak_hash as hash; extern crate keccak_hash as hash;
extern crate triehash; extern crate triehash;
extern crate kvdb; extern crate kvdb;
extern crate kvdb_memorydb;
extern crate kvdb_rocksdb;
extern crate memory_cache; extern crate memory_cache;
#[cfg(test)]
extern crate kvdb_memorydb;
#[cfg(test)] #[cfg(test)]
extern crate tempdir; extern crate tempdir;

View File

@@ -27,11 +27,10 @@ use ethereum_types::{H256, U256, Address};
use parking_lot::RwLock; use parking_lot::RwLock;
use journaldb; use journaldb;
use kvdb::DBValue; use kvdb::DBValue;
use kvdb_rocksdb::{Database, DatabaseConfig}; use kvdb_memorydb;
use bytes::Bytes; use bytes::Bytes;
use rlp::*; use rlp::*;
use ethkey::{Generator, Random}; use ethkey::{Generator, Random};
use tempdir::TempDir;
use transaction::{self, Transaction, LocalizedTransaction, PendingTransaction, SignedTransaction, Action}; use transaction::{self, Transaction, LocalizedTransaction, PendingTransaction, SignedTransaction, Action};
use blockchain::{TreeRoute, BlockReceipts}; use blockchain::{TreeRoute, BlockReceipts};
use client::{ use client::{
@@ -352,12 +351,10 @@ impl TestBlockChainClient {
} }
} }
pub fn get_temp_state_db() -> (StateDB, TempDir) { pub fn get_temp_state_db() -> StateDB {
let tempdir = TempDir::new("").unwrap(); let db = kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0));
let db = Database::open(&DatabaseConfig::with_columns(NUM_COLUMNS), tempdir.path().to_str().unwrap()).unwrap();
let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE); let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE);
let state_db = StateDB::new(journal_db, 1024 * 1024); StateDB::new(journal_db, 1024 * 1024)
(state_db, tempdir)
} }
impl MiningBlockChainClient for TestBlockChainClient { impl MiningBlockChainClient for TestBlockChainClient {
@@ -370,8 +367,7 @@ impl MiningBlockChainClient for TestBlockChainClient {
fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
let engine = &*self.spec.engine; let engine = &*self.spec.engine;
let genesis_header = self.spec.genesis_header(); let genesis_header = self.spec.genesis_header();
let (state_db, _tempdir) = get_temp_state_db(); let db = self.spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = self.spec.ensure_db_good(state_db, &Default::default()).unwrap();
let last_hashes = vec![genesis_header.hash()]; let last_hashes = vec![genesis_header.hash()];
let mut open_block = OpenBlock::new( let mut open_block = OpenBlock::new(

View File

@@ -115,6 +115,8 @@ extern crate vm;
extern crate wasm; extern crate wasm;
extern crate memory_cache; extern crate memory_cache;
extern crate journaldb; extern crate journaldb;
#[cfg(test)]
extern crate tempdir;
#[macro_use] #[macro_use]
extern crate macros; extern crate macros;
@@ -130,8 +132,6 @@ extern crate evm;
#[cfg(feature = "jit" )] #[cfg(feature = "jit" )]
extern crate evmjit; extern crate evmjit;
extern crate tempdir;
pub extern crate ethstore; pub extern crate ethstore;
pub mod account_provider; pub mod account_provider;

View File

@@ -27,11 +27,13 @@ use bytes::ToPretty;
use rlp::PayloadInfo; use rlp::PayloadInfo;
use ethcore::service::ClientService; use ethcore::service::ClientService;
use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockId}; use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockId};
use ethcore::db::NUM_COLUMNS;
use ethcore::error::ImportError; use ethcore::error::ImportError;
use ethcore::miner::Miner; use ethcore::miner::Miner;
use ethcore::verification::queue::VerifierSettings; use ethcore::verification::queue::VerifierSettings;
use cache::CacheConfig; use cache::CacheConfig;
use informant::{Informant, FullNodeInformantData, MillisecondDuration}; use informant::{Informant, FullNodeInformantData, MillisecondDuration};
use kvdb_rocksdb::{Database, DatabaseConfig};
use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool}; use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
use helpers::{to_client_config, execute_upgrades}; use helpers::{to_client_config, execute_upgrades};
use dir::Directories; use dir::Directories;
@@ -197,9 +199,6 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
let mut config = LightClientConfig { let mut config = LightClientConfig {
queue: Default::default(), queue: Default::default(),
chain_column: ::ethcore::db::COL_LIGHT_CHAIN, chain_column: ::ethcore::db::COL_LIGHT_CHAIN,
db_cache_size: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
db_compaction: compaction,
db_wal: cmd.wal,
verify_full: true, verify_full: true,
check_seal: cmd.check_seal, check_seal: cmd.check_seal,
}; };
@@ -207,9 +206,24 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
config.queue.verifier_settings = cmd.verifier_settings; config.queue.verifier_settings = cmd.verifier_settings;
// initialize database.
let db = {
let db_config = DatabaseConfig {
memory_budget: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
compaction: compaction,
wal: cmd.wal,
.. DatabaseConfig::with_columns(NUM_COLUMNS)
};
Arc::new(Database::open(
&db_config,
&client_path.to_str().expect("DB path could not be converted to string.")
).map_err(|e| format!("Failed to open database: {}", e))?)
};
// TODO: could epoch signals be available at the end of the file? // TODO: could epoch signals be available at the end of the file?
let fetch = ::light::client::fetch::unavailable(); let fetch = ::light::client::fetch::unavailable();
let service = LightClientService::start(config, &spec, fetch, &client_path, cache) let service = LightClientService::start(config, &spec, fetch, db, cache)
.map_err(|e| format!("Failed to start client: {}", e))?; .map_err(|e| format!("Failed to start client: {}", e))?;
// free up the spec in memory. // free up the spec in memory.

View File

@@ -24,6 +24,7 @@ use ansi_term::Colour;
use ctrlc::CtrlC; use ctrlc::CtrlC;
use ethcore::account_provider::{AccountProvider, AccountProviderSettings}; use ethcore::account_provider::{AccountProvider, AccountProviderSettings};
use ethcore::client::{Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient}; use ethcore::client::{Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient};
use ethcore::db::NUM_COLUMNS;
use ethcore::ethstore::ethkey; use ethcore::ethstore::ethkey;
use ethcore::miner::{Miner, MinerService, MinerOptions}; use ethcore::miner::{Miner, MinerService, MinerOptions};
use ethcore::miner::{StratumOptions, Stratum}; use ethcore::miner::{StratumOptions, Stratum};
@@ -38,6 +39,7 @@ use hash_fetch::fetch::{Fetch, Client as FetchClient};
use hash_fetch; use hash_fetch;
use informant::{Informant, LightNodeInformantData, FullNodeInformantData}; use informant::{Informant, LightNodeInformantData, FullNodeInformantData};
use journaldb::Algorithm; use journaldb::Algorithm;
use kvdb_rocksdb::{Database, DatabaseConfig};
use light::Cache as LightDataCache; use light::Cache as LightDataCache;
use miner::external::ExternalMiner; use miner::external::ExternalMiner;
use node_filter::NodeFilter; use node_filter::NodeFilter;
@@ -222,9 +224,6 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
let mut config = light_client::Config { let mut config = light_client::Config {
queue: Default::default(), queue: Default::default(),
chain_column: ::ethcore::db::COL_LIGHT_CHAIN, chain_column: ::ethcore::db::COL_LIGHT_CHAIN,
db_cache_size: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
db_compaction: compaction,
db_wal: cmd.wal,
verify_full: true, verify_full: true,
check_seal: cmd.check_seal, check_seal: cmd.check_seal,
}; };
@@ -241,7 +240,22 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
sync: sync_handle.clone(), sync: sync_handle.clone(),
}; };
let service = light_client::Service::start(config, &spec, fetch, &db_dirs.client_path(algorithm), cache.clone()) // initialize database.
let db = {
let db_config = DatabaseConfig {
memory_budget: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
compaction: compaction,
wal: cmd.wal,
.. DatabaseConfig::with_columns(NUM_COLUMNS)
};
Arc::new(Database::open(
&db_config,
&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string.")
).map_err(|e| format!("Error opening database: {}", e))?)
};
let service = light_client::Service::start(config, &spec, fetch, db, cache.clone())
.map_err(|e| format!("Error starting light client: {}", e))?; .map_err(|e| format!("Error starting light client: {}", e))?;
let client = service.client(); let client = service.client();
let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default())); let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default()));

View File

@@ -25,6 +25,7 @@ use tests::helpers::{TestNet, Peer as PeerLike, TestPacket};
use ethcore::client::TestBlockChainClient; use ethcore::client::TestBlockChainClient;
use ethcore::spec::Spec; use ethcore::spec::Spec;
use io::IoChannel; use io::IoChannel;
use kvdb_memorydb;
use light::client::fetch::{self, Unavailable}; use light::client::fetch::{self, Unavailable};
use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams}; use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams};
use light::provider::LightProvider; use light::provider::LightProvider;
@@ -218,13 +219,16 @@ impl TestNet<Peer> {
// skip full verification because the blocks are bad. // skip full verification because the blocks are bad.
config.verify_full = false; config.verify_full = false;
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6)))); let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
let client = LightClient::in_memory( let db = kvdb_memorydb::create(0);
let client = LightClient::new(
config, config,
Arc::new(db),
None,
&Spec::new_test(), &Spec::new_test(),
fetch::unavailable(), // TODO: allow fetch from full nodes. fetch::unavailable(), // TODO: allow fetch from full nodes.
IoChannel::disconnected(), IoChannel::disconnected(),
cache cache
); ).expect("New DB creation infallible; qed");
peers.push(Arc::new(Peer::new_light(Arc::new(client)))) peers.push(Arc::new(Peer::new_light(Arc::new(client))))
} }