// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::time::Duration;
use ethcore::client::DatabaseCompactionProfile;
use ethcore::db::NUM_COLUMNS;
use ethcore::spec::{SpecParams, OptimizeFor};
use kvdb_rocksdb::{Database, DatabaseConfig};
use light::client::fetch::Unavailable as UnavailableDataFetcher;
use light::Cache as LightDataCache;
use params::{SpecType, Pruning};
use helpers::{execute_upgrades, compaction_profile};
use dir::Directories;
use cache::CacheConfig;
use user_defaults::UserDefaults;
/// Number of minutes before a given gas price corpus should expire.
/// Light client only.
const GAS_CORPUS_EXPIRATION_MINUTES: u64 = 60 * 6;
/// Parameters for the "export hardcoded sync" command.
#[derive(Debug, PartialEq)]
pub struct ExportHsyncCmd {
	/// Cache sizes; used below to size the queue memory budget and the
	/// database memory budget.
	pub cache_config: CacheConfig,
	/// Parity directory layout (base dir, cache dir, database dirs).
	pub dirs: Directories,
	/// Which chain spec to load.
	pub spec: SpecType,
	/// Pruning configuration; resolved to a concrete algorithm via user defaults.
	pub pruning: Pruning,
	/// RocksDB compaction profile.
	pub compaction: DatabaseCompactionProfile,
	/// Whether to enable the database write-ahead log.
	pub wal: bool,
}
pub fn execute(cmd: ExportHsyncCmd) -> Result {
use light::client as light_client;
use parking_lot::Mutex;
// load spec
let spec = cmd.spec.spec(SpecParams::new(cmd.dirs.cache.as_ref(), OptimizeFor::Memory))?;
// load genesis hash
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
// load user defaults
let user_defaults = UserDefaults::load(&user_defaults_path)?;
// select pruning algorithm
let algorithm = cmd.pruning.to_algorithm(&user_defaults);
let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path());
// execute upgrades
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?;
// create dirs used by parity
cmd.dirs.create_dirs(false, false, false)?;
// TODO: configurable cache size.
let cache = LightDataCache::new(Default::default(), Duration::from_secs(60 * GAS_CORPUS_EXPIRATION_MINUTES));
let cache = Arc::new(Mutex::new(cache));
// start client and create transaction queue.
let mut config = light_client::Config {
queue: Default::default(),
chain_column: ::ethcore::db::COL_LIGHT_CHAIN,
verify_full: true,
check_seal: true,
no_hardcoded_sync: true,
};
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
// initialize database.
let db = {
let db_config = DatabaseConfig {
memory_budget: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
compaction: compaction,
wal: cmd.wal,
.. DatabaseConfig::with_columns(NUM_COLUMNS)
};
Arc::new(Database::open(
&db_config,
&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string.")
).map_err(|e| format!("Error opening database: {}", e))?)
};
let service = light_client::Service::start(config, &spec, UnavailableDataFetcher, db, cache)
.map_err(|e| format!("Error starting light client: {}", e))?;
let hs = service.client().read_hardcoded_sync()
.map_err(|e| format!("Error reading hardcoded sync: {}", e))?;
if let Some(hs) = hs {
Ok(::serde_json::to_string_pretty(&hs.to_json()).expect("generated JSON is always valid"))
} else {
Err("Error: cannot generate hardcoded sync because the database is empty.".into())
}
}