// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
|
use std::{
|
|
|
|
fs, io,
|
|
|
|
io::{BufRead, BufReader},
|
|
|
|
str::from_utf8,
|
|
|
|
sync::Arc,
|
|
|
|
thread::sleep,
|
|
|
|
time::{Duration, Instant},
|
|
|
|
};
|
2019-11-11 21:57:38 +01:00
|
|
|
|
2020-08-05 06:08:03 +02:00
|
|
|
use ansi_term::Colour;
|
2017-09-06 20:47:45 +02:00
|
|
|
use bytes::ToPretty;
|
2016-07-25 16:09:47 +02:00
|
|
|
use cache::CacheConfig;
|
2020-08-05 06:08:03 +02:00
|
|
|
use db;
|
2016-07-25 16:09:47 +02:00
|
|
|
use dir::Directories;
|
2020-08-05 06:08:03 +02:00
|
|
|
use ethcore::{
|
|
|
|
client::{
|
2020-07-29 10:57:15 +02:00
|
|
|
Balance, BlockChainClient, BlockChainReset, BlockId, DatabaseCompactionProfile,
|
|
|
|
ImportExportBlocks, Mode, Nonce, VMType,
|
2020-08-05 06:08:03 +02:00
|
|
|
},
|
|
|
|
error::{Error as EthcoreError, ErrorKind as EthcoreErrorKind, ImportErrorKind},
|
|
|
|
miner::Miner,
|
2020-07-29 10:57:15 +02:00
|
|
|
verification::queue::VerifierSettings,
|
2020-08-05 06:08:03 +02:00
|
|
|
};
|
2018-04-09 16:14:33 +02:00
|
|
|
use ethcore_private_tx;
|
2020-08-05 06:08:03 +02:00
|
|
|
use ethcore_service::ClientService;
|
|
|
|
use ethereum_types::{Address, H256, U256};
|
|
|
|
use hash::{keccak, KECCAK_NULL_RLP};
|
|
|
|
use helpers::{execute_upgrades, to_client_config};
|
|
|
|
use informant::{FullNodeInformantData, Informant, MillisecondDuration};
|
|
|
|
use params::{fatdb_switch_to_bool, tracing_switch_to_bool, Pruning, SpecType, Switch};
|
|
|
|
use rlp::PayloadInfo;
|
|
|
|
use rustc_hex::FromHex;
|
|
|
|
use types::data_format::DataFormat;
|
|
|
|
use user_defaults::UserDefaults;
|
2016-07-25 16:09:47 +02:00
|
|
|
|
|
|
|
/// A blockchain-related subcommand, dispatched to its handler by [`execute`].
#[derive(Debug, PartialEq)]
pub enum BlockchainCmd {
    /// Delete the chain database (handled by `kill_db`).
    Kill(KillBlockchain),
    /// Import blocks from a file or stdin (full or light client path).
    Import(ImportBlockchain),
    /// Export a range of blocks to a file or stdout.
    Export(ExportBlockchain),
    /// Export account state at a given block to a file or stdout.
    ExportState(ExportState),
    /// Rewind the database by a number of blocks.
    Reset(ResetBlockchain),
}
|
|
|
|
|
|
|
|
/// Settings for the `Reset` subcommand, which rewinds the chain by `num` blocks.
#[derive(Debug, PartialEq)]
pub struct ResetBlockchain {
    /// Data directory layout.
    pub dirs: Directories,
    /// Chain spec selection.
    pub spec: SpecType,
    /// Pruning algorithm preference (resolved against user defaults).
    pub pruning: Pruning,
    /// Number of recent states to keep when pruning.
    pub pruning_history: u64,
    /// Pruning memory budget — units defined by `to_client_config`, not shown here.
    pub pruning_memory: usize,
    /// Transaction tracing switch.
    pub tracing: Switch,
    /// Fat database switch.
    pub fat_db: Switch,
    /// Database compaction profile.
    pub compaction: DatabaseCompactionProfile,
    /// Cache size configuration.
    pub cache_config: CacheConfig,
    /// Number of blocks to rewind (passed to `Client::reset`).
    pub num: u32,
}
|
|
|
|
|
2016-12-12 17:19:41 +01:00
|
|
|
/// Settings for the `Kill` subcommand, which deletes the chain database.
#[derive(Debug, PartialEq)]
pub struct KillBlockchain {
    /// Chain spec selection (used to locate the per-chain database directory).
    pub spec: SpecType,
    /// Data directory layout.
    pub dirs: Directories,
    /// Pruning algorithm preference (selects which DB path to remove).
    pub pruning: Pruning,
}
|
|
|
|
|
2016-07-25 16:09:47 +02:00
|
|
|
/// Settings for the `Import` subcommand.
#[derive(Debug, PartialEq)]
pub struct ImportBlockchain {
    /// Chain spec selection.
    pub spec: SpecType,
    /// Cache size configuration.
    pub cache_config: CacheConfig,
    /// Data directory layout.
    pub dirs: Directories,
    /// Path to the input file; `None` means read from stdin.
    pub file_path: Option<String>,
    /// Input format; when `None` the light path sniffs it from the first byte.
    pub format: Option<DataFormat>,
    /// Pruning algorithm preference.
    pub pruning: Pruning,
    /// Number of recent states to keep when pruning.
    pub pruning_history: u64,
    /// Pruning memory budget — units defined by `to_client_config`, not shown here.
    pub pruning_memory: usize,
    /// Database compaction profile.
    pub compaction: DatabaseCompactionProfile,
    /// Transaction tracing switch.
    pub tracing: Switch,
    /// Fat database switch.
    pub fat_db: Switch,
    /// EVM implementation to use.
    pub vm_type: VMType,
    /// Whether to verify block seals during import.
    pub check_seal: bool,
    /// Whether the informant prints colored output.
    pub with_color: bool,
    /// Block verification queue settings.
    pub verifier_settings: VerifierSettings,
    /// Use the light-client import path (`execute_import_light`) instead of the full one.
    pub light: bool,
    /// Max blocks per import round.
    /// NOTE(review): `execute_import` passes a literal `12` to `to_client_config`
    /// instead of this field — confirm whether that is intentional.
    pub max_round_blocks_to_import: usize,
}
|
|
|
|
|
|
|
|
/// Settings for the `Export` subcommand, which dumps blocks
/// in `[from_block, to_block]` to a file or stdout.
#[derive(Debug, PartialEq)]
pub struct ExportBlockchain {
    /// Chain spec selection.
    pub spec: SpecType,
    /// Cache size configuration.
    pub cache_config: CacheConfig,
    /// Data directory layout.
    pub dirs: Directories,
    /// Path to the output file; `None` means write to stdout.
    pub file_path: Option<String>,
    /// Output format; `None` delegates the default to `export_blocks`.
    pub format: Option<DataFormat>,
    /// Pruning algorithm preference.
    pub pruning: Pruning,
    /// Number of recent states to keep when pruning.
    pub pruning_history: u64,
    /// Pruning memory budget — units defined by `to_client_config`, not shown here.
    pub pruning_memory: usize,
    /// Database compaction profile.
    pub compaction: DatabaseCompactionProfile,
    /// Fat database switch.
    pub fat_db: Switch,
    /// Transaction tracing switch.
    pub tracing: Switch,
    /// First block to export (inclusive).
    pub from_block: BlockId,
    /// Last block to export (inclusive).
    pub to_block: BlockId,
    /// Whether to verify block seals.
    pub check_seal: bool,
    /// Max blocks per import round, forwarded to `start_client`.
    pub max_round_blocks_to_import: usize,
}
|
|
|
|
|
2016-11-27 11:11:56 +01:00
|
|
|
/// Settings for the `ExportState` subcommand, which dumps the account state
/// at block `at` as JSON. Requires the fat database (`start_client` is called
/// with `require_fat_db = true`).
#[derive(Debug, PartialEq)]
pub struct ExportState {
    /// Chain spec selection.
    pub spec: SpecType,
    /// Cache size configuration.
    pub cache_config: CacheConfig,
    /// Data directory layout.
    pub dirs: Directories,
    /// Path to the output file; `None` means write to stdout.
    pub file_path: Option<String>,
    /// Output format — NOTE(review): not referenced by `execute_export_state`;
    /// the state dump is always JSON.
    pub format: Option<DataFormat>,
    /// Pruning algorithm preference.
    pub pruning: Pruning,
    /// Number of recent states to keep when pruning.
    pub pruning_history: u64,
    /// Pruning memory budget — units defined by `to_client_config`, not shown here.
    pub pruning_memory: usize,
    /// Database compaction profile.
    pub compaction: DatabaseCompactionProfile,
    /// Fat database switch (must resolve to `true` for this command).
    pub fat_db: Switch,
    /// Transaction tracing switch.
    pub tracing: Switch,
    /// Block at which to take the state snapshot.
    pub at: BlockId,
    /// Include per-account storage entries in the dump.
    pub storage: bool,
    /// Include contract code (hex) in the dump.
    pub code: bool,
    /// Skip accounts with balance below this value.
    pub min_balance: Option<U256>,
    /// Skip accounts with balance above this value.
    pub max_balance: Option<U256>,
    /// Max blocks per import round, forwarded to `start_client`.
    pub max_round_blocks_to_import: usize,
}
|
|
|
|
|
2016-12-16 11:00:17 +01:00
|
|
|
pub fn execute(cmd: BlockchainCmd) -> Result<(), String> {
|
2020-08-05 06:08:03 +02:00
|
|
|
match cmd {
|
|
|
|
BlockchainCmd::Kill(kill_cmd) => kill_db(kill_cmd),
|
|
|
|
BlockchainCmd::Import(import_cmd) => {
|
|
|
|
if import_cmd.light {
|
|
|
|
execute_import_light(import_cmd)
|
|
|
|
} else {
|
|
|
|
execute_import(import_cmd)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
BlockchainCmd::Export(export_cmd) => execute_export(export_cmd),
|
|
|
|
BlockchainCmd::ExportState(export_cmd) => execute_export_state(export_cmd),
|
|
|
|
BlockchainCmd::Reset(reset_cmd) => execute_reset(reset_cmd),
|
|
|
|
}
|
2016-07-25 16:09:47 +02:00
|
|
|
}
|
|
|
|
|
2017-07-27 13:50:12 +02:00
|
|
|
/// Import block *headers* through the light client.
///
/// Reads RLP-encoded blocks (binary or hex) from `cmd.file_path` or stdin,
/// extracts each header, and feeds it into a freshly started
/// `LightClientService`. Returns an error string on any I/O, decoding, or
/// import failure.
fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
    use light::{
        cache::Cache as LightDataCache,
        client::{Config as LightClientConfig, Service as LightClientService},
    };
    use parking_lot::Mutex;

    let timer = Instant::now();

    // load spec file
    let spec = cmd.spec.spec(&cmd.dirs.cache)?;

    // load genesis hash
    let genesis_hash = spec.genesis_header().hash();

    // database paths
    let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone());

    // user defaults path
    let user_defaults_path = db_dirs.user_defaults_path();

    // load user defaults
    let user_defaults = UserDefaults::load(&user_defaults_path)?;

    // select pruning algorithm
    let algorithm = cmd.pruning.to_algorithm(&user_defaults);

    // prepare client and snapshot paths.
    let client_path = db_dirs.client_path(algorithm);

    // execute upgrades
    execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;

    // create dirs used by parity
    cmd.dirs.create_dirs(false, false)?;

    // Light-client data cache; zero duration presumably disables time-based
    // expiry — TODO confirm against LightDataCache::new docs.
    let cache = Arc::new(Mutex::new(LightDataCache::new(
        Default::default(),
        Duration::new(0, 0),
    )));

    let mut config = LightClientConfig {
        queue: Default::default(),
        chain_column: ethcore_db::COL_LIGHT_CHAIN,
        verify_full: true,
        check_seal: cmd.check_seal,
        no_hardcoded_sync: true,
    };

    // Queue memory limit is configured in MiB; convert to bytes.
    config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
    config.queue.verifier_settings = cmd.verifier_settings;

    // initialize database.
    let db = db::open_db(
        &client_path
            .to_str()
            .expect("DB path could not be converted to string."),
        &cmd.cache_config,
        &cmd.compaction,
    )
    .map_err(|e| format!("Failed to open database: {:?}", e))?;

    // TODO: could epoch signals be available at the end of the file?
    let fetch = ::light::client::fetch::unavailable();
    let service = LightClientService::start(config, &spec, fetch, db, cache)
        .map_err(|e| format!("Failed to start client: {}", e))?;

    // free up the spec in memory.
    drop(spec);

    let client = service.client();

    let mut instream: Box<dyn io::Read> = match cmd.file_path {
        Some(f) => {
            Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?)
        }
        None => Box::new(io::stdin()),
    };

    // Number of bytes peeked to sniff the input format when none was given.
    const READAHEAD_BYTES: usize = 8;

    // `first_bytes`/`first_read` hold the sniffed prefix so it can be
    // re-consumed by whichever decoding branch runs first.
    let mut first_bytes: Vec<u8> = vec![0; READAHEAD_BYTES];
    let mut first_read = 0;

    let format = match cmd.format {
        Some(format) => format,
        None => {
            first_read = instream
                .read(&mut first_bytes)
                .map_err(|_| "Error reading from the file/stream.")?;
            // 0xf9 is the leading byte of a long RLP list, i.e. raw binary.
            // NOTE(review): if the stream is empty (`first_read == 0`) this
            // inspects an untouched zero byte and falls through to Hex;
            // indexing is safe only because the buffer is pre-allocated.
            match first_bytes[0] {
                0xf9 => DataFormat::Binary,
                _ => DataFormat::Hex,
            }
        }
    };

    // Import a single RLP-encoded block: back-pressure on a full queue, skip
    // headers at or below the current best, and tolerate already-known blocks.
    let do_import = |bytes: Vec<u8>| {
        while client.queue_info().is_full() {
            sleep(Duration::from_secs(1));
        }

        // Only the header (list item 0) is used on the light path.
        let header: ::types::header::Header = ::rlp::Rlp::new(&bytes)
            .val_at(0)
            .map_err(|e| format!("Bad block: {}", e))?;

        if client.best_block_header().number() >= header.number() {
            return Ok(());
        }

        if header.number() % 10000 == 0 {
            info!("#{}", header.number());
        }

        match client.import_header(header) {
            Err(EthcoreError(EthcoreErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => {
                trace!("Skipping block already in chain.");
            }
            Err(e) => {
                return Err(format!("Cannot import block: {:?}", e));
            }
            Ok(_) => {}
        }
        Ok(())
    };

    match format {
        DataFormat::Binary => loop {
            // First iteration may replay the sniffed prefix instead of reading.
            let mut bytes = if first_read > 0 {
                first_bytes.clone()
            } else {
                vec![0; READAHEAD_BYTES]
            };
            let n = if first_read > 0 {
                first_read
            } else {
                instream
                    .read(&mut bytes)
                    .map_err(|_| "Error reading from the file/stream.")?
            };
            if n == 0 {
                break;
            }
            first_read = 0;
            // Total RLP payload length tells us how much more to read.
            let s = PayloadInfo::from(&bytes)
                .map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))?
                .total();
            bytes.resize(s, 0);
            instream
                .read_exact(&mut bytes[n..])
                .map_err(|_| "Error reading from the file/stream.")?;
            do_import(bytes)?;
        },
        DataFormat::Hex => {
            for line in BufReader::new(instream).lines() {
                let s = line.map_err(|_| "Error reading from the file/stream.")?;
                // Prepend the sniffed prefix to the first line only.
                // NOTE(review): this concatenates all READAHEAD_BYTES of
                // `first_bytes`, even if fewer than that were actually read —
                // confirm short first reads cannot occur here.
                let s = if first_read > 0 {
                    from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])
                } else {
                    s
                };
                first_read = 0;
                let bytes = s.from_hex().map_err(|_| "Invalid hex in file/stream.")?;
                do_import(bytes)?;
            }
        }
    }
    client.flush_queue();

    let ms = timer.elapsed().as_milliseconds();
    let report = client.report();

    info!(
        "Import completed in {} seconds, {} headers, {} hdr/s",
        ms / 1000,
        report.blocks_imported,
        (report.blocks_imported * 1000) as u64 / ms,
    );

    Ok(())
}
|
|
|
|
|
2016-12-16 11:00:17 +01:00
|
|
|
/// Import full blocks through the regular (full) client.
///
/// Boots a complete `ClientService` against the chain database, streams
/// blocks from `cmd.file_path` or stdin via `import_blocks`, persists the
/// resolved pruning/tracing/fat-db choices to user defaults, and logs an
/// import summary. Returns an error string on any setup or import failure.
fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
    let timer = Instant::now();

    // load spec file
    let spec = cmd.spec.spec(&cmd.dirs.cache)?;

    // load genesis hash
    let genesis_hash = spec.genesis_header().hash();

    // database paths
    let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone());

    // user defaults path
    let user_defaults_path = db_dirs.user_defaults_path();

    // load user defaults
    let mut user_defaults = UserDefaults::load(&user_defaults_path)?;

    // select pruning algorithm
    let algorithm = cmd.pruning.to_algorithm(&user_defaults);

    // check if tracing is on
    let tracing = tracing_switch_to_bool(cmd.tracing, &user_defaults)?;

    // check if fatdb is on
    let fat_db = fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)?;

    // prepare client and snapshot paths.
    let client_path = db_dirs.client_path(algorithm);
    let snapshot_path = db_dirs.snapshot_path();

    // execute upgrades
    execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;

    // create dirs used by parity
    cmd.dirs.create_dirs(false, false)?;

    // prepare client config
    // NOTE(review): the final argument (max blocks per import round) is the
    // literal `12` rather than `cmd.max_round_blocks_to_import`, unlike
    // `start_client` below — confirm this is intentional.
    let mut client_config = to_client_config(
        &cmd.cache_config,
        spec.name.to_lowercase(),
        Mode::Active,
        tracing,
        fat_db,
        cmd.compaction,
        cmd.vm_type,
        "".into(),
        algorithm,
        cmd.pruning_history,
        cmd.pruning_memory,
        cmd.check_seal,
        12,
    );

    client_config.queue.verifier_settings = cmd.verifier_settings;

    let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
    let client_db = restoration_db_handler
        .open(&client_path)
        .map_err(|e| format!("Failed to open database {:?}", e))?;

    // build client
    let service = ClientService::start(
        client_config,
        &spec,
        client_db,
        &snapshot_path,
        restoration_db_handler,
        &cmd.dirs.ipc_path(),
        // TODO [ToDr] don't use test miner here
        // (actually don't require miner at all)
        Arc::new(Miner::new_for_tests(&spec, None)),
        Arc::new(ethcore_private_tx::DummySigner),
        Box::new(ethcore_private_tx::NoopEncryptor),
        Default::default(),
        Default::default(),
    )
    .map_err(|e| format!("Client service error: {:?}", e))?;

    // free up the spec in memory.
    drop(spec);

    let client = service.client();

    let instream: Box<dyn io::Read> = match cmd.file_path {
        Some(f) => {
            Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?)
        }
        None => Box::new(io::stdin()),
    };

    // Periodic progress reporting while the import runs.
    let informant = Arc::new(Informant::new(
        FullNodeInformantData {
            client: client.clone(),
            sync: None,
            net: None,
        },
        None,
        None,
        cmd.with_color,
    ));

    service
        .register_io_handler(informant)
        .map_err(|_| "Unable to register informant handler".to_owned())?;

    client.import_blocks(instream, cmd.format)?;

    // save user defaults
    user_defaults.pruning = algorithm;
    user_defaults.tracing = tracing;
    user_defaults.fat_db = fat_db;
    user_defaults.save(&user_defaults_path)?;

    let report = client.report();

    // NOTE(review): the rate computations divide by `ms`; a sub-millisecond
    // import would panic on division by zero — confirm acceptable.
    let ms = timer.elapsed().as_milliseconds();
    info!("Import completed in {} seconds, {} blocks, {} blk/s, {} transactions, {} tx/s, {} Mgas, {} Mgas/s",
        ms / 1000,
        report.blocks_imported,
        (report.blocks_imported * 1000) as u64 / ms,
        report.transactions_applied,
        (report.transactions_applied * 1000) as u64 / ms,
        report.gas_processed / 1_000_000,
        (report.gas_processed / (ms * 1000)).low_u64(),
    );
    Ok(())
}
|
|
|
|
|
2016-11-27 11:11:56 +01:00
|
|
|
/// Boot a full `ClientService` for offline commands (export, export-state,
/// reset).
///
/// Resolves pruning/tracing/fat-db switches against the stored user defaults,
/// runs any pending database upgrades, and starts the service with a test
/// miner (mining parameters are irrelevant for these commands).
///
/// When `require_fat_db` is set and the fat database is not enabled, returns
/// an error instructing the user to resync with `--fat-db`.
fn start_client(
    dirs: Directories,
    spec: SpecType,
    pruning: Pruning,
    pruning_history: u64,
    pruning_memory: usize,
    tracing: Switch,
    fat_db: Switch,
    compaction: DatabaseCompactionProfile,
    cache_config: CacheConfig,
    require_fat_db: bool,
    max_round_blocks_to_import: usize,
) -> Result<ClientService, String> {
    // load spec file
    let spec = spec.spec(&dirs.cache)?;

    // load genesis hash
    let genesis_hash = spec.genesis_header().hash();

    // database paths
    let db_dirs = dirs.database(genesis_hash, None, spec.data_dir.clone());

    // user defaults path
    let user_defaults_path = db_dirs.user_defaults_path();

    // load user defaults
    let user_defaults = UserDefaults::load(&user_defaults_path)?;

    // select pruning algorithm
    let algorithm = pruning.to_algorithm(&user_defaults);

    // check if tracing is on
    let tracing = tracing_switch_to_bool(tracing, &user_defaults)?;

    // check if fatdb is on
    let fat_db = fatdb_switch_to_bool(fat_db, &user_defaults, algorithm)?;
    if !fat_db && require_fat_db {
        return Err("This command requires Parity to be synced with --fat-db on.".to_owned());
    }

    // prepare client and snapshot paths.
    let client_path = db_dirs.client_path(algorithm);
    let snapshot_path = db_dirs.snapshot_path();

    // execute upgrades
    execute_upgrades(&dirs.base, &db_dirs, algorithm, &compaction)?;

    // create dirs used by parity
    dirs.create_dirs(false, false)?;

    // prepare client config
    let client_config = to_client_config(
        &cache_config,
        spec.name.to_lowercase(),
        Mode::Active,
        tracing,
        fat_db,
        compaction,
        VMType::default(),
        "".into(),
        algorithm,
        pruning_history,
        pruning_memory,
        true,
        max_round_blocks_to_import,
    );

    let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
    let client_db = restoration_db_handler
        .open(&client_path)
        .map_err(|e| format!("Failed to open database {:?}", e))?;

    let service = ClientService::start(
        client_config,
        &spec,
        client_db,
        &snapshot_path,
        restoration_db_handler,
        &dirs.ipc_path(),
        // It's fine to use test version here,
        // since we don't care about miner parameters at all
        Arc::new(Miner::new_for_tests(&spec, None)),
        Arc::new(ethcore_private_tx::DummySigner),
        Box::new(ethcore_private_tx::NoopEncryptor),
        Default::default(),
        Default::default(),
    )
    .map_err(|e| format!("Client service error: {:?}", e))?;

    // free up the spec in memory.
    drop(spec);
    Ok(service)
}
|
|
|
|
|
2016-12-16 11:00:17 +01:00
|
|
|
fn execute_export(cmd: ExportBlockchain) -> Result<(), String> {
|
2020-08-05 06:08:03 +02:00
|
|
|
let service = start_client(
|
|
|
|
cmd.dirs,
|
|
|
|
cmd.spec,
|
|
|
|
cmd.pruning,
|
|
|
|
cmd.pruning_history,
|
|
|
|
cmd.pruning_memory,
|
|
|
|
cmd.tracing,
|
|
|
|
cmd.fat_db,
|
|
|
|
cmd.compaction,
|
|
|
|
cmd.cache_config,
|
|
|
|
false,
|
|
|
|
cmd.max_round_blocks_to_import,
|
|
|
|
)?;
|
|
|
|
let client = service.client();
|
|
|
|
|
|
|
|
let out: Box<dyn io::Write> = match cmd.file_path {
|
|
|
|
Some(f) => Box::new(
|
|
|
|
fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?,
|
|
|
|
),
|
|
|
|
None => Box::new(io::stdout()),
|
|
|
|
};
|
|
|
|
|
|
|
|
client.export_blocks(out, cmd.from_block, cmd.to_block, cmd.format)?;
|
|
|
|
|
|
|
|
info!("Export completed.");
|
|
|
|
Ok(())
|
2016-07-25 16:09:47 +02:00
|
|
|
}
|
|
|
|
|
2016-12-16 11:00:17 +01:00
|
|
|
/// Export the account state at block `cmd.at` as JSON.
///
/// Requires the fat database (`start_client` called with
/// `require_fat_db = true`). Streams `{ "state": { "0x…": {…}, … } }` to the
/// output, paging through accounts (and optionally storage keys) 1000 at a
/// time, filtering by `min_balance`/`max_balance`, and optionally including
/// contract code and storage. The JSON is hand-assembled with `write_fmt`,
/// so comma placement depends on the `i`/`last_storage` counters below.
fn execute_export_state(cmd: ExportState) -> Result<(), String> {
    let service = start_client(
        cmd.dirs,
        cmd.spec,
        cmd.pruning,
        cmd.pruning_history,
        cmd.pruning_memory,
        cmd.tracing,
        cmd.fat_db,
        cmd.compaction,
        cmd.cache_config,
        true,
        cmd.max_round_blocks_to_import,
    )?;

    let client = service.client();

    let mut out: Box<dyn io::Write> = match cmd.file_path {
        Some(f) => Box::new(
            fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?,
        ),
        None => Box::new(io::stdout()),
    };

    // `last` is the pagination cursor over account addresses;
    // `i` counts emitted accounts (drives comma placement and progress logs).
    let mut last: Option<Address> = None;
    let at = cmd.at;
    let mut i = 0usize;

    out.write_fmt(format_args!("{{ \"state\": {{",))
        .expect("Couldn't write to stream.");
    loop {
        // Page of up to 1000 accounts after the cursor.
        let accounts = client
            .list_accounts(at, last.as_ref(), 1000)
            .ok_or("Specified block not found")?;
        if accounts.is_empty() {
            break;
        }

        for account in accounts.into_iter() {
            let balance = client
                .balance(&account, at.into())
                .unwrap_or_else(U256::zero);
            // Balance filter: skip, but still advance the cursor.
            if cmd.min_balance.map_or(false, |m| balance < m)
                || cmd.max_balance.map_or(false, |m| balance > m)
            {
                last = Some(account);
                continue; //filtered out
            }

            // Comma before every account entry except the first emitted one.
            if i != 0 {
                out.write(b",").expect("Write error");
            }
            out.write_fmt(format_args!(
                "\n\"0x{:x}\": {{\"balance\": \"{:x}\", \"nonce\": \"{:x}\"",
                account,
                balance,
                client.nonce(&account, at).unwrap_or_else(U256::zero)
            ))
            .expect("Write error");
            let code = client
                .code(&account, at.into())
                .unwrap_or(None)
                .unwrap_or_else(Vec::new);
            if !code.is_empty() {
                out.write_fmt(format_args!(", \"code_hash\": \"0x{:x}\"", keccak(&code)))
                    .expect("Write error");
                if cmd.code {
                    out.write_fmt(format_args!(", \"code\": \"{}\"", code.to_hex()))
                        .expect("Write error");
                }
            }
            // KECCAK_NULL_RLP marks an empty storage trie — nothing to dump.
            let storage_root = client.storage_root(&account, at).unwrap_or(KECCAK_NULL_RLP);
            if storage_root != KECCAK_NULL_RLP {
                out.write_fmt(format_args!(", \"storage_root\": \"0x{:x}\"", storage_root))
                    .expect("Write error");
                if cmd.storage {
                    out.write_fmt(format_args!(", \"storage\": {{"))
                        .expect("Write error");
                    // Inner pagination cursor over this account's storage keys.
                    let mut last_storage: Option<H256> = None;
                    loop {
                        let keys = client
                            .list_storage(at, &account, last_storage.as_ref(), 1000)
                            .ok_or("Specified block not found")?;
                        if keys.is_empty() {
                            break;
                        }

                        for key in keys.into_iter() {
                            if last_storage.is_some() {
                                out.write(b",").expect("Write error");
                            }
                            out.write_fmt(format_args!(
                                "\n\t\"0x{:x}\": \"0x{:x}\"",
                                key,
                                client
                                    .storage_at(&account, &key, at.into())
                                    .unwrap_or_else(Default::default)
                            ))
                            .expect("Write error");
                            last_storage = Some(key);
                        }
                    }
                }
                // NOTE(review): this closing brace is emitted whenever the
                // storage root is non-empty, even when `cmd.storage` is false
                // and no "storage": { was opened — confirm the resulting JSON
                // is well-formed in that configuration.
                out.write(b"\n}").expect("Write error");
            }
            out.write(b"}").expect("Write error");
            i += 1;
            if i % 10000 == 0 {
                info!("Account #{}", i);
            }
            last = Some(account);
        }
    }
    out.write_fmt(format_args!("\n}}}}")).expect("Write error");
    info!("Export completed.");
    Ok(())
}
|
|
|
|
|
2019-01-16 16:37:26 +01:00
|
|
|
fn execute_reset(cmd: ResetBlockchain) -> Result<(), String> {
|
2020-08-05 06:08:03 +02:00
|
|
|
let service = start_client(
|
|
|
|
cmd.dirs,
|
|
|
|
cmd.spec,
|
|
|
|
cmd.pruning,
|
|
|
|
cmd.pruning_history,
|
|
|
|
cmd.pruning_memory,
|
|
|
|
cmd.tracing,
|
|
|
|
cmd.fat_db,
|
|
|
|
cmd.compaction,
|
|
|
|
cmd.cache_config,
|
|
|
|
false,
|
|
|
|
0,
|
|
|
|
)?;
|
|
|
|
|
|
|
|
let client = service.client();
|
|
|
|
client.reset(cmd.num)?;
|
|
|
|
info!("{}", Colour::Green.bold().paint("Successfully reset db!"));
|
|
|
|
|
|
|
|
Ok(())
|
2019-01-16 16:37:26 +01:00
|
|
|
}
|
|
|
|
|
2016-12-16 11:00:17 +01:00
|
|
|
pub fn kill_db(cmd: KillBlockchain) -> Result<(), String> {
|
2020-08-05 06:08:03 +02:00
|
|
|
let spec = cmd.spec.spec(&cmd.dirs.cache)?;
|
|
|
|
let genesis_hash = spec.genesis_header().hash();
|
|
|
|
let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir);
|
|
|
|
let user_defaults_path = db_dirs.user_defaults_path();
|
|
|
|
let mut user_defaults = UserDefaults::load(&user_defaults_path)?;
|
|
|
|
let algorithm = cmd.pruning.to_algorithm(&user_defaults);
|
|
|
|
let dir = db_dirs.db_path(algorithm);
|
|
|
|
fs::remove_dir_all(&dir).map_err(|e| format!("Error removing database: {:?}", e))?;
|
|
|
|
user_defaults.is_first_launch = true;
|
|
|
|
user_defaults.save(&user_defaults_path)?;
|
|
|
|
info!("Database deleted.");
|
|
|
|
Ok(())
|
2016-12-12 17:19:41 +01:00
|
|
|
}
|
|
|
|
|
2016-07-25 16:09:47 +02:00
|
|
|
#[cfg(test)]
mod test {
    use super::DataFormat;

    /// Both the long and short spellings parse to `Binary`; `hex` to `Hex`.
    #[test]
    fn test_data_format_parsing() {
        assert_eq!("bin".parse::<DataFormat>().unwrap(), DataFormat::Binary);
        assert_eq!("binary".parse::<DataFormat>().unwrap(), DataFormat::Binary);
        assert_eq!("hex".parse::<DataFormat>().unwrap(), DataFormat::Hex);
    }
}
|