// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::str::{FromStr, from_utf8};
use std::{io, fs};
use std::io::{BufReader, BufRead};
use std::time::{Instant, Duration};
use std::thread::sleep;
use std::sync::Arc;
use rustc_hex::FromHex;
use hash::{keccak, KECCAK_NULL_RLP};
use bigint::prelude::U256;
use bigint::hash::H256;
use util::Address;
use bytes::ToPretty;
use rlp::PayloadInfo;
use ethcore::service::ClientService;
use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockId};
use ethcore::error::ImportError;
use ethcore::miner::Miner;
use ethcore::verification::queue::VerifierSettings;
use cache::CacheConfig;
use informant::{Informant, FullNodeInformantData, MillisecondDuration};
use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
use user_defaults::UserDefaults;
use fdlimit;

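/// Format of blockchain data on import and export: raw binary RLP or hex-encoded RLP.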
#[derive(Debug, PartialEq)]
pub enum DataFormat {
	Hex,
	Binary,
}

impl Default for DataFormat {
	fn default() -> Self {
		DataFormat::Binary
	}
}

impl FromStr for DataFormat {
	type Err = String;

	fn from_str(s: &str) -> Result<Self, Self::Err> {
		match s {
			"binary" | "bin" => Ok(DataFormat::Binary),
			"hex" => Ok(DataFormat::Hex),
			x => Err(format!("Invalid format: {}", x))
		}
	}
}

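/// Blockchain subcommand to execute.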
#[derive(Debug, PartialEq)]
pub enum BlockchainCmd {
	Kill(KillBlockchain),
	Import(ImportBlockchain),
	Export(ExportBlockchain),
	ExportState(ExportState),
}

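/// Settings for deleting the blockchain database (see `kill_db` below).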
#[derive(Debug, PartialEq)]
pub struct KillBlockchain {
	pub spec: SpecType,
	pub dirs: Directories,
	pub pruning: Pruning,
}

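/// Settings for importing blocks (or headers, when `light` is set) from a file or stdin.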
#[derive(Debug, PartialEq)]
pub struct ImportBlockchain {
	pub spec: SpecType,
	pub cache_config: CacheConfig,
	pub dirs: Directories,
	pub file_path: Option<String>,
	pub format: Option<DataFormat>,
	pub pruning: Pruning,
	pub pruning_history: u64,
	pub pruning_memory: usize,
	pub compaction: DatabaseCompactionProfile,
	pub wal: bool,
	pub tracing: Switch,
	pub fat_db: Switch,
	pub vm_type: VMType,
	pub check_seal: bool,
	pub with_color: bool,
	pub verifier_settings: VerifierSettings,
	pub light: bool,
}

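/// Settings for exporting a range of blocks to a file or stdout.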
#[derive(Debug, PartialEq)]
pub struct ExportBlockchain {
	pub spec: SpecType,
	pub cache_config: CacheConfig,
	pub dirs: Directories,
	pub file_path: Option<String>,
	pub format: Option<DataFormat>,
	pub pruning: Pruning,
	pub pruning_history: u64,
	pub pruning_memory: usize,
	pub compaction: DatabaseCompactionProfile,
	pub wal: bool,
	pub fat_db: Switch,
	pub tracing: Switch,
	pub from_block: BlockId,
	pub to_block: BlockId,
	pub check_seal: bool,
}

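/// Settings for exporting the account state at a given block as JSON.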
#[derive(Debug, PartialEq)]
pub struct ExportState {
	pub spec: SpecType,
	pub cache_config: CacheConfig,
	pub dirs: Directories,
	pub file_path: Option<String>,
	pub format: Option<DataFormat>,
	pub pruning: Pruning,
	pub pruning_history: u64,
	pub pruning_memory: usize,
	pub compaction: DatabaseCompactionProfile,
	pub wal: bool,
	pub fat_db: Switch,
	pub tracing: Switch,
	pub at: BlockId,
	pub storage: bool,
	pub code: bool,
	pub min_balance: Option<U256>,
	pub max_balance: Option<U256>,
}

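/// Dispatch a blockchain subcommand to the matching handler below.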
pub fn execute(cmd: BlockchainCmd) -> Result<(), String> {
	match cmd {
		BlockchainCmd::Kill(kill_cmd) => kill_db(kill_cmd),
		BlockchainCmd::Import(import_cmd) => {
			if import_cmd.light {
				execute_import_light(import_cmd)
			} else {
				execute_import(import_cmd)
			}
		}
		BlockchainCmd::Export(export_cmd) => execute_export(export_cmd),
		BlockchainCmd::ExportState(export_cmd) => execute_export_state(export_cmd),
	}
}

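/// Import block headers into a light client database, reading binary or hex RLP
/// from the given file or from stdin.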
fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
	use light::client::{Service as LightClientService, Config as LightClientConfig};
	use light::cache::Cache as LightDataCache;
	use parking_lot::Mutex;

	let timer = Instant::now();

	// load spec file
	let spec = cmd.spec.spec(&cmd.dirs.cache)?;

	// load genesis hash
	let genesis_hash = spec.genesis_header().hash();

	// database paths
	let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone());

	// user defaults path
	let user_defaults_path = db_dirs.user_defaults_path();

	// load user defaults
	let user_defaults = UserDefaults::load(&user_defaults_path)?;

	fdlimit::raise_fd_limit();

	// select pruning algorithm
	let algorithm = cmd.pruning.to_algorithm(&user_defaults);

	// prepare client and snapshot paths.
	let client_path = db_dirs.client_path(algorithm);

	// execute upgrades
	let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path());
	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction)?;

	// create dirs used by parity
	cmd.dirs.create_dirs(false, false, false)?;

	let cache = Arc::new(Mutex::new(
		LightDataCache::new(Default::default(), ::time::Duration::seconds(0))
	));

	let mut config = LightClientConfig {
		queue: Default::default(),
		chain_column: ::ethcore::db::COL_LIGHT_CHAIN,
		db_cache_size: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
		db_compaction: compaction,
		db_wal: cmd.wal,
		verify_full: true,
		check_seal: cmd.check_seal,
	};

	config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
	config.queue.verifier_settings = cmd.verifier_settings;

	// TODO: could epoch signals be available at the end of the file?
	let fetch = ::light::client::fetch::unavailable();
	let service = LightClientService::start(config, &spec, fetch, &client_path, cache)
		.map_err(|e| format!("Failed to start client: {}", e))?;

	// free up the spec in memory.
	drop(spec);

	let client = service.client();

	let mut instream: Box<io::Read> = match cmd.file_path {
		Some(f) => Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?),
		None => Box::new(io::stdin()),
	};

	const READAHEAD_BYTES: usize = 8;

	let mut first_bytes: Vec<u8> = vec![0; READAHEAD_BYTES];
	let mut first_read = 0;

	let format = match cmd.format {
		Some(format) => format,
		None => {
			first_read = instream.read(&mut first_bytes).map_err(|_| "Error reading from the file/stream.")?;
			match first_bytes[0] {
				0xf9 => DataFormat::Binary,
				_ => DataFormat::Hex,
			}
		}
	};

	let do_import = |bytes: Vec<u8>| {
		while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }

		let header: ::ethcore::header::Header = ::rlp::UntrustedRlp::new(&bytes).val_at(0)
			.map_err(|e| format!("Bad block: {}", e))?;

		if client.best_block_header().number() >= header.number() { return Ok(()) }

		if header.number() % 10000 == 0 {
			info!("#{}", header.number());
		}

		match client.import_header(header) {
			Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {
				trace!("Skipping block already in chain.");
			}
			Err(e) => {
				return Err(format!("Cannot import block: {:?}", e));
			},
			Ok(_) => {},
		}
		Ok(())
	};

	match format {
		DataFormat::Binary => {
			loop {
				let mut bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]};
				let n = if first_read > 0 {
					first_read
				} else {
					instream.read(&mut bytes).map_err(|_| "Error reading from the file/stream.")?
				};
				if n == 0 { break; }
				first_read = 0;
				let s = PayloadInfo::from(&bytes).map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))?.total();
				bytes.resize(s, 0);
				instream.read_exact(&mut bytes[n..]).map_err(|_| "Error reading from the file/stream.")?;
				do_import(bytes)?;
			}
		}
		DataFormat::Hex => {
			for line in BufReader::new(instream).lines() {
				let s = line.map_err(|_| "Error reading from the file/stream.")?;
				let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s};
				first_read = 0;
				let bytes = s.from_hex().map_err(|_| "Invalid hex in file/stream.")?;
				do_import(bytes)?;
			}
		}
	}
	client.flush_queue();

	let ms = timer.elapsed().as_milliseconds();
	let report = client.report();

	info!("Import completed in {} seconds, {} headers, {} hdr/s",
		ms / 1000,
		report.blocks_imported,
		(report.blocks_imported * 1000) as u64 / ms,
	);

	Ok(())
}

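/// Import full blocks into the database, reading binary or hex RLP from the given
/// file or from stdin, and report import statistics when finished.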
fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
	let timer = Instant::now();

	// load spec file
	let spec = cmd.spec.spec(&cmd.dirs.cache)?;

	// load genesis hash
	let genesis_hash = spec.genesis_header().hash();

	// database paths
	let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone());

	// user defaults path
	let user_defaults_path = db_dirs.user_defaults_path();

	// load user defaults
	let mut user_defaults = UserDefaults::load(&user_defaults_path)?;

	fdlimit::raise_fd_limit();

	// select pruning algorithm
	let algorithm = cmd.pruning.to_algorithm(&user_defaults);

	// check if tracing is on
	let tracing = tracing_switch_to_bool(cmd.tracing, &user_defaults)?;

	// check if fatdb is on
	let fat_db = fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)?;

	// prepare client and snapshot paths.
	let client_path = db_dirs.client_path(algorithm);
	let snapshot_path = db_dirs.snapshot_path();

	// execute upgrades
	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;

	// create dirs used by parity
	cmd.dirs.create_dirs(false, false, false)?;

	// prepare client config
	let mut client_config = to_client_config(
		&cmd.cache_config,
		spec.name.to_lowercase(),
		Mode::Active,
		tracing,
		fat_db,
		cmd.compaction,
		cmd.wal,
		cmd.vm_type,
		"".into(),
		algorithm,
		cmd.pruning_history,
		cmd.pruning_memory,
		cmd.check_seal
	);

	client_config.queue.verifier_settings = cmd.verifier_settings;

	// build client
	let service = ClientService::start(
		client_config,
		&spec,
		&client_path,
		&snapshot_path,
		&cmd.dirs.ipc_path(),
		Arc::new(Miner::with_spec(&spec)),
	).map_err(|e| format!("Client service error: {:?}", e))?;

	// free up the spec in memory.
	drop(spec);

	let client = service.client();

	let mut instream: Box<io::Read> = match cmd.file_path {
		Some(f) => Box::new(fs::File::open(&f).map_err(|_| format!("Cannot open given file: {}", f))?),
		None => Box::new(io::stdin()),
	};

	const READAHEAD_BYTES: usize = 8;

	let mut first_bytes: Vec<u8> = vec![0; READAHEAD_BYTES];
	let mut first_read = 0;

	let format = match cmd.format {
		Some(format) => format,
		None => {
			first_read = instream.read(&mut first_bytes).map_err(|_| "Error reading from the file/stream.")?;
			match first_bytes[0] {
				0xf9 => DataFormat::Binary,
				_ => DataFormat::Hex,
			}
		}
	};

	let informant = Arc::new(Informant::new(
		FullNodeInformantData {
			client: client.clone(),
			sync: None,
			net: None,
		},
		None,
		None,
		cmd.with_color,
	));

	service.register_io_handler(informant).map_err(|_| "Unable to register informant handler".to_owned())?;

	let do_import = |bytes| {
		while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }
		match client.import_block(bytes) {
			Err(BlockImportError::Import(ImportError::AlreadyInChain)) => {
				trace!("Skipping block already in chain.");
			}
			Err(e) => {
				return Err(format!("Cannot import block: {:?}", e));
			},
			Ok(_) => {},
		}
		Ok(())
	};

	match format {
		DataFormat::Binary => {
			loop {
				let mut bytes = if first_read > 0 {first_bytes.clone()} else {vec![0; READAHEAD_BYTES]};
				let n = if first_read > 0 {
					first_read
				} else {
					instream.read(&mut bytes).map_err(|_| "Error reading from the file/stream.")?
				};
				if n == 0 { break; }
				first_read = 0;
				let s = PayloadInfo::from(&bytes).map_err(|e| format!("Invalid RLP in the file/stream: {:?}", e))?.total();
				bytes.resize(s, 0);
				instream.read_exact(&mut bytes[n..]).map_err(|_| "Error reading from the file/stream.")?;
				do_import(bytes)?;
			}
		}
		DataFormat::Hex => {
			for line in BufReader::new(instream).lines() {
				let s = line.map_err(|_| "Error reading from the file/stream.")?;
				let s = if first_read > 0 {from_utf8(&first_bytes).unwrap().to_owned() + &(s[..])} else {s};
				first_read = 0;
				let bytes = s.from_hex().map_err(|_| "Invalid hex in file/stream.")?;
				do_import(bytes)?;
			}
		}
	}
	client.flush_queue();

	// save user defaults
	user_defaults.pruning = algorithm;
	user_defaults.tracing = tracing;
	user_defaults.fat_db = fat_db;
	user_defaults.save(&user_defaults_path)?;

	let report = client.report();

	let ms = timer.elapsed().as_milliseconds();
	info!("Import completed in {} seconds, {} blocks, {} blk/s, {} transactions, {} tx/s, {} Mgas, {} Mgas/s",
		ms / 1000,
		report.blocks_imported,
		(report.blocks_imported * 1000) as u64 / ms,
		report.transactions_applied,
		(report.transactions_applied * 1000) as u64 / ms,
		report.gas_processed / From::from(1_000_000),
		(report.gas_processed / From::from(ms * 1000)).low_u64(),
	);
	Ok(())
}

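/// Start a full `ClientService` with the given settings; used by the export commands
/// below. Fails early when `require_fat_db` is set but the database was not synced
/// with fat-db enabled.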
fn start_client(
	dirs: Directories,
	spec: SpecType,
	pruning: Pruning,
	pruning_history: u64,
	pruning_memory: usize,
	tracing: Switch,
	fat_db: Switch,
	compaction: DatabaseCompactionProfile,
	wal: bool,
	cache_config: CacheConfig,
	require_fat_db: bool,
) -> Result<ClientService, String> {

	// load spec file
	let spec = spec.spec(&dirs.cache)?;

	// load genesis hash
	let genesis_hash = spec.genesis_header().hash();

	// database paths
	let db_dirs = dirs.database(genesis_hash, None, spec.data_dir.clone());

	// user defaults path
	let user_defaults_path = db_dirs.user_defaults_path();

	// load user defaults
	let user_defaults = UserDefaults::load(&user_defaults_path)?;

	fdlimit::raise_fd_limit();

	// select pruning algorithm
	let algorithm = pruning.to_algorithm(&user_defaults);

	// check if tracing is on
	let tracing = tracing_switch_to_bool(tracing, &user_defaults)?;

	// check if fatdb is on
	let fat_db = fatdb_switch_to_bool(fat_db, &user_defaults, algorithm)?;
	if !fat_db && require_fat_db {
		return Err("This command requires Parity to be synced with --fat-db on.".to_owned());
	}

	// prepare client and snapshot paths.
	let client_path = db_dirs.client_path(algorithm);
	let snapshot_path = db_dirs.snapshot_path();

	// execute upgrades
	execute_upgrades(&dirs.base, &db_dirs, algorithm, compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;

	// create dirs used by parity
	dirs.create_dirs(false, false, false)?;

	// prepare client config
	let client_config = to_client_config(
		&cache_config,
		spec.name.to_lowercase(),
		Mode::Active,
		tracing,
		fat_db,
		compaction,
		wal,
		VMType::default(),
		"".into(),
		algorithm,
		pruning_history,
		pruning_memory,
		true,
	);

	let service = ClientService::start(
		client_config,
		&spec,
		&client_path,
		&snapshot_path,
		&dirs.ipc_path(),
		Arc::new(Miner::with_spec(&spec)),
	).map_err(|e| format!("Client service error: {:?}", e))?;

	drop(spec);
	Ok(service)
}

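/// Export blocks in the requested range to a file or stdout, in binary or hex format.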
fn execute_export(cmd: ExportBlockchain) -> Result<(), String> {
	let service = start_client(
		cmd.dirs,
		cmd.spec,
		cmd.pruning,
		cmd.pruning_history,
		cmd.pruning_memory,
		cmd.tracing,
		cmd.fat_db,
		cmd.compaction,
		cmd.wal,
		cmd.cache_config,
		false,
	)?;
	let format = cmd.format.unwrap_or_default();

	let client = service.client();

	let mut out: Box<io::Write> = match cmd.file_path {
		Some(f) => Box::new(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?),
		None => Box::new(io::stdout()),
	};

	let from = client.block_number(cmd.from_block).ok_or("From block could not be found")?;
	let to = client.block_number(cmd.to_block).ok_or("To block could not be found")?;

	for i in from..(to + 1) {
		if i % 10000 == 0 {
			info!("#{}", i);
		}
		let b = client.block(BlockId::Number(i)).ok_or("Error exporting incomplete chain")?.into_inner();
		match format {
			DataFormat::Binary => {
				out.write(&b).map_err(|e| format!("Couldn't write to stream. Cause: {}", e))?;
			}
			DataFormat::Hex => {
				out.write_fmt(format_args!("{}", b.pretty())).map_err(|e| format!("Couldn't write to stream. Cause: {}", e))?;
			}
		}
	}

	info!("Export completed.");
	Ok(())
}

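/// Export the account state at the requested block as a JSON document, optionally
/// including contract code and storage, and filtering accounts by balance.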
fn execute_export_state(cmd: ExportState) -> Result<(), String> {
	let service = start_client(
		cmd.dirs,
		cmd.spec,
		cmd.pruning,
		cmd.pruning_history,
		cmd.pruning_memory,
		cmd.tracing,
		cmd.fat_db,
		cmd.compaction,
		cmd.wal,
		cmd.cache_config,
		true
	)?;

	let client = service.client();

	let mut out: Box<io::Write> = match cmd.file_path {
		Some(f) => Box::new(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f))?),
		None => Box::new(io::stdout()),
	};

	let mut last: Option<Address> = None;
	let at = cmd.at;
	let mut i = 0usize;

	out.write_fmt(format_args!("{{ \"state\": {{", )).expect("Couldn't write to stream.");
	loop {
		let accounts = client.list_accounts(at, last.as_ref(), 1000).ok_or("Specified block not found")?;
		if accounts.is_empty() {
			break;
		}

		for account in accounts.into_iter() {
			let balance = client.balance(&account, at).unwrap_or_else(U256::zero);
			if cmd.min_balance.map_or(false, |m| balance < m) || cmd.max_balance.map_or(false, |m| balance > m) {
				last = Some(account);
				continue; //filtered out
			}

			if i != 0 {
				out.write(b",").expect("Write error");
			}
			out.write_fmt(format_args!("\n\"0x{}\": {{\"balance\": \"{:x}\", \"nonce\": \"{:x}\"", account.hex(), balance, client.nonce(&account, at).unwrap_or_else(U256::zero))).expect("Write error");
			let code = client.code(&account, at).unwrap_or(None).unwrap_or_else(Vec::new);
			if !code.is_empty() {
				out.write_fmt(format_args!(", \"code_hash\": \"0x{}\"", keccak(&code).hex())).expect("Write error");
				if cmd.code {
					out.write_fmt(format_args!(", \"code\": \"{}\"", code.to_hex())).expect("Write error");
				}
			}
			let storage_root = client.storage_root(&account, at).unwrap_or(KECCAK_NULL_RLP);
			if storage_root != KECCAK_NULL_RLP {
				out.write_fmt(format_args!(", \"storage_root\": \"0x{}\"", storage_root.hex())).expect("Write error");
				if cmd.storage {
					out.write_fmt(format_args!(", \"storage\": {{")).expect("Write error");
					let mut last_storage: Option<H256> = None;
					loop {
						let keys = client.list_storage(at, &account, last_storage.as_ref(), 1000).ok_or("Specified block not found")?;
						if keys.is_empty() {
							break;
						}

						for key in keys.into_iter() {
							if last_storage.is_some() {
								out.write(b",").expect("Write error");
							}
							out.write_fmt(format_args!("\n\t\"0x{}\": \"0x{}\"", key.hex(), client.storage_at(&account, &key, at).unwrap_or_else(Default::default).hex())).expect("Write error");
							last_storage = Some(key);
						}
					}
					out.write(b"\n}").expect("Write error");
				}
			}
			out.write(b"}").expect("Write error");
			i += 1;
			if i % 10000 == 0 {
				info!("Account #{}", i);
			}
			last = Some(account);
		}
	}
	out.write_fmt(format_args!("\n}}}}")).expect("Write error");
	info!("Export completed.");
	Ok(())
}

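/// Delete the blockchain database for the given chain and mark the next start as a
/// first launch.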
pub fn kill_db(cmd: KillBlockchain) -> Result<(), String> {
	let spec = cmd.spec.spec(&cmd.dirs.cache)?;
	let genesis_hash = spec.genesis_header().hash();
	let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir);
	let user_defaults_path = db_dirs.user_defaults_path();
	let mut user_defaults = UserDefaults::load(&user_defaults_path)?;
	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
	let dir = db_dirs.db_path(algorithm);
	fs::remove_dir_all(&dir).map_err(|e| format!("Error removing database: {:?}", e))?;
	user_defaults.is_first_launch = true;
	user_defaults.save(&user_defaults_path)?;
	info!("Database deleted.");
	Ok(())
}

#[cfg(test)]
mod test {
	use super::DataFormat;

	#[test]
	fn test_data_format_parsing() {
		assert_eq!(DataFormat::Binary, "binary".parse().unwrap());
		assert_eq!(DataFormat::Binary, "bin".parse().unwrap());
		assert_eq!(DataFormat::Hex, "hex".parse().unwrap());
	}
}