// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! Snapshot and restoration commands.
use std::{
|
|
|
|
path::{Path, PathBuf},
|
|
|
|
sync::Arc,
|
|
|
|
time::Duration,
|
2018-09-13 12:58:49 +02:00
|
|
|
};
|
2020-08-05 06:08:03 +02:00
|
|
|
|
2018-09-13 12:58:49 +02:00
|
|
|
use ethcore::{
|
2016-09-26 19:21:25 +02:00
|
|
|
client::{DatabaseCompactionProfile, Mode, VMType},
|
2016-08-05 17:00:46 +02:00
|
|
|
miner::Miner,
|
2018-09-13 12:58:49 +02:00
|
|
|
snapshot::{
|
|
|
|
io::{PackedReader, PackedWriter, SnapshotReader},
|
|
|
|
service::Service as SnapshotService,
|
|
|
|
Progress, RestorationStatus, SnapshotConfiguration, SnapshotService as SS,
|
2016-09-26 19:21:25 +02:00
|
|
|
},
|
|
|
|
};
|
2018-03-13 11:49:57 +01:00
|
|
|
use ethcore_service::ClientService;
|
2017-08-31 11:35:41 +02:00
|
|
|
use hash::keccak;
|
2019-01-04 14:05:46 +01:00
|
|
|
use types::ids::BlockId;
|
2016-08-08 18:41:30 +02:00
|
|
|
|
2016-08-05 17:00:46 +02:00
|
|
|
use cache::CacheConfig;
|
2016-10-03 11:13:10 +02:00
|
|
|
use db;
|
2016-08-05 17:00:46 +02:00
|
|
|
use dir::Directories;
|
2018-04-13 21:14:53 +02:00
|
|
|
use helpers::{execute_upgrades, to_client_config};
|
|
|
|
use params::{fatdb_switch_to_bool, tracing_switch_to_bool, Pruning, SpecType, Switch};
|
2016-09-26 19:21:25 +02:00
|
|
|
use user_defaults::UserDefaults;
|
2016-08-05 17:00:46 +02:00
|
|
|
|
|
|
|
/// The operation a snapshot command performs.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Kind {
    /// Create a snapshot of the chain.
    Take,
    /// Restore the chain from an existing snapshot.
    Restore,
}
|
|
|
|
|
|
|
|
/// Command for snapshot creation or restoration.
///
/// Built by the CLI layer and consumed by [`execute`]; most fields are
/// forwarded into `to_client_config` / `ClientService::start` when the
/// client service is brought up.
#[derive(Debug, PartialEq)]
pub struct SnapshotCommand {
    /// Blockchain cache sizing, forwarded to the client config.
    pub cache_config: CacheConfig,
    /// Directory layout (base dir, cache dir, database and IPC paths).
    pub dirs: Directories,
    /// Which chain spec to load.
    pub spec: SpecType,
    /// State-pruning algorithm selection (resolved against user defaults).
    pub pruning: Pruning,
    /// Number of historical states to keep; forwarded to the client config.
    pub pruning_history: u64,
    /// Pruning memory budget; forwarded to the client config.
    pub pruning_memory: usize,
    /// Whether tracing is enabled (tri-state; resolved via
    /// `tracing_switch_to_bool` against user defaults).
    pub tracing: Switch,
    /// Whether the fat DB is enabled (tri-state; resolved via
    /// `fatdb_switch_to_bool` against user defaults and pruning algorithm).
    pub fat_db: Switch,
    /// Database compaction profile, used for upgrades and the client config.
    pub compaction: DatabaseCompactionProfile,
    /// Snapshot file to write (`Kind::Take`) or read (`Kind::Restore`).
    /// When `None` on restore, the service's local snapshot is used instead.
    pub file_path: Option<String>,
    /// Which operation to perform.
    pub kind: Kind,
    /// Block to snapshot at (only meaningful for `Kind::Take`).
    pub block_at: BlockId,
    /// Max blocks per import round; forwarded to the client config.
    pub max_round_blocks_to_import: usize,
    /// Snapshot-specific configuration, assigned to `client_config.snapshot`.
    pub snapshot_conf: SnapshotConfiguration,
}
|
|
|
|
|
2016-09-11 14:05:59 +02:00
|
|
|
// helper for reading chunks from arbitrary reader and feeding them into the
// service.
//
// Verifies each chunk against its manifest hash (keccak) before feeding it,
// and spawns a detached background thread that logs progress every 5 seconds
// while the service reports `Ongoing`. `recover` is forwarded to
// `init_restore`; callers restoring from the service's own reader pass
// `false` (see `SnapshotCommand::restore` for why).
fn restore_using<R: SnapshotReader>(
    snapshot: Arc<SnapshotService>,
    reader: &R,
    recover: bool,
) -> Result<(), String> {
    let manifest = reader.manifest();

    info!(
        "Restoring to block #{} (0x{:?})",
        manifest.block_number, manifest.block_hash
    );

    snapshot
        .init_restore(manifest.clone(), recover)
        .map_err(|e| format!("Failed to begin restoration: {}", e))?;

    let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len());

    // Progress logger: loops while the restoration status is `Ongoing` and
    // exits on its own once it leaves that state, so the handle is
    // deliberately not joined.
    let informant_handle = snapshot.clone();
    ::std::thread::spawn(move || {
        while let RestorationStatus::Ongoing {
            state_chunks_done,
            block_chunks_done,
            ..
        } = informant_handle.status()
        {
            info!(
                "Processed {}/{} state chunks and {}/{} block chunks.",
                state_chunks_done, num_state, block_chunks_done, num_blocks
            );
            ::std::thread::sleep(Duration::from_secs(5));
        }
    });

    info!("Restoring state");
    for &state_hash in &manifest.state_hashes {
        // Bail out early if the service has already marked the restoration
        // as failed (e.g. a previously fed chunk was rejected).
        if snapshot.status() == RestorationStatus::Failed {
            return Err("Restoration failed".into());
        }

        let chunk = reader.chunk(state_hash).map_err(|e| {
            format!(
                "Encountered error while reading chunk {:?}: {}",
                state_hash, e
            )
        })?;

        // Integrity check: the manifest entry is the keccak hash of the
        // chunk's bytes.
        let hash = keccak(&chunk);
        if hash != state_hash {
            return Err(format!(
                "Mismatched chunk hash. Expected {:?}, got {:?}",
                state_hash, hash
            ));
        }

        snapshot.feed_state_chunk(state_hash, &chunk);
    }

    info!("Restoring blocks");
    for &block_hash in &manifest.block_hashes {
        if snapshot.status() == RestorationStatus::Failed {
            return Err("Restoration failed".into());
        }

        let chunk = reader.chunk(block_hash).map_err(|e| {
            format!(
                "Encountered error while reading chunk {:?}: {}",
                block_hash, e
            )
        })?;

        // Same keccak integrity check as for state chunks above.
        let hash = keccak(&chunk);
        if hash != block_hash {
            return Err(format!(
                "Mismatched chunk hash. Expected {:?}, got {:?}",
                block_hash, hash
            ));
        }

        snapshot.feed_block_chunk(block_hash, &chunk);
    }

    // All chunks fed; the final service status decides the outcome.
    // Only `Inactive` means the restoration finalized successfully.
    match snapshot.status() {
        RestorationStatus::Ongoing { .. } => {
            Err("Snapshot file is incomplete and missing chunks.".into())
        }
        RestorationStatus::Initializing { .. } => {
            Err("Snapshot restoration is still initializing.".into())
        }
        RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
        RestorationStatus::Inactive => {
            info!("Restoration complete.");
            Ok(())
        }
    }
}
|
|
|
|
|
2016-08-05 17:00:46 +02:00
|
|
|
impl SnapshotCommand {
    // shared portion of snapshot commands: start the client service
    //
    // Resolves spec, paths, pruning/tracing/fat-db settings against user
    // defaults, runs database upgrades, then starts a `ClientService`
    // configured for snapshot work. Consumes `self` because the command's
    // settings are moved into the client configuration.
    fn start_service(self) -> Result<ClientService, String> {
        // load spec file
        let spec = self.spec.spec(&self.dirs.cache)?;

        // load genesis hash
        let genesis_hash = spec.genesis_header().hash();

        // database paths
        let db_dirs = self
            .dirs
            .database(genesis_hash, None, spec.data_dir.clone());

        // user defaults path
        let user_defaults_path = db_dirs.user_defaults_path();

        // load user defaults
        let user_defaults = UserDefaults::load(&user_defaults_path)?;

        // select pruning algorithm
        let algorithm = self.pruning.to_algorithm(&user_defaults);

        // check if tracing is on
        let tracing = tracing_switch_to_bool(self.tracing, &user_defaults)?;

        // check if fatdb is on
        let fat_db = fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)?;

        // prepare client and snapshot paths.
        let client_path = db_dirs.client_path(algorithm);
        let snapshot_path = db_dirs.snapshot_path();

        // execute upgrades
        execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?;

        // prepare client config
        let mut client_config = to_client_config(
            &self.cache_config,
            spec.name.to_lowercase(),
            Mode::Active,
            tracing,
            fat_db,
            self.compaction,
            VMType::default(),
            "".into(),
            algorithm,
            self.pruning_history,
            self.pruning_memory,
            true,
            self.max_round_blocks_to_import,
        );

        client_config.snapshot = self.snapshot_conf;

        let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
        let client_db = restoration_db_handler
            .open(&client_path)
            .map_err(|e| format!("Failed to open database {:?}", e))?;

        let service = ClientService::start(
            client_config,
            &spec,
            client_db,
            &snapshot_path,
            restoration_db_handler,
            &self.dirs.ipc_path(),
            // TODO [ToDr] don't use test miner here
            // (actually don't require miner at all)
            Arc::new(Miner::new_for_tests(&spec, None)),
        )
        .map_err(|e| format!("Client service error: {:?}", e))?;

        Ok(service)
    }

    /// restore from a snapshot
    ///
    /// Restores either from the file given in `self.file_path` or, when no
    /// file was provided, from the service's own local snapshot.
    pub fn restore(self) -> Result<(), String> {
        // `start_service` consumes `self`, so grab the file path first.
        let file = self.file_path.clone();
        let service = self.start_service()?;

        warn!("Snapshot restoration is experimental and the format may be subject to change.");
        warn!(
            "On encountering an unexpected error, please ensure that you have a recent snapshot."
        );

        let snapshot = service.snapshot_service();

        if let Some(file) = file {
            info!("Attempting to restore from snapshot at '{}'", file);

            // `PackedReader::new` returns Result<Option<_>, _>: an I/O error
            // and a well-read-but-invalid file are reported separately.
            let reader = PackedReader::new(Path::new(&file))
                .map_err(|e| format!("Couldn't open snapshot file: {}", e))
                .and_then(|x| x.ok_or("Snapshot file has invalid format.".into()));

            let reader = reader?;
            restore_using(snapshot, &reader, true)?;
        } else {
            info!("Attempting to restore from local snapshot.");

            // attempting restoration with recovery will lead to deadlock
            // as we currently hold a read lock on the service's reader.
            match *snapshot.reader() {
                Some(ref reader) => restore_using(snapshot.clone(), reader, false)?,
                None => return Err("No local snapshot found.".into()),
            }
        }

        Ok(())
    }

    /// Take a snapshot from the head of the chain.
    ///
    /// Writes a packed snapshot to `self.file_path` (required) at block
    /// `self.block_at`, logging progress every 5 seconds. On failure the
    /// partially written file is removed.
    pub fn take_snapshot(self) -> Result<(), String> {
        let file_path = self
            .file_path
            .clone()
            .ok_or("No file path provided.".to_owned())?;
        let file_path: PathBuf = file_path.into();
        let block_at = self.block_at;
        let service = self.start_service()?;

        warn!("Snapshots are currently experimental. File formats may be subject to change.");

        let writer = PackedWriter::new(&file_path)
            .map_err(|e| format!("Failed to open snapshot writer: {}", e))?;

        // Shared progress tracker: written by the snapshot taker, polled by
        // the informant thread below.
        let progress = Arc::new(Progress::default());
        let p = progress.clone();
        let informant_handle = ::std::thread::spawn(move || {
            ::std::thread::sleep(Duration::from_secs(5));

            let mut last_size = 0;
            while !p.done() {
                let cur_size = p.size();
                // Only log when something actually changed since last poll.
                if cur_size != last_size {
                    last_size = cur_size;
                    let bytes = ::informant::format_bytes(cur_size as usize);
                    info!(
                        "Snapshot: {} accounts {} blocks {}",
                        p.accounts(),
                        p.blocks(),
                        bytes
                    );
                }

                ::std::thread::sleep(Duration::from_secs(5));
            }
        });

        if let Err(e) = service.client().take_snapshot(writer, block_at, &*progress) {
            // Best-effort cleanup of the partial snapshot file.
            let _ = ::std::fs::remove_file(&file_path);
            return Err(format!(
                "Encountered fatal error while creating snapshot: {}",
                e
            ));
        }

        info!("snapshot creation complete");

        // The informant loop exits only when `progress.done()`, so this
        // invariant also guarantees the join below terminates.
        assert!(progress.done());
        informant_handle
            .join()
            .map_err(|_| "failed to join logger thread")?;

        Ok(())
    }
}
|
|
|
|
|
|
|
|
/// Execute this snapshot command.
|
|
|
|
pub fn execute(cmd: SnapshotCommand) -> Result<String, String> {
|
|
|
|
match cmd.kind {
|
2016-12-27 12:53:56 +01:00
|
|
|
Kind::Take => cmd.take_snapshot()?,
|
|
|
|
Kind::Restore => cmd.restore()?,
|
2016-08-05 17:00:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(String::new())
|
2016-08-10 16:29:40 +02:00
|
|
|
}
|