// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! Snapshot and restoration commands.

use std::time::Duration;
|
|
|
|
use std::path::{Path, PathBuf};
|
|
|
|
use std::sync::Arc;
|
2016-08-08 18:41:30 +02:00
|
|
|
|
2019-08-28 10:09:42 +02:00
|
|
|
use client_traits::SnapshotClient;
|
2017-08-31 11:35:41 +02:00
|
|
|
use hash::keccak;
|
2019-09-03 11:29:25 +02:00
|
|
|
use snapshot::{SnapshotConfiguration, SnapshotService as SS};
|
|
|
|
use snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
|
|
|
|
use snapshot::service::Service as SnapshotService;
|
|
|
|
use ethcore::client::{Client, DatabaseCompactionProfile, VMType};
|
2016-08-05 17:00:46 +02:00
|
|
|
use ethcore::miner::Miner;
|
2018-03-13 11:49:57 +01:00
|
|
|
use ethcore_service::ClientService;
|
2019-08-15 17:59:22 +02:00
|
|
|
use types::{
|
|
|
|
ids::BlockId,
|
|
|
|
snapshot::Progress,
|
|
|
|
client_types::Mode,
|
2019-08-22 18:25:49 +02:00
|
|
|
snapshot::RestorationStatus,
|
2019-08-15 17:59:22 +02:00
|
|
|
};
|
2016-08-08 18:41:30 +02:00
|
|
|
|
2016-08-05 17:00:46 +02:00
|
|
|
use cache::CacheConfig;
|
2016-10-03 11:13:10 +02:00
|
|
|
use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
|
2018-04-13 21:14:53 +02:00
|
|
|
use helpers::{to_client_config, execute_upgrades};
|
2016-08-05 17:00:46 +02:00
|
|
|
use dir::Directories;
|
2016-09-26 19:21:25 +02:00
|
|
|
use user_defaults::UserDefaults;
|
2018-04-09 16:14:33 +02:00
|
|
|
use ethcore_private_tx;
|
2018-04-13 21:14:53 +02:00
|
|
|
use db;
|
2016-08-05 17:00:46 +02:00
|
|
|
|
|
|
|
/// Kinds of snapshot commands.
///
/// Selected by the CLI and dispatched in [`execute`].
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Kind {
	/// Take a snapshot.
	Take,
	/// Restore a snapshot.
	Restore
}
|
|
|
|
|
|
|
|
/// Command for snapshot creation or restoration.
#[derive(Debug, PartialEq)]
pub struct SnapshotCommand {
	/// Blockchain cache sizing configuration, forwarded to the client config.
	pub cache_config: CacheConfig,
	/// Directory layout (base dir, cache dir, IPC path) used to locate databases.
	pub dirs: Directories,
	/// Which chain spec to load.
	pub spec: SpecType,
	/// State-pruning algorithm selection (resolved against user defaults).
	pub pruning: Pruning,
	/// Pruning history depth, forwarded to `to_client_config`.
	pub pruning_history: u64,
	/// Pruning memory budget, forwarded to `to_client_config`.
	// NOTE(review): units (bytes vs. MB) are not visible here — confirm in `to_client_config`.
	pub pruning_memory: usize,
	/// Whether transaction tracing is enabled (tri-state switch resolved via user defaults).
	pub tracing: Switch,
	/// Whether the fat database is enabled (tri-state switch resolved via user defaults).
	pub fat_db: Switch,
	/// Database compaction profile for RocksDB.
	pub compaction: DatabaseCompactionProfile,
	/// Snapshot file path: required for `Kind::Take`; for `Kind::Restore`,
	/// `None` means "restore from the local snapshot".
	pub file_path: Option<String>,
	/// Whether to take or restore a snapshot.
	pub kind: Kind,
	/// Block to snapshot at (only meaningful for `Kind::Take`).
	pub block_at: BlockId,
	/// Maximum blocks imported per import round, forwarded to `to_client_config`.
	pub max_round_blocks_to_import: usize,
	/// Snapshot-specific configuration, copied onto the client config.
	pub snapshot_conf: SnapshotConfiguration,
}
|
|
|
|
|
2016-09-11 14:05:59 +02:00
|
|
|
// helper for reading chunks from arbitrary reader and feeding them into the
|
|
|
|
// service.
|
2019-09-03 11:29:25 +02:00
|
|
|
fn restore_using<R: SnapshotReader>(snapshot: Arc<SnapshotService<Client>>, reader: &R, recover: bool) -> Result<(), String> {
|
2016-09-11 14:05:59 +02:00
|
|
|
let manifest = reader.manifest();
|
|
|
|
|
|
|
|
info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash);
|
|
|
|
|
2016-12-27 12:53:56 +01:00
|
|
|
snapshot.init_restore(manifest.clone(), recover).map_err(|e| {
|
2016-09-11 14:05:59 +02:00
|
|
|
format!("Failed to begin restoration: {}", e)
|
2016-12-27 12:53:56 +01:00
|
|
|
})?;
|
2016-09-11 14:05:59 +02:00
|
|
|
|
|
|
|
let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len());
|
|
|
|
|
|
|
|
let informant_handle = snapshot.clone();
|
|
|
|
::std::thread::spawn(move || {
|
2016-10-18 18:16:00 +02:00
|
|
|
while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } = informant_handle.status() {
|
2016-09-11 14:05:59 +02:00
|
|
|
info!("Processed {}/{} state chunks and {}/{} block chunks.",
|
|
|
|
state_chunks_done, num_state, block_chunks_done, num_blocks);
|
|
|
|
::std::thread::sleep(Duration::from_secs(5));
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
|
|
|
info!("Restoring state");
|
|
|
|
for &state_hash in &manifest.state_hashes {
|
|
|
|
if snapshot.status() == RestorationStatus::Failed {
|
|
|
|
return Err("Restoration failed".into());
|
|
|
|
}
|
|
|
|
|
2016-12-27 12:53:56 +01:00
|
|
|
let chunk = reader.chunk(state_hash)
|
|
|
|
.map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))?;
|
2016-11-07 19:33:55 +01:00
|
|
|
|
2017-08-31 11:35:41 +02:00
|
|
|
let hash = keccak(&chunk);
|
2016-11-07 19:33:55 +01:00
|
|
|
if hash != state_hash {
|
|
|
|
return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", state_hash, hash));
|
|
|
|
}
|
|
|
|
|
2016-09-11 14:05:59 +02:00
|
|
|
snapshot.feed_state_chunk(state_hash, &chunk);
|
|
|
|
}
|
|
|
|
|
|
|
|
info!("Restoring blocks");
|
|
|
|
for &block_hash in &manifest.block_hashes {
|
|
|
|
if snapshot.status() == RestorationStatus::Failed {
|
|
|
|
return Err("Restoration failed".into());
|
|
|
|
}
|
|
|
|
|
2016-12-27 12:53:56 +01:00
|
|
|
let chunk = reader.chunk(block_hash)
|
|
|
|
.map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))?;
|
2016-11-07 19:33:55 +01:00
|
|
|
|
2017-08-31 11:35:41 +02:00
|
|
|
let hash = keccak(&chunk);
|
2016-11-07 19:33:55 +01:00
|
|
|
if hash != block_hash {
|
|
|
|
return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", block_hash, hash));
|
|
|
|
}
|
2016-09-11 14:05:59 +02:00
|
|
|
snapshot.feed_block_chunk(block_hash, &chunk);
|
|
|
|
}
|
|
|
|
|
|
|
|
match snapshot.status() {
|
|
|
|
RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()),
|
2018-05-16 22:01:55 +02:00
|
|
|
RestorationStatus::Initializing { .. } => Err("Snapshot restoration is still initializing.".into()),
|
2019-07-01 14:41:45 +02:00
|
|
|
RestorationStatus::Finalizing => Err("Snapshot restoration is still finalizing.".into()),
|
2016-09-11 14:05:59 +02:00
|
|
|
RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
|
|
|
|
RestorationStatus::Inactive => {
|
|
|
|
info!("Restoration complete.");
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-05 17:00:46 +02:00
|
|
|
impl SnapshotCommand {
	// shared portion of snapshot commands: start the client service.
	// Resolves pruning/tracing/fat-db settings against stored user defaults,
	// runs any pending database upgrades, and starts a `ClientService`
	// backed by the resulting configuration. Consumes `self`.
	fn start_service(self) -> Result<ClientService, String> {
		// load spec file
		let spec = self.spec.spec(&self.dirs.cache)?;

		// load genesis hash
		let genesis_hash = spec.genesis_header().hash();

		// database paths
		let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone());

		// user defaults path
		let user_defaults_path = db_dirs.user_defaults_path();

		// load user defaults
		let user_defaults = UserDefaults::load(&user_defaults_path)?;

		// select pruning algorithm
		let algorithm = self.pruning.to_algorithm(&user_defaults);

		// check if tracing is on
		let tracing = tracing_switch_to_bool(self.tracing, &user_defaults)?;

		// check if fatdb is on
		let fat_db = fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)?;

		// prepare client and snapshot paths.
		let client_path = db_dirs.client_path(algorithm);
		let snapshot_path = db_dirs.snapshot_path();

		// execute upgrades
		execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?;

		// prepare client config
		// NOTE(review): arguments are positional — keep this order in sync
		// with `to_client_config`'s signature.
		let mut client_config = to_client_config(
			&self.cache_config,
			spec.name.to_lowercase(),
			Mode::Active,
			tracing,
			fat_db,
			self.compaction,
			VMType::default(),
			"".into(),
			algorithm,
			self.pruning_history,
			self.pruning_memory,
			true,
			self.max_round_blocks_to_import,
		);

		client_config.snapshot = self.snapshot_conf;

		let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
		let client_db = restoration_db_handler.open(&client_path)
			.map_err(|e| format!("Failed to open database {:?}", e))?;

		let service = ClientService::start(
			client_config,
			&spec,
			client_db,
			&snapshot_path,
			restoration_db_handler,
			&self.dirs.ipc_path(),
			// TODO [ToDr] don't use test miner here
			// (actually don't require miner at all)
			Arc::new(Miner::new_for_tests(&spec, None)),
			Arc::new(ethcore_private_tx::DummySigner),
			Box::new(ethcore_private_tx::NoopEncryptor),
			Default::default(),
			Default::default(),
		).map_err(|e| format!("Client service error: {:?}", e))?;

		Ok(service)
	}

	/// restore from a snapshot
	///
	/// When `file_path` is set, restores from that packed snapshot file;
	/// otherwise falls back to the service's local snapshot, if any.
	pub fn restore(self) -> Result<(), String> {
		// clone the path before `self` is consumed by `start_service`.
		let file = self.file_path.clone();
		let service = self.start_service()?;

		warn!("Snapshot restoration is experimental and the format may be subject to change.");
		warn!("On encountering an unexpected error, please ensure that you have a recent snapshot.");

		let snapshot = service.snapshot_service();

		if let Some(file) = file {
			info!("Attempting to restore from snapshot at '{}'", file);

			let reader = PackedReader::new(Path::new(&file))
				.map_err(|e| format!("Couldn't open snapshot file: {}", e))
				.and_then(|x| x.ok_or("Snapshot file has invalid format.".into()));

			let reader = reader?;
			// external file: restore with recovery enabled.
			restore_using(snapshot, &reader, true)?;
		} else {
			info!("Attempting to restore from local snapshot.");

			// attempting restoration with recovery will lead to deadlock
			// as we currently hold a read lock on the service's reader.
			match *snapshot.reader() {
				Some(ref reader) => restore_using(snapshot.clone(), reader, false)?,
				None => return Err("No local snapshot found.".into()),
			}
		}

		Ok(())
	}

	/// Take a snapshot from the head of the chain.
	///
	/// Writes a packed snapshot to `file_path` (required); the partially
	/// written file is removed on failure.
	pub fn take_snapshot(self) -> Result<(), String> {
		let file_path = self.file_path.clone().ok_or("No file path provided.".to_owned())?;
		let file_path: PathBuf = file_path.into();
		let block_at = self.block_at;
		let service = self.start_service()?;

		warn!("Snapshots are currently experimental. File formats may be subject to change.");

		let writer = PackedWriter::new(&file_path)
			.map_err(|e| format!("Failed to open snapshot writer: {}", e))?;

		// Background informant: log account/block/byte counts every 5 seconds
		// whenever the size has changed, until the snapshot reports done.
		let progress = Arc::new(Progress::default());
		let p = progress.clone();
		let informant_handle = ::std::thread::spawn(move || {
			::std::thread::sleep(Duration::from_secs(5));

			let mut last_size = 0;
			while !p.done() {
				let cur_size = p.size();
				if cur_size != last_size {
					last_size = cur_size;
					let bytes = ::informant::format_bytes(cur_size as usize);
					info!("Snapshot: {} accounts {} blocks {}", p.accounts(), p.blocks(), bytes);
				}

				::std::thread::sleep(Duration::from_secs(5));
			}
		});

		if let Err(e) = service.client().take_snapshot(writer, block_at, &*progress) {
			// best-effort cleanup of the partial snapshot file.
			let _ = ::std::fs::remove_file(&file_path);
			return Err(format!("Encountered fatal error while creating snapshot: {}", e));
		}

		info!("snapshot creation complete");

		// `take_snapshot` returned Ok, so the progress tracker must be done;
		// this also guarantees the informant loop above terminates.
		assert!(progress.done());
		informant_handle.join().map_err(|_| "failed to join logger thread")?;

		Ok(())
	}
}
|
|
|
|
|
|
|
|
/// Execute this snapshot command.
|
|
|
|
pub fn execute(cmd: SnapshotCommand) -> Result<String, String> {
|
|
|
|
match cmd.kind {
|
2016-12-27 12:53:56 +01:00
|
|
|
Kind::Take => cmd.take_snapshot()?,
|
|
|
|
Kind::Restore => cmd.restore()?,
|
2016-08-05 17:00:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(String::new())
|
2016-08-10 16:29:40 +02:00
|
|
|
}
|