// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Snapshot network service implementation.

use std::collections::HashSet;
use std::io::ErrorKind;
use std::fs;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

use super::{ManifestData, StateRebuilder, BlockRebuilder, RestorationStatus, SnapshotService};
use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter};

use blockchain::BlockChain;
use client::Client;
use engines::Engine;
use error::Error;
use ids::BlockID;
use service::ClientIoMessage;

use io::IoChannel;

use util::{Bytes, H256, Mutex, RwLock, UtilError};
use util::journaldb::Algorithm;
use util::kvdb::{Database, DatabaseConfig};
use util::snappy;

/// Helper for removing directories in case of error.
struct Guard(bool, PathBuf);

impl Guard {
	fn new(path: PathBuf) -> Self { Guard(true, path) }

	fn disarm(mut self) { self.0 = false }
}

impl Drop for Guard {
	fn drop(&mut self) {
		if self.0 {
			let _ = fs::remove_dir_all(&self.1);
		}
	}
}
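
// A minimal usage sketch: wrap a scratch directory in a `Guard`, do fallible
// work, and `disarm` only on success. Dropping the guard without disarming
// removes the directory. (`do_work` here is a hypothetical fallible helper.)
//
//     let guard = Guard::new(dir.clone());
//     try!(do_work(&dir));
//     guard.disarm();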

/// External database restoration handler
pub trait DatabaseRestore: Send + Sync {
	/// Restart with a new backend. Takes ownership of the passed database and moves it to a new location.
	fn restore_db(&self, new_db: &str) -> Result<(), Error>;
}
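
// A minimal implementor sketch: a no-op restore (the test module's
// `NoopDBRestore` below does exactly this). A real implementation moves the
// database at `new_db` into the client's database location.
//
//     struct NoopDBRestore;
//     impl DatabaseRestore for NoopDBRestore {
//         fn restore_db(&self, _new_db: &str) -> Result<(), Error> { Ok(()) }
//     }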

/// State restoration manager.
struct Restoration {
	manifest: ManifestData,
	state_chunks_left: HashSet<H256>,
	block_chunks_left: HashSet<H256>,
	state: StateRebuilder,
	blocks: BlockRebuilder,
	writer: LooseWriter,
	snappy_buffer: Bytes,
	final_state_root: H256,
	guard: Guard,
}

struct RestorationParams<'a> {
	manifest: ManifestData, // manifest to base restoration on.
	pruning: Algorithm, // pruning algorithm for the database.
	db_path: PathBuf, // database path.
	db_config: &'a DatabaseConfig, // configuration for the database.
	writer: LooseWriter, // writer for recovered snapshot.
	genesis: &'a [u8], // genesis block of the chain.
	guard: Guard, // guard for the restoration directory.
}

impl Restoration {
	// make a new restoration using the given parameters.
	fn new(params: RestorationParams) -> Result<Self, Error> {
		let manifest = params.manifest;

		let state_chunks = manifest.state_hashes.iter().cloned().collect();
		let block_chunks = manifest.block_hashes.iter().cloned().collect();

		let raw_db = Arc::new(try!(Database::open(params.db_config, &*params.db_path.to_string_lossy())
			.map_err(UtilError::SimpleString)));

		let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone());
		let blocks = try!(BlockRebuilder::new(chain, manifest.block_number));

		let root = manifest.state_root.clone();
		Ok(Restoration {
			manifest: manifest,
			state_chunks_left: state_chunks,
			block_chunks_left: block_chunks,
			state: StateRebuilder::new(raw_db, params.pruning),
			blocks: blocks,
			writer: params.writer,
			snappy_buffer: Vec::new(),
			final_state_root: root,
			guard: params.guard,
		})
	}

	// feeds a state chunk
	fn feed_state(&mut self, hash: H256, chunk: &[u8]) -> Result<(), Error> {
		if self.state_chunks_left.remove(&hash) {
			let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer));

			try!(self.state.feed(&self.snappy_buffer[..len]));
			try!(self.writer.write_state_chunk(hash, chunk));
		}

		Ok(())
	}

	// feeds a block chunk
	fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &Engine) -> Result<(), Error> {
		if self.block_chunks_left.remove(&hash) {
			let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer));

			try!(self.blocks.feed(&self.snappy_buffer[..len], engine));
			try!(self.writer.write_block_chunk(hash, chunk));
		}

		Ok(())
	}

	// finish up restoration.
	fn finalize(self) -> Result<(), Error> {
		use util::trie::TrieError;

		if !self.is_done() { return Ok(()) }

		// verify final state root.
		let root = self.state.state_root();
		if root != self.final_state_root {
			warn!("Final restored state has wrong state root: expected {:?}, got {:?}", self.final_state_root, root);
			return Err(TrieError::InvalidStateRoot(root).into());
		}

		// check for missing code.
		try!(self.state.check_missing());

		// connect out-of-order chunks.
		self.blocks.glue_chunks();

		try!(self.writer.finish(self.manifest));

		self.guard.disarm();
		Ok(())
	}

	// is everything done?
	fn is_done(&self) -> bool {
		self.block_chunks_left.is_empty() && self.state_chunks_left.is_empty()
	}
}
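
// Restoration flow: `feed_state` and `feed_blocks` consume chunks until both
// `*_chunks_left` sets are empty (`is_done`), after which `finalize` verifies
// the final state root, glues out-of-order block chunks, writes out the
// recovered snapshot, and disarms the directory guard.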

/// Type alias for client io channel.
pub type Channel = IoChannel<ClientIoMessage>;

/// Snapshot service parameters.
pub struct ServiceParams {
	/// The consensus engine this is built on.
	pub engine: Arc<Engine>,
	/// The chain's genesis block.
	pub genesis_block: Bytes,
	/// Database configuration options.
	pub db_config: DatabaseConfig,
	/// State pruning algorithm.
	pub pruning: Algorithm,
	/// Async IO channel for sending messages.
	pub channel: Channel,
	/// The directory to put snapshots in.
	/// Usually "<chain hash>/snapshot"
	pub snapshot_root: PathBuf,
	/// A handle for database restoration.
	pub db_restore: Arc<DatabaseRestore>,
}
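
// Construction sketch (mirroring the `sends_async_messages` test below); the
// `spec`, `io_service`, and `dir` bindings are assumed to exist:
//
//     let params = ServiceParams {
//         engine: spec.engine.clone(),
//         genesis_block: spec.genesis_block(),
//         db_config: Default::default(),
//         pruning: Algorithm::Archive,
//         channel: io_service.channel(),
//         snapshot_root: dir,
//         db_restore: Arc::new(NoopDBRestore),
//     };
//     let service = try!(Service::new(params));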

/// `SnapshotService` implementation.
/// This controls taking snapshots and restoring from them.
pub struct Service {
	restoration: Mutex<Option<Restoration>>,
	snapshot_root: PathBuf,
	db_config: DatabaseConfig,
	io_channel: Channel,
	pruning: Algorithm,
	status: Mutex<RestorationStatus>,
	reader: RwLock<Option<LooseReader>>,
	engine: Arc<Engine>,
	genesis_block: Bytes,
	state_chunks: AtomicUsize,
	block_chunks: AtomicUsize,
	db_restore: Arc<DatabaseRestore>,
	progress: super::Progress,
	taking_snapshot: AtomicBool,
}

impl Service {
	/// Create a new snapshot service from the given parameters.
	pub fn new(params: ServiceParams) -> Result<Self, Error> {
		let mut service = Service {
			restoration: Mutex::new(None),
			snapshot_root: params.snapshot_root,
			db_config: params.db_config,
			io_channel: params.channel,
			pruning: params.pruning,
			status: Mutex::new(RestorationStatus::Inactive),
			reader: RwLock::new(None),
			engine: params.engine,
			genesis_block: params.genesis_block,
			state_chunks: AtomicUsize::new(0),
			block_chunks: AtomicUsize::new(0),
			db_restore: params.db_restore,
			progress: Default::default(),
			taking_snapshot: AtomicBool::new(false),
		};

		// create the root snapshot dir if it doesn't exist.
		if let Err(e) = fs::create_dir_all(&service.snapshot_root) {
			if e.kind() != ErrorKind::AlreadyExists {
				return Err(e.into())
			}
		}

		// delete the temporary restoration dir if it does exist.
		if let Err(e) = fs::remove_dir_all(service.restoration_dir()) {
			if e.kind() != ErrorKind::NotFound {
				return Err(e.into())
			}
		}

		// delete the temporary snapshot dir if it does exist.
		if let Err(e) = fs::remove_dir_all(service.temp_snapshot_dir()) {
			if e.kind() != ErrorKind::NotFound {
				return Err(e.into())
			}
		}

		let reader = LooseReader::new(service.snapshot_dir()).ok();
		*service.reader.get_mut() = reader;

		Ok(service)
	}

	// get the current snapshot dir.
	fn snapshot_dir(&self) -> PathBuf {
		let mut dir = self.snapshot_root.clone();
		dir.push("current");
		dir
	}

	// get the temporary snapshot dir.
	fn temp_snapshot_dir(&self) -> PathBuf {
		let mut dir = self.snapshot_root.clone();
		dir.push("in_progress");
		dir
	}

	// get the restoration directory.
	fn restoration_dir(&self) -> PathBuf {
		let mut dir = self.snapshot_root.clone();
		dir.push("restoration");
		dir
	}

	// restoration db path.
	fn restoration_db(&self) -> PathBuf {
		let mut dir = self.restoration_dir();
		dir.push("db");
		dir
	}

	// temporary snapshot recovery path.
	fn temp_recovery_dir(&self) -> PathBuf {
		let mut dir = self.restoration_dir();
		dir.push("temp");
		dir
	}
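
	// Resulting layout under `snapshot_root`:
	//
	//     snapshot_root/
	//       current/        completed snapshot served to peers (snapshot_dir)
	//       in_progress/    snapshot currently being taken (temp_snapshot_dir)
	//       restoration/    active restoration (restoration_dir)
	//         db/           database being rebuilt (restoration_db)
	//         temp/         recovered snapshot chunks (temp_recovery_dir)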

	// replace the client's database with our own.
	fn replace_client_db(&self) -> Result<(), Error> {
		let our_db = self.restoration_db();

		try!(self.db_restore.restore_db(&our_db.to_string_lossy()));
		Ok(())
	}

	/// Tick the snapshot service. This will log any active snapshot
	/// being taken.
	pub fn tick(&self) {
		if self.progress.done() || !self.taking_snapshot.load(Ordering::SeqCst) { return }

		let p = &self.progress;
		info!("Snapshot: {} accounts {} blocks {} bytes", p.accounts(), p.blocks(), p.size());
	}

	/// Take a snapshot at the block with the given number.
	/// Calling this while a restoration is in progress, or vice versa,
	/// will lead to a race condition where whichever finishes first
	/// has its produced snapshot overwritten.
	pub fn take_snapshot(&self, client: &Client, num: u64) -> Result<(), Error> {
		if self.taking_snapshot.compare_and_swap(false, true, Ordering::SeqCst) {
			info!("Skipping snapshot at #{} as another one is currently in-progress.", num);
			return Ok(());
		}

		info!("Taking snapshot at #{}", num);
		self.progress.reset();

		let temp_dir = self.temp_snapshot_dir();
		let snapshot_dir = self.snapshot_dir();

		let _ = fs::remove_dir_all(&temp_dir);

		let writer = try!(LooseWriter::new(temp_dir.clone()));

		let guard = Guard::new(temp_dir.clone());
		let res = client.take_snapshot(writer, BlockID::Number(num), &self.progress);

		// clear the flag even if snapshotting failed, then check the result.
		self.taking_snapshot.store(false, Ordering::SeqCst);
		try!(res);

		info!("Finished taking snapshot at #{}", num);

		let mut reader = self.reader.write();

		// destroy the old snapshot reader.
		*reader = None;

		try!(fs::rename(temp_dir, &snapshot_dir));

		*reader = Some(try!(LooseReader::new(snapshot_dir)));

		guard.disarm();
		Ok(())
	}

	/// Initialize the restoration synchronously.
	pub fn init_restore(&self, manifest: ManifestData) -> Result<(), Error> {
		let rest_dir = self.restoration_dir();

		let mut res = self.restoration.lock();

		// tear down existing restoration.
		*res = None;

		// delete and recreate the restoration dir.
		if let Err(e) = fs::remove_dir_all(&rest_dir) {
			match e.kind() {
				ErrorKind::NotFound => {},
				_ => return Err(e.into()),
			}
		}

		try!(fs::create_dir_all(&rest_dir));

		// make new restoration.
		let writer = try!(LooseWriter::new(self.temp_recovery_dir()));

		let params = RestorationParams {
			manifest: manifest,
			pruning: self.pruning,
			db_path: self.restoration_db(),
			db_config: &self.db_config,
			writer: writer,
			genesis: &self.genesis_block,
			guard: Guard::new(rest_dir),
		};

		*res = Some(try!(Restoration::new(params)));

		*self.status.lock() = RestorationStatus::Ongoing {
			state_chunks_done: self.state_chunks.load(Ordering::Relaxed) as u32,
			block_chunks_done: self.block_chunks.load(Ordering::Relaxed) as u32,
		};
		Ok(())
	}

	// finalize the restoration. this accepts an already-locked
	// restoration as an argument -- so acquiring it again _will_
	// lead to deadlock.
	fn finalize_restoration(&self, rest: &mut Option<Restoration>) -> Result<(), Error> {
		trace!(target: "snapshot", "finalizing restoration");

		self.state_chunks.store(0, Ordering::SeqCst);
		self.block_chunks.store(0, Ordering::SeqCst);

		// destroy the restoration before replacing databases and snapshot.
		try!(rest.take().map(Restoration::finalize).unwrap_or(Ok(())));
		try!(self.replace_client_db());

		let mut reader = self.reader.write();
		*reader = None; // destroy the old reader if it existed.

		let snapshot_dir = self.snapshot_dir();

		trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy());
		if let Err(e) = fs::remove_dir_all(&snapshot_dir) {
			match e.kind() {
				ErrorKind::NotFound => {}
				_ => return Err(e.into()),
			}
		}

		try!(fs::create_dir(&snapshot_dir));

		trace!(target: "snapshot", "copying restored snapshot files over");
		try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir));

		let _ = fs::remove_dir_all(self.restoration_dir());

		*reader = Some(try!(LooseReader::new(snapshot_dir)));

		*self.status.lock() = RestorationStatus::Inactive;

		Ok(())
	}

	/// Feed a chunk of either kind. No-op if there is no ongoing restoration or the status is wrong.
	fn feed_chunk(&self, hash: H256, chunk: &[u8], is_state: bool) -> Result<(), Error> {
		// TODO: be able to process block chunks and state chunks at same time?
		let mut restoration = self.restoration.lock();

		match self.status() {
			RestorationStatus::Inactive | RestorationStatus::Failed => Ok(()),
			RestorationStatus::Ongoing { .. } => {
				let res = {
					let rest = match *restoration {
						Some(ref mut r) => r,
						None => return Ok(()),
					};

					match is_state {
						true => rest.feed_state(hash, chunk),
						false => rest.feed_blocks(hash, chunk, &*self.engine),
					}.map(|_| rest.is_done())
				};

				match res {
					Ok(is_done) => {
						match is_state {
							true => self.state_chunks.fetch_add(1, Ordering::SeqCst),
							false => self.block_chunks.fetch_add(1, Ordering::SeqCst),
						};

						match is_done {
							true => self.finalize_restoration(&mut *restoration),
							false => Ok(())
						}
					}
					other => other.map(drop),
				}
			}
		}
	}

	/// Feed a state chunk to be processed synchronously.
	pub fn feed_state_chunk(&self, hash: H256, chunk: &[u8]) {
		match self.feed_chunk(hash, chunk, true) {
			Ok(()) => (),
			Err(e) => {
				warn!("Encountered error during state restoration: {}", e);
				*self.restoration.lock() = None;
				*self.status.lock() = RestorationStatus::Failed;
				let _ = fs::remove_dir_all(self.restoration_dir());
			}
		}
	}

	/// Feed a block chunk to be processed synchronously.
	pub fn feed_block_chunk(&self, hash: H256, chunk: &[u8]) {
		match self.feed_chunk(hash, chunk, false) {
			Ok(()) => (),
			Err(e) => {
				warn!("Encountered error during block restoration: {}", e);
				*self.restoration.lock() = None;
				*self.status.lock() = RestorationStatus::Failed;
				let _ = fs::remove_dir_all(self.restoration_dir());
			}
		}
	}
}

impl SnapshotService for Service {
	fn manifest(&self) -> Option<ManifestData> {
		self.reader.read().as_ref().map(|r| r.manifest().clone())
	}

	fn chunk(&self, hash: H256) -> Option<Bytes> {
		self.reader.read().as_ref().and_then(|r| r.chunk(hash).ok())
	}

	fn status(&self) -> RestorationStatus {
		*self.status.lock()
	}

	fn begin_restore(&self, manifest: ManifestData) {
		self.io_channel.send(ClientIoMessage::BeginRestoration(manifest))
			.expect("snapshot service and io service are kept alive by client service; qed");
	}

	fn abort_restore(&self) {
		*self.restoration.lock() = None;
		*self.status.lock() = RestorationStatus::Inactive;
		if let Err(e) = fs::remove_dir_all(&self.restoration_dir()) {
			match e.kind() {
				ErrorKind::NotFound => {},
				_ => warn!("encountered error {} while deleting snapshot restoration dir.", e),
			}
		}
	}

	fn restore_state_chunk(&self, hash: H256, chunk: Bytes) {
		self.io_channel.send(ClientIoMessage::FeedStateChunk(hash, chunk))
			.expect("snapshot service and io service are kept alive by client service; qed");
	}

	fn restore_block_chunk(&self, hash: H256, chunk: Bytes) {
		self.io_channel.send(ClientIoMessage::FeedBlockChunk(hash, chunk))
			.expect("snapshot service and io service are kept alive by client service; qed");
	}
}

impl Drop for Service {
	fn drop(&mut self) {
		self.abort_restore();
	}
}

#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use service::ClientIoMessage;
	use io::IoService;
	use devtools::RandomTempPath;
	use tests::helpers::get_test_spec;
	use util::journaldb::Algorithm;
	use error::Error;
	use snapshot::{ManifestData, RestorationStatus, SnapshotService};
	use super::*;

	struct NoopDBRestore;
	impl DatabaseRestore for NoopDBRestore {
		fn restore_db(&self, _new_db: &str) -> Result<(), Error> {
			Ok(())
		}
	}

	#[test]
	fn sends_async_messages() {
		let service = IoService::<ClientIoMessage>::start().unwrap();
		let spec = get_test_spec();

		let dir = RandomTempPath::new();
		let mut dir = dir.as_path().to_owned();
		let mut client_db = dir.clone();
		dir.push("snapshot");
		client_db.push("client");

		let snapshot_params = ServiceParams {
			engine: spec.engine.clone(),
			genesis_block: spec.genesis_block(),
			db_config: Default::default(),
			pruning: Algorithm::Archive,
			channel: service.channel(),
			snapshot_root: dir,
			db_restore: Arc::new(NoopDBRestore),
		};

		let service = Service::new(snapshot_params).unwrap();

		assert!(service.manifest().is_none());
		assert!(service.chunk(Default::default()).is_none());
		assert_eq!(service.status(), RestorationStatus::Inactive);

		let manifest = ManifestData {
			state_hashes: vec![],
			block_hashes: vec![],
			state_root: Default::default(),
			block_number: 0,
			block_hash: Default::default(),
		};

		service.begin_restore(manifest);
		service.abort_restore();
		service.restore_state_chunk(Default::default(), vec![]);
		service.restore_block_chunk(Default::default(), vec![]);
	}
}