take snapshot at specified block and slightly better informants (#1873)

* prettier informant for snapshot creation
* allow taking snapshot at a given block
* minor tweaks
* elaborate on cli
This commit is contained in:
parent 59b0f8c7a3
commit 2f1ade8116
@@ -594,19 +594,35 @@ impl Client {
 		}
 	}

-	/// Take a snapshot.
-	pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W) -> Result<(), ::error::Error> {
+	/// Take a snapshot at the given block.
+	/// If the ID given is "latest", this will default to 1000 blocks behind.
+	pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), ::error::Error> {
 		let db = self.state_db.lock().boxed_clone();
 		let best_block_number = self.chain_info().best_block_number;
-		let start_block_number = if best_block_number > 1000 {
-			best_block_number - 1000
-		} else {
-			0
-		};
-		let start_hash = self.block_hash(BlockID::Number(start_block_number))
-			.expect("blocks within HISTORY are always stored.");
+		let block_number = try!(self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at)));

-		try!(snapshot::take_snapshot(&self.chain, start_hash, db.as_hashdb(), writer));
+		if best_block_number > HISTORY + block_number && db.is_pruned() {
+			return Err(snapshot::Error::OldBlockPrunedDB.into());
+		}
+
+		let start_hash = match at {
+			BlockID::Latest => {
+				let start_num = if best_block_number > 1000 {
+					best_block_number - 1000
+				} else {
+					0
+				};
+
+				self.block_hash(BlockID::Number(start_num))
+					.expect("blocks within HISTORY are always stored.")
+			}
+			_ => match self.block_hash(at) {
+				Some(hash) => hash,
+				None => return Err(snapshot::Error::InvalidStartingBlock(at).into()),
+			},
+		};
+
+		try!(snapshot::take_snapshot(&self.chain, start_hash, db.as_hashdb(), writer, p));

 		Ok(())
 	}
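For orientation, callers now choose the block and pass a progress handle. A minimal sketch of the new call shape (the `client` and `writer` values are assumed to be set up elsewhere; only the signature comes from the hunk above):

    use ethcore::ids::BlockID;
    use ethcore::snapshot::Progress;

    // Snapshot the state as of block 1_000_000, reporting progress through `p`.
    let p = Progress::new();
    try!(client.take_snapshot(writer, BlockID::Number(1_000_000), &p));
    assert!(p.done());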
@@ -18,6 +18,8 @@

 use std::fmt;

+use ids::BlockID;
+
 use util::H256;
 use util::trie::TrieError;
 use util::rlp::DecoderError;
@@ -26,9 +28,13 @@ use util::rlp::DecoderError;
 #[derive(Debug)]
 pub enum Error {
 	/// Invalid starting block for snapshot.
-	InvalidStartingBlock(H256),
+	InvalidStartingBlock(BlockID),
 	/// Block not found.
 	BlockNotFound(H256),
+	/// Incomplete chain.
+	IncompleteChain,
+	/// Old starting block in a pruned database.
+	OldBlockPrunedDB,
 	/// Trie error.
 	Trie(TrieError),
 	/// Decoder error.
@@ -40,8 +46,11 @@ pub enum Error {
 impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
-			Error::InvalidStartingBlock(ref hash) => write!(f, "Invalid starting block hash: {}", hash),
+			Error::InvalidStartingBlock(ref id) => write!(f, "Invalid starting block: {:?}", id),
 			Error::BlockNotFound(ref hash) => write!(f, "Block not found in chain: {}", hash),
+			Error::IncompleteChain => write!(f, "Cannot create snapshot due to incomplete chain."),
+			Error::OldBlockPrunedDB => write!(f, "Attempted to create a snapshot at an old block while using \
+				a pruned database. Please re-run with the --pruning archive flag."),
 			Error::Io(ref err) => err.fmt(f),
 			Error::Decoder(ref err) => err.fmt(f),
 			Error::Trie(ref err) => err.fmt(f),
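With the new variants, a caller can distinguish a recoverable configuration problem from a hard failure. A hedged sketch (the `snapshot_result` value and surrounding setup are assumed; only the variant names come from the enum above):

    use ethcore::snapshot::Error;

    match snapshot_result {
        Err(Error::OldBlockPrunedDB) => {
            // The requested block is older than what the pruned state DB keeps;
            // the user should re-run with --pruning archive.
        }
        Err(Error::IncompleteChain) => {
            // An ancestor block needed for the chunk range is missing.
        }
        Err(e) => return Err(e.into()),
        Ok(hashes) => { let _ = hashes; }
    }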
@@ -18,10 +18,12 @@

 use std::collections::VecDeque;
 use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

 use account_db::{AccountDB, AccountDBMut};
 use blockchain::{BlockChain, BlockProvider};
 use engines::Engine;
+use ids::BlockID;
 use views::BlockView;

 use util::{Bytes, Hashable, HashDB, snappy, TrieDB, TrieDBMut, TrieMut};
@@ -58,9 +60,49 @@ const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;
 // How many blocks to include in a snapshot, starting from the head of the chain.
 const SNAPSHOT_BLOCKS: u64 = 30000;

+/// A progress indicator for snapshots.
+#[derive(Debug)]
+pub struct Progress {
+	accounts: AtomicUsize,
+	blocks: AtomicUsize,
+	size: AtomicUsize, // Todo [rob] use AtomicU64 when it stabilizes.
+	done: AtomicBool,
+}
+
+impl Progress {
+	/// Create a new progress indicator.
+	pub fn new() -> Self {
+		Progress {
+			accounts: AtomicUsize::new(0),
+			blocks: AtomicUsize::new(0),
+			size: AtomicUsize::new(0),
+			done: AtomicBool::new(false),
+		}
+	}
+
+	/// Get the number of accounts snapshotted thus far.
+	pub fn accounts(&self) -> usize { self.accounts.load(Ordering::Relaxed) }
+
+	/// Get the number of blocks snapshotted thus far.
+	pub fn blocks(&self) -> usize { self.blocks.load(Ordering::Relaxed) }
+
+	/// Get the written size of the snapshot in bytes.
+	pub fn size(&self) -> usize { self.size.load(Ordering::Relaxed) }
+
+	/// Whether the snapshot is complete.
+	pub fn done(&self) -> bool { self.done.load(Ordering::SeqCst) }
+}
+
 /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer.
-pub fn take_snapshot<W: SnapshotWriter + Send>(chain: &BlockChain, start_block_hash: H256, state_db: &HashDB, writer: W) -> Result<(), Error> {
-	let start_header = try!(chain.block_header(&start_block_hash).ok_or(Error::InvalidStartingBlock(start_block_hash)));
+pub fn take_snapshot<W: SnapshotWriter + Send>(
+	chain: &BlockChain,
+	block_at: H256,
+	state_db: &HashDB,
+	writer: W,
+	p: &Progress
+) -> Result<(), Error> {
+	let start_header = try!(chain.block_header(&block_at)
+		.ok_or(Error::InvalidStartingBlock(BlockID::Hash(block_at))));
 	let state_root = start_header.state_root();
 	let number = start_header.number();
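Progress is just a bundle of atomic counters, so the chunking threads can bump it without taking a lock while another thread polls it. A self-contained sketch of the same pattern in plain std Rust (illustrative only, not code from this commit):

    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
    use std::thread;
    use std::time::Duration;

    fn main() {
        let done = Arc::new(AtomicBool::new(false));
        let bytes = Arc::new(AtomicUsize::new(0));

        let (d, b) = (done.clone(), bytes.clone());
        let worker = thread::spawn(move || {
            for _ in 0..10 {
                // Relaxed is enough for a monotonically increasing stat counter.
                b.fetch_add(4096, Ordering::Relaxed);
                thread::sleep(Duration::from_millis(50));
            }
            // SeqCst for the completion flag, mirroring Progress::done above.
            d.store(true, Ordering::SeqCst);
        });

        while !done.load(Ordering::SeqCst) {
            println!("written so far: {} bytes", bytes.load(Ordering::Relaxed));
            thread::sleep(Duration::from_millis(100));
        }
        worker.join().unwrap();
    }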
@@ -68,8 +110,8 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(chain: &BlockChain, start_block_h

 	let writer = Mutex::new(writer);
 	let (state_hashes, block_hashes) = try!(scope(|scope| {
-		let block_guard = scope.spawn(|| chunk_blocks(chain, (number, start_block_hash), &writer));
-		let state_res = chunk_state(state_db, state_root, &writer);
+		let block_guard = scope.spawn(|| chunk_blocks(chain, (number, block_at), &writer, p));
+		let state_res = chunk_state(state_db, state_root, &writer, p);

 		state_res.and_then(|state_hashes| {
 			block_guard.join().map(|block_hashes| (state_hashes, block_hashes))
@@ -83,11 +125,13 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(chain: &BlockChain, start_block_h
 		block_hashes: block_hashes,
 		state_root: *state_root,
 		block_number: number,
-		block_hash: start_block_hash,
+		block_hash: block_at,
 	};

 	try!(writer.into_inner().finish(manifest_data));

+	p.done.store(true, Ordering::SeqCst);
+
 	Ok(())
 }
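The `scope` helper here lets the block-chunking thread borrow `chain`, `writer`, and `p` from the enclosing stack frame while state chunking runs on the current thread. A sketch of the same idea using modern `std::thread::scope` (an assumption: the commit itself uses a crossbeam-style `scope` helper, not std):

    use std::sync::Mutex;
    use std::thread;

    fn main() {
        let log = Mutex::new(Vec::new());

        // Scoped threads may borrow from the enclosing frame, just like the
        // block-chunking thread above borrows `chain` and `writer`.
        thread::scope(|s| {
            let blocks = s.spawn(|| {
                log.lock().unwrap().push("block chunks written");
                42u64 // stand-in for a chunk count
            });
            log.lock().unwrap().push("state chunks written on this thread");
            let n = blocks.join().unwrap();
            assert_eq!(n, 42);
        });

        println!("{:?}", log.into_inner().unwrap());
    }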
@@ -100,6 +144,7 @@ struct BlockChunker<'a> {
 	hashes: Vec<H256>,
 	snappy_buffer: Vec<u8>,
 	writer: &'a Mutex<SnapshotWriter + 'a>,
+	progress: &'a Progress,
 }

 impl<'a> BlockChunker<'a> {
@@ -162,7 +207,8 @@ impl<'a> BlockChunker<'a> {

 		let parent_total_difficulty = parent_details.total_difficulty;

-		let mut rlp_stream = RlpStream::new_list(3 + self.rlps.len());
+		let num_entries = self.rlps.len();
+		let mut rlp_stream = RlpStream::new_list(3 + num_entries);
 		rlp_stream.append(&parent_number).append(&parent_hash).append(&parent_total_difficulty);

 		for pair in self.rlps.drain(..) {
@@ -178,6 +224,9 @@ impl<'a> BlockChunker<'a> {
 		try!(self.writer.lock().write_block_chunk(hash, compressed));
 		trace!(target: "snapshot", "wrote block chunk. hash: {}, size: {}, uncompressed size: {}", hash.hex(), size, raw_data.len());

+		self.progress.size.fetch_add(size, Ordering::SeqCst);
+		self.progress.blocks.fetch_add(num_entries, Ordering::SeqCst);
+
 		self.hashes.push(hash);
 		Ok(())
 	}
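Hoisting `self.rlps.len()` into a `num_entries` binding is what lets the chunker report how many entries went into the chunk after `drain(..)` has emptied the vector. The point in miniature (illustrative sketch):

    fn main() {
        let mut rlps = vec![b"a".to_vec(), b"b".to_vec(), b"c".to_vec()];

        // Capture the count first: drain(..) empties the Vec, so len()
        // would report 0 afterwards.
        let num_entries = rlps.len();
        for rlp in rlps.drain(..) {
            let _ = rlp; // appended to the RLP stream in the real code
        }

        assert_eq!(rlps.len(), 0);
        println!("chunk contained {} entries", num_entries);
    }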
@@ -189,7 +238,7 @@ impl<'a> BlockChunker<'a> {
 /// The path parameter is the directory to store the block chunks in.
 /// This function assumes the directory exists already.
 /// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis.
-pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_block_info: (u64, H256), writer: &Mutex<SnapshotWriter + 'a>) -> Result<Vec<H256>, Error> {
+pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_block_info: (u64, H256), writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
 	let (start_number, start_hash) = start_block_info;

 	let first_hash = if start_number < SNAPSHOT_BLOCKS {
@@ -197,8 +246,7 @@ pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_block_info: (u64, H256), wr
 		chain.genesis_hash()
 	} else {
 		let first_num = start_number - SNAPSHOT_BLOCKS;
-		chain.block_hash(first_num)
-			.expect("number before best block number; whole chain is stored; qed")
+		try!(chain.block_hash(first_num).ok_or(Error::IncompleteChain))
 	};

 	let mut chunker = BlockChunker {
@@ -208,6 +256,7 @@ pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_block_info: (u64, H256), wr
 		hashes: Vec::new(),
 		snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
 		writer: writer,
+		progress: progress,
 	};

 	try!(chunker.chunk_all(first_hash));
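Replacing the `.expect(...)` with `ok_or(Error::IncompleteChain)` matters because snapshots can now start from arbitrary blocks: the "whole chain is stored" invariant no longer holds, so a missing ancestor becomes a recoverable error instead of a panic. The Option-to-Result pattern in miniature (illustrative sketch):

    #[derive(Debug)]
    enum Error { IncompleteChain }

    fn block_hash(num: u64) -> Option<u64> {
        // Stand-in for a chain lookup that can miss in a pruned/partial DB.
        if num >= 100 { Some(num * 7) } else { None }
    }

    fn first_hash(first_num: u64) -> Result<u64, Error> {
        // A gap in the chain surfaces as an error the caller can report,
        // rather than a panic via expect().
        block_hash(first_num).ok_or(Error::IncompleteChain)
    }

    fn main() {
        assert!(first_hash(150).is_ok());
        assert!(matches!(first_hash(50), Err(Error::IncompleteChain)));
    }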
@@ -222,6 +271,7 @@ struct StateChunker<'a> {
 	cur_size: usize,
 	snappy_buffer: Vec<u8>,
 	writer: &'a Mutex<SnapshotWriter + 'a>,
+	progress: &'a Progress,
 }

 impl<'a> StateChunker<'a> {
@@ -249,7 +299,8 @@ impl<'a> StateChunker<'a> {
 	// Write out the buffer to disk, pushing the created chunk's hash to
 	// the list.
 	fn write_chunk(&mut self) -> Result<(), Error> {
-		let mut stream = RlpStream::new_list(self.rlps.len());
+		let num_entries = self.rlps.len();
+		let mut stream = RlpStream::new_list(num_entries);
 		for rlp in self.rlps.drain(..) {
 			stream.append_raw(&rlp, 1);
 		}
@@ -263,6 +314,9 @@ impl<'a> StateChunker<'a> {
 		try!(self.writer.lock().write_state_chunk(hash, compressed));
 		trace!(target: "snapshot", "wrote state chunk. size: {}, uncompressed size: {}", compressed_size, raw_data.len());

+		self.progress.accounts.fetch_add(num_entries, Ordering::SeqCst);
+		self.progress.size.fetch_add(compressed_size, Ordering::SeqCst);
+
 		self.hashes.push(hash);
 		self.cur_size = 0;
@@ -275,7 +329,7 @@ impl<'a> StateChunker<'a> {
 ///
 /// Returns a list of hashes of chunks created, or any error it may
 /// have encountered.
-pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter + 'a>) -> Result<Vec<H256>, Error> {
+pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
 	let account_trie = try!(TrieDB::new(db, &root));

 	let mut chunker = StateChunker {
@@ -284,10 +338,9 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter +
 		cur_size: 0,
 		snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
 		writer: writer,
+		progress: progress,
 	};

-	trace!(target: "snapshot", "beginning state chunking");
-
 	// account_key here is the address' hash.
 	for (account_key, account_data) in account_trie.iter() {
 		let account = Account::from_thin_rlp(account_data);
@@ -383,6 +436,7 @@ impl StateRebuilder {
 		let chunk_size = account_fat_rlps.len() / ::num_cpus::get() + 1;

 		// build account tries in parallel.
+		// Todo [rob] keep a thread pool around so we don't do this per-chunk.
 		try!(scope(|scope| {
 			let mut handles = Vec::new();
 			for (account_chunk, out_pairs_chunk) in account_fat_rlps.chunks(chunk_size).zip(pairs.chunks_mut(chunk_size)) {
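The `len / num_cpus + 1` division splits the account list into roughly one slice per core, and `chunks(..).zip(chunks_mut(..))` pairs each input slice with a disjoint output slice so the worker threads never alias. A self-contained sketch of the pattern (illustrative only):

    use std::thread;

    fn main() {
        let input: Vec<u32> = (0..10).collect();
        let mut output = vec![0u32; input.len()];

        // One slice per worker; the +1 guards against a zero chunk size
        // and rounds the division up.
        let workers = 4;
        let chunk_size = input.len() / workers + 1;

        thread::scope(|s| {
            for (inp, out) in input.chunks(chunk_size).zip(output.chunks_mut(chunk_size)) {
                s.spawn(move || {
                    for (i, o) in inp.iter().zip(out.iter_mut()) {
                        *o = i * i; // stand-in for building an account trie
                    }
                });
            }
        });

        assert_eq!(output[3], 9);
        println!("{:?}", output);
    }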
@@ -20,7 +20,7 @@ use devtools::RandomTempPath;

 use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
 use blockchain::BlockChain;
-use snapshot::{chunk_blocks, BlockRebuilder};
+use snapshot::{chunk_blocks, BlockRebuilder, Progress};
 use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};

 use util::{Mutex, snappy};
@@ -55,7 +55,7 @@ fn chunk_and_restore(amount: u64) {

 	// snapshot it.
 	let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap());
-	let block_hashes = chunk_blocks(&bc, (amount, best_hash), &writer).unwrap();
+	let block_hashes = chunk_blocks(&bc, (amount, best_hash), &writer, &Progress::new()).unwrap();
 	writer.into_inner().finish(::snapshot::ManifestData {
 		state_hashes: Vec::new(),
 		block_hashes: block_hashes,
@@ -16,7 +16,7 @@

 //! State snapshotting tests.

-use snapshot::{chunk_state, StateRebuilder};
+use snapshot::{chunk_state, Progress, StateRebuilder};
 use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
 use super::helpers::{compare_dbs, StateProducer};
@@ -48,7 +48,7 @@ fn snap_and_restore() {
 	let state_root = producer.state_root();
 	let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap());

-	let state_hashes = chunk_state(&old_db, &state_root, &writer).unwrap();
+	let state_hashes = chunk_state(&old_db, &state_root, &writer, &Progress::new()).unwrap();

 	writer.into_inner().finish(::snapshot::ManifestData {
 		state_hashes: state_hashes,
@@ -231,6 +231,12 @@ Import/Export Options:
   --format FORMAT          For import/export in given format. FORMAT must be
                            one of 'hex' and 'binary'.

+Snapshot Options:
+  --at BLOCK               Take a snapshot at the given block, which may be an
+                           index, hash, or 'latest'. Note that taking snapshots at
+                           non-recent blocks will only work with --pruning archive
+                           [default: latest]
+
 Virtual Machine Options:
   --jitvm                  Enable the JIT VM.
@@ -365,6 +371,7 @@ pub struct Args {
 	pub flag_version: bool,
 	pub flag_from: String,
 	pub flag_to: String,
+	pub flag_at: String,
 	pub flag_format: Option<String>,
 	pub flag_jitvm: bool,
 	pub flag_log_file: Option<String>,
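Put together, a snapshot at a historical block would be requested roughly like this (an illustrative invocation; the subcommand's file-argument layout is assumed from the configuration code below, only --at and --pruning archive come from the help text above):

    # Snapshot the state at block 1,200,000 (historical blocks need an archive DB).
    parity snapshot snapshot-file --at 1200000 --pruning archive

    # Default: snapshot near the current head ('latest').
    parity snapshot snapshot-file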
@@ -171,6 +171,7 @@ impl Configuration {
 				file_path: self.args.arg_file.clone(),
 				wal: wal,
 				kind: snapshot::Kind::Take,
+				block_at: try!(to_block_id(&self.args.flag_at)),
 			};
 			Cmd::Snapshot(snapshot_cmd)
 		} else if self.args.cmd_restore {
@@ -186,6 +187,7 @@ impl Configuration {
 				file_path: self.args.arg_file.clone(),
 				wal: wal,
 				kind: snapshot::Kind::Restore,
+				block_at: try!(to_block_id("latest")), // unimportant.
 			};
 			Cmd::Snapshot(restore_cmd)
 		} else {
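`to_block_id` is an existing parity helper; per the help text it must map an index, a hash, or 'latest' onto a BlockID. A hypothetical re-implementation to show the intended mapping (illustrative only, not the actual helper; assumes BlockID and H256 are in scope):

    // Hypothetical sketch -- the real to_block_id lives in parity's helpers
    // module and may differ in detail.
    fn to_block_id(s: &str) -> Result<BlockID, String> {
        match s {
            "latest" => Ok(BlockID::Latest),
            num if num.chars().all(|c| c.is_ascii_digit()) => {
                num.parse().map(BlockID::Number).map_err(|_| "bad block number".into())
            }
            hash => hash.parse::<H256>()
                .map(BlockID::Hash)
                .map_err(|_| "neither a number, a hash, nor 'latest'".into()),
        }
    }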
@@ -19,12 +19,15 @@

 use std::time::Duration;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;

 use ethcore_logger::{setup_log, Config as LogConfig};
-use ethcore::snapshot::{RestorationStatus, SnapshotService};
+use ethcore::snapshot::{Progress, RestorationStatus, SnapshotService};
 use ethcore::snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
 use ethcore::service::ClientService;
 use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType};
 use ethcore::miner::Miner;
+use ethcore::ids::BlockID;

 use cache::CacheConfig;
 use params::{SpecType, Pruning};
 use helpers::{to_client_config, execute_upgrades};
@@ -56,6 +59,7 @@ pub struct SnapshotCommand {
 	pub file_path: Option<String>,
 	pub wal: bool,
 	pub kind: Kind,
+	pub block_at: BlockID,
 }

 impl SnapshotCommand {
@@ -168,6 +172,7 @@ impl SnapshotCommand {
 	pub fn take_snapshot(self) -> Result<(), String> {
 		let file_path = try!(self.file_path.clone().ok_or("No file path provided.".to_owned()));
 		let file_path: PathBuf = file_path.into();
+		let block_at = self.block_at.clone();
 		let (service, _panic_handler) = try!(self.start_service());

 		warn!("Snapshots are currently experimental. File formats may be subject to change.");
@@ -175,11 +180,35 @@ impl SnapshotCommand {
 		let writer = try!(PackedWriter::new(&file_path)
 			.map_err(|e| format!("Failed to open snapshot writer: {}", e)));

-		if let Err(e) = service.client().take_snapshot(writer) {
+		let progress = Arc::new(Progress::new());
+		let p = progress.clone();
+		let informant_handle = ::std::thread::spawn(move || {
+			::std::thread::sleep(Duration::from_secs(5));
+
+			let mut last_size = 0;
+			while !p.done() {
+				let cur_size = p.size();
+				if cur_size != last_size {
+					last_size = cur_size;
+					info!("Snapshot: {} accounts {} blocks {} bytes", p.accounts(), p.blocks(), p.size());
+				} else {
+					info!("Snapshot: No progress since last update.");
+				}
+
+				::std::thread::sleep(Duration::from_secs(5));
+			}
+		});
+
+		if let Err(e) = service.client().take_snapshot(writer, block_at, &*progress) {
 			let _ = ::std::fs::remove_file(&file_path);
 			return Err(format!("Encountered fatal error while creating snapshot: {}", e));
 		}

+		info!("snapshot creation complete");
+
+		assert!(progress.done());
+		try!(informant_handle.join().map_err(|_| "failed to join logger thread"));
+
 		Ok(())
 	}
 }
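One detail worth noting in the informant shutdown: `JoinHandle::join` returns `Err` only if the spawned thread panicked, and its payload is not an error type, so `map_err` collapses it into the plain String error this command reports. The pattern in miniature (illustrative sketch):

    use std::thread;

    fn run() -> Result<(), String> {
        let handle = thread::spawn(|| {
            // periodic informant work would go here
        });

        // join() yields Err(Box<dyn Any>) on panic; flatten it into the
        // String error this function's signature promises.
        handle.join().map_err(|_| "failed to join logger thread".to_string())
    }

    fn main() {
        run().unwrap();
    }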