// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Snapshot creation, restoration, and network service.
//!
//! Documentation of the format can be found at
//! https://github.com/paritytech/parity/wiki/Warp-Sync-Snapshot-Format

use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

use account_db::{AccountDB, AccountDBMut};
use blockchain::{BlockChain, BlockProvider};
use engines::Engine;
use header::Header;
use ids::BlockId;

use util::{Bytes, Hashable, HashDB, DBValue, snappy, U256};
use util::Mutex;
use util::hash::H256;
use util::journaldb::{self, Algorithm, JournalDB};
use util::kvdb::KeyValueDB;
use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut};
use util::sha3::SHA3_NULL_RLP;
use rlp::{RlpStream, UntrustedRlp};
use bloom_journal::Bloom;

use self::io::SnapshotWriter;

use super::state_db::StateDB;
use super::state::Account as StateAccount;

use crossbeam::scope;
use rand::{Rng, OsRng};

pub use self::error::Error;

pub use self::consensus::*;
pub use self::service::{Service, DatabaseRestore};
pub use self::traits::SnapshotService;
pub use self::watcher::Watcher;
pub use types::snapshot_manifest::ManifestData;
pub use types::restoration_status::RestorationStatus;
pub use types::basic_account::BasicAccount;

pub mod io;
pub mod service;

mod account;
mod block;
mod consensus;
mod error;
mod watcher;

#[cfg(test)]
mod tests;

/// IPC interfaces.
#[cfg(feature="ipc")]
pub mod remote {
	pub use super::traits::RemoteSnapshotService;
}

mod traits {
	#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
	include!(concat!(env!("OUT_DIR"), "/snapshot_service_trait.rs"));
}

// Try to have chunks be around 4MB (before compression).
const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;

// Minimum supported state chunk version.
const MIN_SUPPORTED_STATE_CHUNK_VERSION: u64 = 1;
// Current state chunk version.
const STATE_CHUNK_VERSION: u64 = 2;
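
// Illustrative sizing check (a sketch, not part of the module): the reusable
// compression buffers allocated below are bounded by snappy's worst-case
// expansion of a 4MB chunk, so they never need to grow mid-snapshot.
//
//	let buf_len = snappy::max_compressed_len(PREFERRED_CHUNK_SIZE);
//	assert!(buf_len >= PREFERRED_CHUNK_SIZE);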

/// A progress indicator for snapshots.
#[derive(Debug, Default)]
pub struct Progress {
	accounts: AtomicUsize,
	blocks: AtomicUsize,
	size: AtomicUsize, // TODO [rob] use AtomicU64 when it stabilizes.
	done: AtomicBool,
}

impl Progress {
	/// Reset the progress.
	pub fn reset(&self) {
		self.accounts.store(0, Ordering::Release);
		self.blocks.store(0, Ordering::Release);
		self.size.store(0, Ordering::Release);

		// atomic fence here to ensure the others are written first?
		// logs might very rarely get polluted if not.
		self.done.store(false, Ordering::Release);
	}

	/// Get the number of accounts snapshotted thus far.
	pub fn accounts(&self) -> usize { self.accounts.load(Ordering::Acquire) }

	/// Get the number of blocks snapshotted thus far.
	pub fn blocks(&self) -> usize { self.blocks.load(Ordering::Acquire) }

	/// Get the written size of the snapshot in bytes.
	pub fn size(&self) -> usize { self.size.load(Ordering::Acquire) }

	/// Whether the snapshot is complete.
	pub fn done(&self) -> bool { self.done.load(Ordering::Acquire) }
}
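
// Usage sketch (illustrative, not part of the module): `Progress` is shared
// between the snapshotting thread and a monitor; the polling interval below is
// an arbitrary choice for the example.
//
//	let progress = ::std::sync::Arc::new(Progress::default());
//	let monitor = progress.clone();
//	::std::thread::spawn(move || while !monitor.done() {
//		println!("{} accounts, {} blocks, {} bytes", monitor.accounts(), monitor.blocks(), monitor.size());
//		::std::thread::sleep(::std::time::Duration::from_secs(5));
//	});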

/// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer.
pub fn take_snapshot<W: SnapshotWriter + Send>(
	engine: &Engine,
	chain: &BlockChain,
	block_at: H256,
	state_db: &HashDB,
	writer: W,
	p: &Progress
) -> Result<(), Error> {
	let start_header = chain.block_header(&block_at)
		.ok_or(Error::InvalidStartingBlock(BlockId::Hash(block_at)))?;
	let state_root = start_header.state_root();
	let number = start_header.number();

	info!("Taking snapshot starting at block {}", number);

	let writer = Mutex::new(writer);
	let chunker = engine.snapshot_components().ok_or(Error::SnapshotsUnsupported)?;
	let snapshot_version = chunker.current_version();
	let (state_hashes, block_hashes) = scope(|scope| {
		let writer = &writer;
		let block_guard = scope.spawn(move || chunk_secondary(chunker, chain, block_at, writer, p));
		let state_res = chunk_state(state_db, state_root, writer, p);

		state_res.and_then(|state_hashes| {
			block_guard.join().map(|block_hashes| (state_hashes, block_hashes))
		})
	})?;

	info!("produced {} state chunks and {} block chunks.", state_hashes.len(), block_hashes.len());

	let manifest_data = ManifestData {
		version: snapshot_version,
		state_hashes: state_hashes,
		block_hashes: block_hashes,
		state_root: *state_root,
		block_number: number,
		block_hash: block_at,
	};

	writer.into_inner().finish(manifest_data)?;

	p.done.store(true, Ordering::SeqCst);

	Ok(())
}
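
// Caller sketch (illustrative; assumes a `PackedWriter` from the `io` module
// and live `engine`, `chain`, and state `HashDB` handles, as set up by the
// snapshot service):
//
//	let writer = io::PackedWriter::new(&snapshot_path)?;
//	let progress = Progress::default();
//	take_snapshot(&*engine, &chain, best_block_hash, state_hashdb, writer, &progress)?;
//	assert!(progress.done());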

/// Create and write out all secondary chunks to disk, returning a vector of all
/// the hashes of secondary chunks created.
///
/// Secondary chunks are engine-specific, but they are intended to corroborate the
/// state data in the state chunks.
/// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis.
pub fn chunk_secondary<'a>(mut chunker: Box<SnapshotComponents>, chain: &'a BlockChain, start_hash: H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
	let mut chunk_hashes = Vec::new();
	let mut snappy_buffer = vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)];

	{
		let mut chunk_sink = |raw_data: &[u8]| {
			let compressed_size = snappy::compress_into(raw_data, &mut snappy_buffer);
			let compressed = &snappy_buffer[..compressed_size];
			let hash = compressed.sha3();
			let size = compressed.len();

			writer.lock().write_block_chunk(hash, compressed)?;
			trace!(target: "snapshot", "wrote secondary chunk. hash: {}, size: {}, uncompressed size: {}",
				hash.hex(), size, raw_data.len());

			progress.size.fetch_add(size, Ordering::SeqCst);
			chunk_hashes.push(hash);
			Ok(())
		};

		chunker.chunk_all(
			chain,
			start_hash,
			&mut chunk_sink,
			PREFERRED_CHUNK_SIZE,
		)?;
	}

	Ok(chunk_hashes)
}
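
// Readback sketch (illustrative): manifest hashes cover the *compressed* chunk
// bytes, so a restorer verifies before decompressing; the exact decompression
// helper name below is an assumption.
//
//	assert_eq!(chunk_bytes.sha3(), expected_hash);
//	let raw = snappy::decompress(&chunk_bytes)?;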

/// State trie chunker.
struct StateChunker<'a> {
	hashes: Vec<H256>,
	rlps: Vec<Bytes>,
	cur_size: usize,
	snappy_buffer: Vec<u8>,
	writer: &'a Mutex<SnapshotWriter + 'a>,
	progress: &'a Progress,
}

impl<'a> StateChunker<'a> {
	// Push a key, value pair to be encoded.
	//
	// The data is buffered in memory; `chunk_state` flushes the buffer via
	// `write_chunk` once it approaches the preferred chunk size.
	fn push(&mut self, data: Bytes) -> Result<(), Error> {
		self.cur_size += data.len();
		self.rlps.push(data);
		Ok(())
	}

	// Write out the buffer to disk, pushing the created chunk's hash to
	// the list.
	fn write_chunk(&mut self) -> Result<(), Error> {
		let num_entries = self.rlps.len();
		let mut stream = RlpStream::new_list(num_entries);
		for rlp in self.rlps.drain(..) {
			stream.append_raw(&rlp, 1);
		}

		let raw_data = stream.out();

		let compressed_size = snappy::compress_into(&raw_data, &mut self.snappy_buffer);
		let compressed = &self.snappy_buffer[..compressed_size];
		let hash = compressed.sha3();

		self.writer.lock().write_state_chunk(hash, compressed)?;
		trace!(target: "snapshot", "wrote state chunk. size: {}, uncompressed size: {}", compressed_size, raw_data.len());

		self.progress.accounts.fetch_add(num_entries, Ordering::SeqCst);
		self.progress.size.fetch_add(compressed_size, Ordering::SeqCst);

		self.hashes.push(hash);
		self.cur_size = 0;

		Ok(())
	}

	// Get current chunk size.
	fn chunk_size(&self) -> usize {
		self.cur_size
	}
}

/// Walk the given state database starting from the given root,
/// creating chunks and writing them out.
///
/// Returns a list of hashes of chunks created, or any error it may
/// have encountered.
pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
	let account_trie = TrieDB::new(db, &root)?;

	let mut chunker = StateChunker {
		hashes: Vec::new(),
		rlps: Vec::new(),
		cur_size: 0,
		snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
		writer: writer,
		progress: progress,
	};

	let mut used_code = HashSet::new();

	// account_key here is the address' hash.
	for item in account_trie.iter()? {
		let (account_key, account_data) = item?;
		let account = ::rlp::decode(&*account_data);
		let account_key_hash = H256::from_slice(&account_key);

		let account_db = AccountDB::from_hash(db, account_key_hash);

		let fat_rlps = account::to_fat_rlps(&account_key_hash, &account, &account_db, &mut used_code, PREFERRED_CHUNK_SIZE - chunker.chunk_size(), PREFERRED_CHUNK_SIZE)?;
		for (i, fat_rlp) in fat_rlps.into_iter().enumerate() {
			if i > 0 {
				chunker.write_chunk()?;
			}
			chunker.push(fat_rlp)?;
		}
	}

	if chunker.cur_size != 0 {
		chunker.write_chunk()?;
	}

	Ok(chunker.hashes)
}
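
// Format note with a walking sketch (mirrors `rebuild_accounts` below): each
// state chunk is, before compression, an RLP list of [account_key_hash,
// fat_account_rlp] pairs.
//
//	let rlp = UntrustedRlp::new(&uncompressed_chunk);
//	for account_rlp in rlp.into_iter() {
//		let addr_hash: H256 = account_rlp.val_at(0)?;
//		let fat_rlp = account_rlp.at(1)?;
//		// rebuild the storage trie and code for this account from `fat_rlp`.
//	}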

/// Used to rebuild the state trie piece by piece.
pub struct StateRebuilder {
	db: Box<JournalDB>,
	state_root: H256,
	known_code: HashMap<H256, H256>, // code hashes mapped to first account with this code.
	missing_code: HashMap<H256, Vec<H256>>, // maps code hashes to lists of accounts missing that code.
	bloom: Bloom,
	known_storage_roots: HashMap<H256, H256>, // maps account hashes to last known storage root. Only filled for last account per chunk.
}

impl StateRebuilder {
	/// Create a new state rebuilder to write into the given backing DB.
	pub fn new(db: Arc<KeyValueDB>, pruning: Algorithm) -> Self {
		StateRebuilder {
			db: journaldb::new(db.clone(), pruning, ::db::COL_STATE),
			state_root: SHA3_NULL_RLP,
			known_code: HashMap::new(),
			missing_code: HashMap::new(),
			bloom: StateDB::load_bloom(&*db),
			known_storage_roots: HashMap::new(),
		}
	}

	/// Feed an uncompressed state chunk into the rebuilder.
	pub fn feed(&mut self, chunk: &[u8], flag: &AtomicBool) -> Result<(), ::error::Error> {
		let rlp = UntrustedRlp::new(chunk);
		let empty_rlp = StateAccount::new_basic(U256::zero(), U256::zero()).rlp();
		let mut pairs = Vec::with_capacity(rlp.item_count()?);

		// initialize the pairs vector with empty values so we have slots to write into.
		pairs.resize(rlp.item_count()?, (H256::new(), Vec::new()));

		let status = rebuild_accounts(
			self.db.as_hashdb_mut(),
			rlp,
			&mut pairs,
			&self.known_code,
			&mut self.known_storage_roots,
			flag
		)?;

		for (addr_hash, code_hash) in status.missing_code {
			self.missing_code.entry(code_hash).or_insert_with(Vec::new).push(addr_hash);
		}

		// patch up all missing code. must be done after collecting all new missing code entries.
		for (code_hash, code, first_with) in status.new_code {
			for addr_hash in self.missing_code.remove(&code_hash).unwrap_or_else(Vec::new) {
				let mut db = AccountDBMut::from_hash(self.db.as_hashdb_mut(), addr_hash);
				db.emplace(code_hash, DBValue::from_slice(&code));
			}

			self.known_code.insert(code_hash, first_with);
		}

		let backing = self.db.backing().clone();

		// batch trie writes
		{
			let mut account_trie = if self.state_root != SHA3_NULL_RLP {
				TrieDBMut::from_existing(self.db.as_hashdb_mut(), &mut self.state_root)?
			} else {
				TrieDBMut::new(self.db.as_hashdb_mut(), &mut self.state_root)
			};

			for (hash, thin_rlp) in pairs {
				if !flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }

				if &thin_rlp[..] != &empty_rlp[..] {
					self.bloom.set(&*hash);
				}
				account_trie.insert(&hash, &thin_rlp)?;
			}
		}

		let bloom_journal = self.bloom.drain_journal();
		let mut batch = backing.transaction();
		StateDB::commit_bloom(&mut batch, bloom_journal)?;
		self.db.inject(&mut batch)?;
		backing.write_buffered(batch);
		trace!(target: "snapshot", "current state root: {:?}", self.state_root);
		Ok(())
	}

	/// Finalize the restoration. Check for accounts missing code and make a dummy
	/// journal entry.
	/// Once all chunks have been fed, there should be nothing missing.
	pub fn finalize(mut self, era: u64, id: H256) -> Result<Box<JournalDB>, ::error::Error> {
		let missing = self.missing_code.keys().cloned().collect::<Vec<_>>();
		if !missing.is_empty() { return Err(Error::MissingCode(missing).into()) }

		let mut batch = self.db.backing().transaction();
		self.db.journal_under(&mut batch, era, &id)?;
		self.db.backing().write_buffered(batch);

		Ok(self.db)
	}

	/// Get the state root of the rebuilder.
	pub fn state_root(&self) -> H256 { self.state_root }
}
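
// Restoration sketch (illustrative; in practice the snapshot `Service` drives
// this, handling chunk decompression and the journal era/id):
//
//	let mut rebuilder = StateRebuilder::new(db.clone(), Algorithm::OverlayRecent);
//	let keep_going = AtomicBool::new(true); // set to false to abort mid-feed
//	for chunk in uncompressed_state_chunks {
//		rebuilder.feed(&chunk, &keep_going)?;
//	}
//	assert_eq!(rebuilder.state_root(), manifest.state_root);
//	let journal_db = rebuilder.finalize(era, id)?;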

#[derive(Default)]
struct RebuiltStatus {
	// new code that's become available. (code_hash, code, addr_hash)
	new_code: Vec<(H256, Bytes, H256)>,
	missing_code: Vec<(H256, H256)>, // accounts that are missing code: (addr_hash, code_hash).
}

// rebuild a set of accounts and their storage.
// returns a status detailing newly-loaded code and accounts missing code.
fn rebuild_accounts(
	db: &mut HashDB,
	account_fat_rlps: UntrustedRlp,
	out_chunk: &mut [(H256, Bytes)],
	known_code: &HashMap<H256, H256>,
	known_storage_roots: &mut HashMap<H256, H256>,
	abort_flag: &AtomicBool,
) -> Result<RebuiltStatus, ::error::Error> {
	let mut status = RebuiltStatus::default();
	for (account_rlp, out) in account_fat_rlps.into_iter().zip(out_chunk.iter_mut()) {
		if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }

		let hash: H256 = account_rlp.val_at(0)?;
		let fat_rlp = account_rlp.at(1)?;

		let thin_rlp = {
			// fill out the storage trie and code while decoding.
			let (acc, maybe_code) = {
				let mut acct_db = AccountDBMut::from_hash(db, hash);
				let storage_root = known_storage_roots.get(&hash).cloned().unwrap_or(H256::zero());
				account::from_fat_rlp(&mut acct_db, fat_rlp, storage_root)?
			};

			let code_hash = acc.code_hash.clone();
			match maybe_code {
				// new inline code
				Some(code) => status.new_code.push((code_hash, code, hash)),
				None => {
					if code_hash != ::util::SHA3_EMPTY {
						// see if this code has already been included inline
						match known_code.get(&code_hash) {
							Some(&first_with) => {
								// if so, load it from the database.
								let code = AccountDB::from_hash(db, first_with)
									.get(&code_hash)
									.ok_or_else(|| Error::MissingCode(vec![first_with]))?;

								// and write it again under a different mangled key
								AccountDBMut::from_hash(db, hash).emplace(code_hash, code);
							}
							// if not, queue it up to be filled later
							None => status.missing_code.push((hash, code_hash)),
						}
					}
				}
			}

			::rlp::encode(&acc).to_vec()
		};

		*out = (hash, thin_rlp);
	}

	// an account can be split across chunk boundaries, in which case it appears
	// as the last entry of one chunk and the first of the next; record both
	// endpoints' storage roots so a continuation can resume from them.
	if let Some(&(ref hash, ref rlp)) = out_chunk.iter().last() {
		known_storage_roots.insert(*hash, ::rlp::decode::<BasicAccount>(rlp).storage_root);
	}
	if let Some(&(ref hash, ref rlp)) = out_chunk.iter().next() {
		known_storage_roots.insert(*hash, ::rlp::decode::<BasicAccount>(rlp).storage_root);
	}
	Ok(status)
}

/// Proportion of blocks which we will verify `PoW` for.
const POW_VERIFY_RATE: f32 = 0.02;

/// Verify an old block with the given header, engine, blockchain, body. If `always` is set, it will perform
/// the fullest verification possible. If not, it will take a random sample to determine whether it will
/// do heavy or light verification.
pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &Engine, chain: &BlockChain, body: Option<&[u8]>, always: bool) -> Result<(), ::error::Error> {
	engine.verify_block_basic(header, body)?;

	if always || rng.gen::<f32>() <= POW_VERIFY_RATE {
		engine.verify_block_unordered(header, body)?;
		match chain.block_header(header.parent_hash()) {
			Some(parent) => engine.verify_block_family(header, &parent, body),
			None => Ok(()),
		}
	} else {
		Ok(())
	}
}
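
// Caller sketch (illustrative): during block chunk restoration each restored
// block can be passed through `verify_old_block` with `always == false`, so on
// average only `POW_VERIFY_RATE` (2%) of blocks get the heavy seal check.
//
//	let mut rng = OsRng::new()?;
//	verify_old_block(&mut rng, &header, &*engine, &chain, Some(&block_bytes), false)?;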