// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! Secondary chunk creation and restoration, implementation for proof-of-authority
//! based engines.
//!
//! The chunks here contain state proofs of transitions, along with validator proofs.

use super::{ChunkSink, Rebuilder, SnapshotComponents};

use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

use engines::{EpochTransition, EpochVerifier, EthEngine};
use machine::EthereumMachine;
use snapshot::{Error, ManifestData, Progress};

use blockchain::{BlockChain, BlockChainDB, BlockProvider};
use bytes::Bytes;
use ethereum_types::{H256, U256};
use itertools::{Itertools, Position};
use kvdb::KeyValueDB;
use rlp::{Rlp, RlpStream};
use types::{encoded, header::Header, ids::BlockId, receipt::Receipt};

/// Snapshot creation and restoration for PoA chains.
/// Chunk format:
///
/// [FLAG, [header, epoch data], ...]
/// - header of the block at which the transition occurred,
/// - epoch data (usually a list of validators and a proof of the change).
///
/// FLAG is a bool: true for the last chunk, false otherwise.
///
/// The last item of the last chunk will be a list containing data for the warp target block:
/// [header, transactions, uncles, receipts, parent_td].
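///
/// For illustration only (informal, not normative): a non-last chunk holding two
/// transitions decodes as `[false, [header_a, proof_a], [header_b, proof_b]]`, while
/// the last chunk uses `true` as its FLAG and appends the warp target list above.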
pub struct PoaSnapshot;

impl SnapshotComponents for PoaSnapshot {
    fn chunk_all(
        &mut self,
        chain: &BlockChain,
        block_at: H256,
        sink: &mut ChunkSink,
        _progress: &Progress,
        preferred_size: usize,
    ) -> Result<(), Error> {
        let number = chain
            .block_number(&block_at)
            .ok_or_else(|| Error::InvalidStartingBlock(BlockId::Hash(block_at)))?;

        let mut pending_size = 0;
        let mut rlps = Vec::new();
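
        // walk every epoch transition up to (and including) the snapshot block,
        // encoding each as an RLP list of [header, epoch proof].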
        for (_, transition) in chain
            .epoch_transitions()
            .take_while(|&(_, ref t)| t.block_number <= number)
        {
            // this can happen when our starting block is non-canonical.
            if transition.block_number == number && transition.block_hash != block_at {
                break;
            }

            let header = chain
                .block_header_data(&transition.block_hash)
                .ok_or_else(|| Error::BlockNotFound(transition.block_hash))?;

            let entry = {
                let mut entry_stream = RlpStream::new_list(2);
                entry_stream
                    .append_raw(&header.into_inner(), 1)
                    .append(&transition.proof);

                entry_stream.out()
            };

            // cut off the chunk if too large.
            let new_loaded_size = pending_size + entry.len();
            pending_size = if new_loaded_size > preferred_size && !rlps.is_empty() {
                write_chunk(false, &mut rlps, sink)?;
                entry.len()
            } else {
                new_loaded_size
            };

            rlps.push(entry);
        }
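
        // the warp target block itself is appended as the final entry:
        // header, transactions, uncles, receipts, and the parent total difficulty.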
        let (block, receipts) = chain
            .block(&block_at)
            .and_then(|b| chain.block_receipts(&block_at).map(|r| (b, r)))
            .ok_or_else(|| Error::BlockNotFound(block_at))?;
        let block = block.decode()?;

        let parent_td = chain
            .block_details(block.header.parent_hash())
            .map(|d| d.total_difficulty)
            .ok_or_else(|| Error::BlockNotFound(block_at))?;

        rlps.push({
            let mut stream = RlpStream::new_list(5);
            stream
                .append(&block.header)
                .append_list(&block.transactions)
                .append_list(&block.uncles)
                .append(&receipts)
                .append(&parent_td);
            stream.out()
        });
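
        // whatever is still pending (always at least the warp target entry)
        // goes out as the final chunk, flagged as last.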
        write_chunk(true, &mut rlps, sink)?;

        Ok(())
    }

    fn rebuilder(
        &self,
        chain: BlockChain,
        db: Arc<dyn BlockChainDB>,
        manifest: &ManifestData,
    ) -> Result<Box<dyn Rebuilder>, ::error::Error> {
        Ok(Box::new(ChunkRebuilder {
            manifest: manifest.clone(),
            warp_target: None,
            chain: chain,
            db: db.key_value().clone(),
            had_genesis: false,
            unverified_firsts: Vec::new(),
            last_epochs: Vec::new(),
        }))
    }

    fn min_supported_version(&self) -> u64 {
        3
    }
    fn current_version(&self) -> u64 {
        3
    }
}

// writes a chunk composed of the given inner RLPs into the sink.
// the flag indicates whether the chunk is the last chunk.
fn write_chunk(last: bool, chunk_data: &mut Vec<Bytes>, sink: &mut ChunkSink) -> Result<(), Error> {
    let mut stream = RlpStream::new_list(1 + chunk_data.len());

    stream.append(&last);
    for item in chunk_data.drain(..) {
        stream.append_raw(&item, 1);
    }

    (sink)(stream.out().as_slice()).map_err(Into::into)
}

// rebuilder checks state proofs for all transitions, and checks that each
// transition header is verifiable from the epoch data of the one prior.
struct ChunkRebuilder {
    manifest: ManifestData,
    warp_target: Option<Header>,
    chain: BlockChain,
    db: Arc<dyn KeyValueDB>,
    had_genesis: bool,

    // sorted vectors of unverified first blocks in a chunk
    // and epoch data from last blocks in chunks.
    // verification for these will be done at the end.
    unverified_firsts: Vec<(Header, Bytes, H256)>,
    last_epochs: Vec<(Header, Box<dyn EpochVerifier<EthereumMachine>>)>,
}

// verified data.
struct Verified {
    epoch_transition: EpochTransition,
    header: Header,
}

impl ChunkRebuilder {
    fn verify_transition(
        &mut self,
        last_verifier: &mut Option<Box<dyn EpochVerifier<EthereumMachine>>>,
        transition_rlp: Rlp,
        engine: &dyn EthEngine,
    ) -> Result<Verified, ::error::Error> {
        use engines::ConstructedVerifier;

        // decode.
        let header: Header = transition_rlp.val_at(0)?;
        let epoch_data: Bytes = transition_rlp.val_at(1)?;

        trace!(target: "snapshot", "verifying transition to epoch at block {}", header.number());

        // check current transition against validators of last epoch.
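        // `Trusted` verifiers need no extra proof; `Unconfirmed` ones carry a finality
        // proof that is checked against the previous epoch's verifier here, or deferred
        // to `finalize` when no previous verifier is available in this chunk yet.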
        let new_verifier = match engine.epoch_verifier(&header, &epoch_data) {
            ConstructedVerifier::Trusted(v) => v,
            ConstructedVerifier::Unconfirmed(v, finality_proof, hash) => {
                match *last_verifier {
                    Some(ref last) => {
                        if last
                            .check_finality_proof(finality_proof)
                            .map_or(true, |hashes| !hashes.contains(&hash))
                        {
                            return Err(Error::BadEpochProof(header.number()).into());
                        }
                    }
                    None if header.number() != 0 => {
                        // genesis (block 0) never requires additional validation;
                        // other first transitions in a chunk are queued for
                        // verification once a matching last-epoch verifier is known.

                        let idx = self
                            .unverified_firsts
                            .binary_search_by_key(&header.number(), |&(ref h, _, _)| h.number())
                            .unwrap_or_else(|x| x);

                        let entry = (header.clone(), finality_proof.to_owned(), hash);
                        self.unverified_firsts.insert(idx, entry);
                    }
                    None => {}
                }

                v
            }
            ConstructedVerifier::Err(e) => return Err(e),
        };

        // create new epoch verifier.
        *last_verifier = Some(new_verifier);

        Ok(Verified {
            epoch_transition: EpochTransition {
                block_hash: header.hash(),
                block_number: header.number(),
                proof: epoch_data,
            },
            header: header,
        })
    }
}

impl Rebuilder for ChunkRebuilder {
    fn feed(
        &mut self,
        chunk: &[u8],
        engine: &dyn EthEngine,
        abort_flag: &AtomicBool,
    ) -> Result<(), ::error::Error> {
        let rlp = Rlp::new(chunk);
        let is_last_chunk: bool = rlp.val_at(0)?;
        let num_items = rlp.item_count()?;
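
        // a chunk is [FLAG, transition, transition, ...]; the last chunk additionally
        // carries the warp target block as its final item.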
        // number of transitions in the chunk.
        let num_transitions = if is_last_chunk {
            num_items - 2
        } else {
            num_items - 1
        };

        if num_transitions == 0 && !is_last_chunk {
            return Err(
                Error::WrongChunkFormat("Found non-last chunk without any data.".into()).into(),
            );
        }

        let mut last_verifier = None;
        let mut last_number = None;
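        // feed each transition through `verify_transition`, requiring strictly
        // increasing block numbers within the chunk.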
        for transition_rlp in rlp.iter().skip(1).take(num_transitions).with_position() {
            if !abort_flag.load(Ordering::SeqCst) {
                return Err(Error::RestorationAborted.into());
            }

            let (is_first, is_last) = match transition_rlp {
                Position::First(_) => (true, false),
                Position::Middle(_) => (false, false),
                Position::Last(_) => (false, true),
                Position::Only(_) => (true, true),
            };

            let transition_rlp = transition_rlp.into_inner();
            let verified = self.verify_transition(&mut last_verifier, transition_rlp, engine)?;

            if last_number.map_or(false, |num| verified.header.number() <= num) {
                return Err(Error::WrongChunkFormat(
                    "Later epoch transition in earlier or same block.".into(),
                )
                .into());
            }

            last_number = Some(verified.header.number());

            // book-keep borders for verification later.
            if is_first {
                // make sure the genesis transition was included,
                // but it doesn't need verification later.
                if verified.header.number() == 0 {
                    if verified.header.hash() != self.chain.genesis_hash() {
                        return Err(Error::WrongBlockHash(
                            0,
                            verified.header.hash(),
                            self.chain.genesis_hash(),
                        )
                        .into());
                    }

                    self.had_genesis = true;
                }
            }
            if is_last {
                let idx = self
                    .last_epochs
                    .binary_search_by_key(&verified.header.number(), |&(ref h, _)| h.number())
                    .unwrap_or_else(|x| x);

                let entry = (
                    verified.header.clone(),
                    last_verifier
                        .take()
                        .expect("last_verifier always set after verify_transition; qed"),
                );
                self.last_epochs.insert(idx, entry);
            }

            // write epoch transition into database.
            let mut batch = self.db.transaction();
            self.chain.insert_epoch_transition(
                &mut batch,
                verified.header.number(),
                verified.epoch_transition,
            );
            self.db.write_buffered(batch);

            trace!(target: "snapshot", "Verified epoch transition for epoch at block {}", verified.header.number());
        }
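
        // the last chunk carries the warp target block: decode it, check its hash
        // against the manifest, and insert it (with receipts and parent total
        // difficulty) as an unordered block.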
        if is_last_chunk {
            use types::block::Block;

            let last_rlp = rlp.at(num_items - 1)?;
            let block = Block {
                header: last_rlp.val_at(0)?,
                transactions: last_rlp.list_at(1)?,
                uncles: last_rlp.list_at(2)?,
            };
            let block_data = block.rlp_bytes();
            let receipts: Vec<Receipt> = last_rlp.list_at(3)?;

            {
                let hash = block.header.hash();
                let best_hash = self.manifest.block_hash;
                if hash != best_hash {
                    return Err(
                        Error::WrongBlockHash(block.header.number(), best_hash, hash).into(),
                    );
                }
            }

            let parent_td: U256 = last_rlp.val_at(4)?;

            let mut batch = self.db.transaction();
            self.chain.insert_unordered_block(
                &mut batch,
                encoded::Block::new(block_data),
                receipts,
                Some(parent_td),
                true,
                false,
            );
            self.db.write_buffered(batch);

            self.warp_target = Some(block.header);
        }

        Ok(())
    }

    fn finalize(&mut self, _engine: &dyn EthEngine) -> Result<(), ::error::Error> {
        if !self.had_genesis {
            return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into());
        }

        let target_header = match self.warp_target.take() {
            Some(x) => x,
            None => {
                return Err(
                    Error::WrongChunkFormat("Warp target block not included.".into()).into(),
                )
            }
        };

        // verify the first entries of chunks we couldn't before.
        // we store all last verifiers, but not all firsts.
        // match each unverified first epoch with a last epoch verifier.
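        // (both lists are walked newest-to-oldest, so each first is matched against
        // the nearest earlier last-epoch verifier.)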
        let mut lasts_reversed = self.last_epochs.iter().rev();
        for &(ref header, ref finality_proof, hash) in self.unverified_firsts.iter().rev() {
            let mut found = false;
            while let Some(&(ref last_header, ref last_verifier)) = lasts_reversed.next() {
                if last_header.number() < header.number() {
                    if last_verifier
                        .check_finality_proof(&finality_proof)
                        .map_or(true, |hashes| !hashes.contains(&hash))
                    {
                        return Err(Error::BadEpochProof(header.number()).into());
                    }
                    found = true;
                    break;
                }
            }

            if !found {
                return Err(Error::WrongChunkFormat("Inconsistent chunk ordering.".into()).into());
            }
        }

        // verify the warp target against the most recent epoch's verifier.
        // if the warp target was a transition itself,
        // it's already verified and doesn't need any more verification.
        let &(ref header, ref last_epoch) = self
            .last_epochs
            .last()
            .expect("last_epochs known to have at least one element by the check above; qed");

        if header != &target_header {
            last_epoch.verify_heavy(&target_header)?;
        }

        Ok(())
    }
}