openethereum/ethcore/src/snapshot/tests/state.rs
David cd26526868
Make ClientIoMessage generic over the Client (#10981)
* Add client-traits crate
Move the BlockInfo trait to new crate

* New crate `machine`
Contains code extracted from ethcore that defines `Machine`, `Externalities` and other execution related code.

* Use new machine and client-traits crates in ethcore

* Use new crates machine and client-traits instead of ethcore where appropriate

* Fix tests

* Don't re-export so many types from ethcore::client

* Fixing more fallout from removing re-export

* fix test

* More fallout from not re-exporting types

* Add some docs

* cleanup

* import the macro edition style

* Tweak docs

* Add missing import

* remove unused ethabi_derive imports

* Use latest ethabi-contract

* Move many traits from ethcore/client/traits to client-traits crate
Initial version of extracted Engine trait

* Move snapshot related traits to the engine crate (eew)

* Move a few snapshot related types to common_types
Cleanup Executed as exported from machine crate

* fix warning

* Gradually introduce new engine crate: snapshot

* ethcore typechecks with new engine crate

* Sort out types outside ethcore

* Add an EpochVerifier to ethash and use that in Engine.epoch_verifier()
Cleanup

* Document pub members

* Sort out tests
Sort out default impls for EpochVerifier

* Add test-helpers feature and move EngineSigner impl to the right place

* Sort out tests

* Sort out tests and refactor verification types

* Fix missing traits

* More missing traits
Fix Histogram

* Fix tests and cleanup

* cleanup

* Put back needed logger import

* Don't re-export common_types from ethcore/src/client
Don't export ethcore::client::*

* Remove files no longer used
Use types from the engine crate
Explicit exports from engine::engine

* Get rid of itertools

* Move a few more traits from ethcore to client-traits: BlockChainReset, ScheduleInfo, StateClient

* Move ProvingBlockChainClient to client-traits

* Don't re-export ForkChoice and Transition from ethcore

* Address grumbles: sort imports, remove commented out code

* Fix merge resolution error

* Extract the Clique engine to own crate

* Extract NullEngine and the block_reward module from ethcore

* Extract InstantSeal engine to own crate

* Extract remaining engines

* Extract executive_state to own crate so it can be used by engine crates

* Remove snapshot stuff from the engine crate

* Put snapshot traits back in ethcore

* cleanup

* Remove stuff from ethcore

* Don't use itertools

* itertools in aura is legit-ish

* More post-merge fixes

* Re-export less types in client

* cleanup

* Extract spec to own crate

* Put back the test-helpers from basic-authority

* Fix ethcore benchmarks

* Reduce the public api of ethcore/verification

* WIP

* Add Cargo.toml

* Fix compilation outside ethcore

* Audit uses of import_verified_blocks() and remove unneeded calls
Cleanup

* cleanup

* Remove unused imports from ethcore

* Cleanup

* remove double semi-colons

* Add missing generic param

* More missing generics

* Update ethcore/block-reward/Cargo.toml

Co-Authored-By: Tomasz Drwięga <tomusdrw@users.noreply.github.com>

* Update ethcore/engines/basic-authority/Cargo.toml

Co-Authored-By: Tomasz Drwięga <tomusdrw@users.noreply.github.com>

* Update ethcore/engines/ethash/Cargo.toml

Co-Authored-By: Tomasz Drwięga <tomusdrw@users.noreply.github.com>

* Update ethcore/engines/clique/src/lib.rs

Co-Authored-By: Tomasz Drwięga <tomusdrw@users.noreply.github.com>

* signers is already a ref

* Add an EngineType enum to tighten up Engine.name()

* Introduce Snapshotting enum to distinguish the type of snapshots a chain uses

* Rename supports_warp to snapshot_mode

* Missing import

* Update ethcore/src/snapshot/consensus/mod.rs

Co-Authored-By: Tomasz Drwięga <tomusdrw@users.noreply.github.com>

* missing import

* Fix import

* double semi

* Fix merge problem

* cleanup

* Parametrise `ClientIoMessage` with `()` for the light client

* Add impl Tick for ()

* Address review feedback

* Move ClientIoMessage to common-types (see the sketch after this commit message)

* remove superseded fixme

* fix merge conflict errors
2019-08-28 10:09:42 +02:00
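
For context on the headline change above: ClientIoMessage became generic over the concrete client type, and the light client instantiates it with (). The sketch below is illustrative only; the variant names and the Execute payload are assumptions for the purpose of the example, not the actual definitions moved to common-types in this PR.

    // Illustrative sketch, not the real definition: variant names and payloads are assumed.
    /// A message sent to client IO handlers, generic over the concrete client type
    /// so the full and light clients can share the same channel machinery.
    pub enum ClientIoMessage<C> {
        /// Begin restoring a snapshot (payload elided in this sketch).
        BeginRestoration,
        /// Feed a state chunk to the restoration service (payload elided).
        FeedStateChunk,
        /// Run a closure against the client once it is available; this variant is
        /// what forces the message type to be generic over the client.
        Execute(Box<dyn Fn(&C) + Send>),
    }

    // The light client has no full client to call back into, so it can simply
    // instantiate the message type with the unit type.
    pub type LightClientIoMessage = ClientIoMessage<()>;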


// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

//! State snapshotting tests.

use std::sync::Arc;
use std::sync::atomic::AtomicBool;

use hash::{KECCAK_NULL_RLP, keccak};
use types::{
    basic_account::BasicAccount,
    errors::EthcoreError as Error,
};
use client_traits::SnapshotWriter;
use snapshot::account;
use snapshot::{chunk_state, Error as SnapshotError, Progress, StateRebuilder, SNAPSHOT_SUBPARTS};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader};
use super::helpers::StateProducer;

use rand::SeedableRng;
use rand_xorshift::XorShiftRng;
use ethereum_types::H256;
use journaldb::{self, Algorithm};
use kvdb_rocksdb::{Database, DatabaseConfig};
use parking_lot::Mutex;
use tempdir::TempDir;

const RNG_SEED: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];

#[test]
fn snap_and_restore() {
    use hash_db::{HashDB, EMPTY_PREFIX};

    let mut producer = StateProducer::new();
    let mut rng = XorShiftRng::from_seed(RNG_SEED);
    let mut old_db = journaldb::new_memory_db();
    let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);

    // Build up a pseudo-random state to snapshot.
    for _ in 0..150 {
        producer.tick(&mut rng, &mut old_db);
    }

    let tempdir = TempDir::new("").unwrap();
    let snap_file = tempdir.path().join("SNAP");

    let state_root = producer.state_root();
    let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap());

    // Chunk the state, one subpart at a time, and write it to the snapshot file.
    let mut state_hashes = Vec::new();
    for part in 0..SNAPSHOT_SUBPARTS {
        let mut hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default(), Some(part), 0).unwrap();
        state_hashes.append(&mut hashes);
    }

    writer.into_inner().finish(::snapshot::ManifestData {
        version: 2,
        state_hashes,
        block_hashes: Vec::new(),
        state_root,
        block_number: 1000,
        block_hash: H256::zero(),
    }).unwrap();

    // Restore the snapshot into a fresh database.
    let db_path = tempdir.path().join("db");
    let db = {
        let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap());
        let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent);
        let reader = PackedReader::new(&snap_file).unwrap().unwrap();

        let flag = AtomicBool::new(true);

        for chunk_hash in &reader.manifest().state_hashes {
            let raw = reader.chunk(*chunk_hash).unwrap();
            let chunk = ::snappy::decompress(&raw).unwrap();
            rebuilder.feed(&chunk, &flag).unwrap();
        }

        assert_eq!(rebuilder.state_root(), state_root);
        rebuilder.finalize(1000, H256::zero()).unwrap();

        new_db
    };

    let new_db = journaldb::new(db, Algorithm::OverlayRecent, ::db::COL_STATE);
    assert_eq!(new_db.earliest_era(), Some(1000));

    // Every key in the original database must be present in the restored one.
    let keys = old_db.keys();
    for key in keys.keys() {
        assert_eq!(old_db.get(&key, EMPTY_PREFIX).unwrap(), new_db.as_hash_db().get(&key, EMPTY_PREFIX).unwrap());
    }
}

#[test]
fn get_code_from_prev_chunk() {
    use std::collections::HashSet;

    use rlp::RlpStream;
    use ethereum_types::{H256, U256};
    use hash_db::{HashDB, EMPTY_PREFIX};
    use account_db::{AccountDBMut, AccountDB};

    let code = b"this is definitely code";
    let mut used_code = HashSet::new();

    // Build a thin account RLP whose code hash points at `code`.
    let mut acc_stream = RlpStream::new_list(4);
    acc_stream.append(&U256::default())
        .append(&U256::default())
        .append(&KECCAK_NULL_RLP)
        .append(&keccak(code));

    let (h1, h2) = (H256::random(), H256::random());

    // Two accounts with the same code, one per chunk.
    // The first one will have its code inlined,
    // the second will just carry the code hash.
    let thin_rlp = acc_stream.out();
    let acc: BasicAccount = ::rlp::decode(&thin_rlp).expect("error decoding basic account");

    let mut make_chunk = |acc, hash| {
        let mut db = journaldb::new_memory_db();
        AccountDBMut::from_hash(&mut db, hash).insert(EMPTY_PREFIX, &code[..]);

        let p = Progress::default();
        let fat_rlp = account::to_fat_rlps(&hash, &acc, &AccountDB::from_hash(&db, hash), &mut used_code, usize::max_value(), usize::max_value(), &p).unwrap();
        let mut stream = RlpStream::new_list(1);
        stream.append_raw(&fat_rlp[0], 1);
        stream.out()
    };

    let chunk1 = make_chunk(acc.clone(), h1);
    let chunk2 = make_chunk(acc, h2);

    let tempdir = TempDir::new("").unwrap();
    let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
    let new_db = Arc::new(Database::open(&db_cfg, tempdir.path().to_str().unwrap()).unwrap());

    // Feeding both chunks must succeed: the second chunk's code is looked up
    // from the first one rather than being duplicated.
    {
        let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent);
        let flag = AtomicBool::new(true);

        rebuilder.feed(&chunk1, &flag).unwrap();
        rebuilder.feed(&chunk2, &flag).unwrap();

        rebuilder.finalize(1000, H256::random()).unwrap();
    }

    let state_db = journaldb::new(new_db, Algorithm::OverlayRecent, ::db::COL_STATE);
    assert_eq!(state_db.earliest_era(), Some(1000));
}

#[test]
fn checks_flag() {
    let mut producer = StateProducer::new();
    let mut rng = XorShiftRng::from_seed(RNG_SEED);
    let mut old_db = journaldb::new_memory_db();
    let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);

    for _ in 0..10 {
        producer.tick(&mut rng, &mut old_db);
    }

    let tempdir = TempDir::new("").unwrap();
    let snap_file = tempdir.path().join("SNAP");

    let state_root = producer.state_root();
    let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap());

    let state_hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default(), None, 0).unwrap();

    writer.into_inner().finish(::snapshot::ManifestData {
        version: 2,
        state_hashes,
        block_hashes: Vec::new(),
        state_root,
        block_number: 0,
        block_hash: H256::zero(),
    }).unwrap();

    let tempdir = TempDir::new("").unwrap();
    let db_path = tempdir.path().join("db");

    {
        let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap());
        let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent);
        let reader = PackedReader::new(&snap_file).unwrap().unwrap();

        // The restoration flag is off, so every feed must abort.
        let flag = AtomicBool::new(false);

        for chunk_hash in &reader.manifest().state_hashes {
            let raw = reader.chunk(*chunk_hash).unwrap();
            let chunk = ::snappy::decompress(&raw).unwrap();

            match rebuilder.feed(&chunk, &flag) {
                Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {},
                _ => panic!("unexpected result when feeding with flag off"),
            }
        }
    }
}