// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of OpenEthereum.

// OpenEthereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// OpenEthereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.

//! Tests for the snapshot service.
use std::{fs, sync::Arc};

use blockchain::BlockProvider;
use client::{BlockInfo, Client, ClientConfig, ImportBlock};
use io::IoChannel;
use kvdb_rocksdb::DatabaseConfig;
use parking_lot::Mutex;
use snapshot::{
    chunk_secondary, chunk_state,
    io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter},
    service::{Service, ServiceParams},
    ManifestData, Progress, RestorationStatus, SnapshotService,
};
use spec::Spec;
use tempdir::TempDir;
use test_helpers::{
    generate_dummy_client_with_spec_and_data, new_db, new_temp_db, restoration_db_handler,
};
use types::ids::BlockId;
use verification::queue::kind::blocks::Unverified;

#[test]
fn restored_is_equivalent() {
    // Take a snapshot of a 400-block chain, restore it into a fresh client,
    // and check that every block in the restored chain is byte-identical to
    // the original.
    let _ = ::env_logger::try_init();

    const NUM_BLOCKS: u32 = 400;
    const TX_PER: usize = 5;

    let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
    let client = generate_dummy_client_with_spec_and_data(
        Spec::new_null,
        NUM_BLOCKS,
        TX_PER,
        &gas_prices,
        false,
    );

    let tempdir = TempDir::new("").unwrap();
    let client_db = tempdir.path().join("client_db");
    let path = tempdir.path().join("snapshot");

    let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
    let restoration = restoration_db_handler(db_config);
    let blockchain_db = restoration.open(&client_db).unwrap();

    // `client2` starts empty; the snapshot restoration should rebuild it.
    let spec = Spec::new_null();
    let client2 = Client::new(
        Default::default(),
        &spec,
        blockchain_db,
        Arc::new(::miner::Miner::new_for_tests(&spec, None)),
        IoChannel::disconnected(),
    )
    .unwrap();

    let service_params = ServiceParams {
        engine: spec.engine.clone(),
        genesis_block: spec.genesis_block(),
        restoration_db_handler: restoration,
        pruning: ::journaldb::Algorithm::Archive,
        channel: IoChannel::disconnected(),
        snapshot_root: path,
        client: client2.clone(),
    };

    let service = Service::new(service_params).unwrap();
    service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap();

    let manifest = service.manifest().unwrap();

    // Initializing a restoration twice must be accepted (restarts cleanly).
    service.init_restore(manifest.clone(), true).unwrap();
    assert!(service.init_restore(manifest.clone(), true).is_ok());

    // Feed back every state and block chunk produced by the snapshot.
    for hash in manifest.state_hashes {
        let chunk = service.chunk(hash).unwrap();
        service.feed_state_chunk(hash, &chunk);
    }

    for hash in manifest.block_hashes {
        let chunk = service.chunk(hash).unwrap();
        service.feed_block_chunk(hash, &chunk);
    }

    // Once all chunks are fed, the restoration must have completed.
    assert_eq!(service.restoration_status(), RestorationStatus::Inactive);

    // The restored chain must match the original block-for-block.
    for x in 0..NUM_BLOCKS {
        let block1 = client.block(BlockId::Number(x as u64)).unwrap();
        let block2 = client2.block(BlockId::Number(x as u64)).unwrap();

        assert_eq!(block1, block2);
    }
}
// on windows the guards deletion (remove_dir_all)
// is not happening (error directory is not empty).
// So the test is disabled until windows api behave.
#[cfg(not(target_os = "windows"))]
#[test]
fn guards_delete_folders() {
    // Check that the restoration guard cleans up the `db` folder (but keeps
    // `temp`) when a restoration is aborted or the service is dropped.
    let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
    let client =
        generate_dummy_client_with_spec_and_data(Spec::new_null, 400, 5, &gas_prices, false);

    let spec = Spec::new_null();
    let tempdir = TempDir::new("").unwrap();
    let service_params = ServiceParams {
        engine: spec.engine.clone(),
        genesis_block: spec.genesis_block(),
        restoration_db_handler: restoration_db_handler(DatabaseConfig::with_columns(
            ::db::NUM_COLUMNS,
        )),
        pruning: ::journaldb::Algorithm::Archive,
        channel: IoChannel::disconnected(),
        snapshot_root: tempdir.path().to_owned(),
        client: client,
    };

    let service = Service::new(service_params).unwrap();
    let path = tempdir.path().join("restoration");

    // An empty manifest is enough to begin a restoration and create folders.
    let manifest = ManifestData {
        version: 2,
        state_hashes: vec![],
        block_hashes: vec![],
        block_number: 0,
        block_hash: Default::default(),
        state_root: Default::default(),
    };

    service.init_restore(manifest.clone(), true).unwrap();
    assert!(path.exists());

    // The `db` folder should have been deleted,
    // while the `temp` one kept
    service.abort_restore();
    assert!(!path.join("db").exists());
    assert!(path.join("temp").exists());

    service.init_restore(manifest.clone(), true).unwrap();
    assert!(path.exists());

    // Dropping the service must perform the same cleanup as an abort.
    drop(service);
    assert!(!path.join("db").exists());
    assert!(path.join("temp").exists());
}
#[test]
fn keep_ancient_blocks() {
    // Restore a partial (300-block) snapshot of a 500-block chain into a
    // client that already holds blocks 1..=49 as "ancient" blocks, and check
    // that those ancient blocks survive the restoration.
    let _ = ::env_logger::try_init();

    // Test variables
    const NUM_BLOCKS: u64 = 500;
    const NUM_SNAPSHOT_BLOCKS: u64 = 300;
    const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot {
        blocks: NUM_SNAPSHOT_BLOCKS,
        max_restore_blocks: NUM_SNAPSHOT_BLOCKS,
    };

    // Temporary folders
    let tempdir = TempDir::new("").unwrap();
    let snapshot_path = tempdir.path().join("SNAP");

    // Generate blocks
    let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
    let spec_f = Spec::new_null;
    let spec = spec_f();
    let client =
        generate_dummy_client_with_spec_and_data(spec_f, NUM_BLOCKS as u32, 5, &gas_prices, false);

    let bc = client.chain();

    // Create the Snapshot: chunk the last NUM_SNAPSHOT_BLOCKS blocks and the
    // state at the best block into a packed snapshot file.
    let best_hash = bc.best_block_hash();
    let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap());
    let block_hashes = chunk_secondary(
        Box::new(SNAPSHOT_MODE),
        &bc,
        best_hash,
        &writer,
        &Progress::default(),
    )
    .unwrap();
    let state_db = client.state_db().journal_db().boxed_clone();
    let start_header = bc.block_header_data(&best_hash).unwrap();
    let state_root = start_header.state_root();
    let state_hashes = chunk_state(
        state_db.as_hash_db(),
        &state_root,
        &writer,
        &Progress::default(),
        None,
        0,
    )
    .unwrap();

    let manifest = ManifestData {
        version: 2,
        state_hashes,
        state_root,
        block_hashes,
        block_number: NUM_BLOCKS,
        block_hash: best_hash,
    };

    writer.into_inner().finish(manifest.clone()).unwrap();

    // Initialize the Client
    let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
    let client_db = new_temp_db(&tempdir.path());
    let client2 = Client::new(
        ClientConfig::default(),
        &spec,
        client_db,
        Arc::new(::miner::Miner::new_for_tests(&spec, None)),
        IoChannel::disconnected(),
    )
    .unwrap();

    // Add some ancient blocks (1..=49) before restoring the snapshot.
    for block_number in 1..50 {
        let block_hash = bc.block_hash(block_number).unwrap();
        let block = bc.block(&block_hash).unwrap();
        client2
            .import_block(
                Unverified::from_rlp(block.into_inner(), spec.params().eip1559_transition).unwrap(),
            )
            .unwrap();
    }

    client2.import_verified_blocks();
    client2.flush_queue();

    // Restore the Snapshot
    let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
    let service_params = ServiceParams {
        engine: spec.engine.clone(),
        genesis_block: spec.genesis_block(),
        restoration_db_handler: restoration_db_handler(db_config),
        pruning: ::journaldb::Algorithm::Archive,
        channel: IoChannel::disconnected(),
        snapshot_root: tempdir.path().to_owned(),
        client: client2.clone(),
    };
    let service = Service::new(service_params).unwrap();
    service.init_restore(manifest.clone(), false).unwrap();

    for hash in &manifest.block_hashes {
        let chunk = reader.chunk(*hash).unwrap();
        service.feed_block_chunk(*hash, &chunk);
    }

    for hash in &manifest.state_hashes {
        let chunk = reader.chunk(*hash).unwrap();
        service.feed_state_chunk(*hash, &chunk);
    }

    match service.restoration_status() {
        RestorationStatus::Inactive => (),
        RestorationStatus::Failed => panic!("Snapshot Restoration has failed."),
        RestorationStatus::Ongoing { .. } => panic!("Snapshot Restoration should be done."),
        _ => panic!("Invalid Snapshot Service status."),
    }

    // Check that the latest block number is the right one
    assert_eq!(
        client2.block(BlockId::Latest).unwrap().number(),
        NUM_BLOCKS as u64
    );

    // Check that we have blocks in [NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1 ; NUM_BLOCKS]
    // but none before
    assert!(client2
        .block(BlockId::Number(NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1))
        .is_some());
    assert!(client2.block(BlockId::Number(100)).is_none());

    // Check that all the ancient blocks (1..=49) imported above have been
    // migrated. (Was `1..49`, which skipped verification of block 49.)
    for block_number in 1..50 {
        assert!(client2.block(BlockId::Number(block_number)).is_some());
    }
}
#[test]
fn recover_aborted_recovery() {
    // Check that an aborted restoration can be resumed: previously-fed state
    // chunks are recovered from disk after a re-init, and that wiping the
    // snapshot directory resets the recovery to a blank state.
    let _ = ::env_logger::try_init();

    const NUM_BLOCKS: u32 = 400;
    let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
    let client =
        generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, 5, &gas_prices, false);

    let spec = Spec::new_null();
    let tempdir = TempDir::new("oe_snapshot").unwrap();
    let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
    let client_db = new_db();
    let client2 = Client::new(
        Default::default(),
        &spec,
        client_db,
        Arc::new(::miner::Miner::new_for_tests(&spec, None)),
        IoChannel::disconnected(),
    )
    .unwrap();
    let service_params = ServiceParams {
        engine: spec.engine.clone(),
        genesis_block: spec.genesis_block(),
        restoration_db_handler: restoration_db_handler(db_config),
        pruning: ::journaldb::Algorithm::Archive,
        channel: IoChannel::disconnected(),
        snapshot_root: tempdir.path().to_owned(),
        client: client2.clone(),
    };

    let service = Service::new(service_params).unwrap();
    service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap();

    let manifest = service.manifest().unwrap();
    service.init_restore(manifest.clone(), true).unwrap();

    // Restore only the state chunks
    for hash in &manifest.state_hashes {
        let chunk = service.chunk(*hash).unwrap();
        service.feed_state_chunk(*hash, &chunk);
    }

    match service.restoration_status() {
        RestorationStatus::Ongoing {
            block_chunks_done,
            state_chunks_done,
            ..
        } => {
            assert_eq!(state_chunks_done, manifest.state_hashes.len() as u32);
            assert_eq!(block_chunks_done, 0);
        }
        e => panic!("Snapshot restoration must be ongoing ; {:?}", e),
    }

    // Abort the restore...
    service.abort_restore();

    // And try again! The state chunks fed before the abort must be recovered.
    service.init_restore(manifest.clone(), true).unwrap();

    match service.restoration_status() {
        RestorationStatus::Ongoing {
            block_chunks_done,
            state_chunks_done,
            ..
        } => {
            assert_eq!(state_chunks_done, manifest.state_hashes.len() as u32);
            assert_eq!(block_chunks_done, 0);
        }
        e => panic!("Snapshot restoration must be ongoing ; {:?}", e),
    }
    // abort restoration so that we can delete snapshot root folder
    service.abort_restore();
    // Remove the snapshot directory, and restart the restoration
    // It shouldn't have restored any previous blocks
    fs::remove_dir_all(tempdir.path()).unwrap();

    // And try again! With the directory gone, no chunks can be recovered.
    service.init_restore(manifest.clone(), true).unwrap();

    match service.restoration_status() {
        RestorationStatus::Ongoing {
            block_chunks_done,
            state_chunks_done,
            ..
        } => {
            assert_eq!(block_chunks_done, 0);
            assert_eq!(state_chunks_done, 0);
        }
        _ => panic!("Snapshot restoration must be ongoing"),
    }
}